diff --git a/.gitignore b/.gitignore
index 99e70aca7f63d0e545f12a81f0547d88b1226f39..55cf1b5d6609ccb0e3b9b55a09ccd809c242b56d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,11 +1,12 @@
+AUTHORS
+ChangeLog
*.py[cod]
# C extensions
*.so
# Packages
-*.egg
-*.egg-info
+*.egg*
dist
build
_build
@@ -16,7 +17,6 @@ var
sdist
develop-eggs
.installed.cfg
-lib
lib64
# Installer logs
@@ -27,7 +27,7 @@ pip-log.txt
.tox
nosetests.xml
cover
-.testrepository
+.stestr/
*.sqlite
.venv
@@ -38,3 +38,7 @@ cover
.mr.developer.cfg
.project
.pydevproject
+.idea
+
+# reno build
+releasenotes/build
diff --git a/.gitreview b/.gitreview
index 2b1a33cff9a9443eb6b6d08b53330888953fe466..244d76ddc987bc4d1c26328bcc7136200c58c847 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
-host=review.openstack.org
+host=review.opendev.org
port=29418
-project=stackforge/osprofiler.git
+project=openstack/osprofiler.git
diff --git a/.stestr.conf b/.stestr.conf
new file mode 100644
index 0000000000000000000000000000000000000000..6c75b947fdb22eba3e7eb90bcaa9bf0622ee898f
--- /dev/null
+++ b/.stestr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_path=${OS_TEST_PATH:-./osprofiler/tests/unit}
+top_dir=./
+
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index 2109af6ce0fdbd01925e33e32425737835e237ea..0000000000000000000000000000000000000000
--- a/.testr.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..989fd82c90aa52b4543a31160f5fb2279dd8eaa6
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,52 @@
+- project:
+ templates:
+ - check-requirements
+ - lib-forward-testing-python3
+ - openstack-cover-jobs
+ - openstack-lower-constraints-jobs
+ - openstack-python3-victoria-jobs
+ - periodic-stable-jobs
+ - publish-openstack-docs-pti
+ - release-notes-jobs-python3
+ check:
+ jobs:
+ - openstack-tox-functional-py36
+ - tempest-smoke-py3-osprofiler-redis
+ - tempest-smoke-py3-osprofiler-sqlalchemy
+ gate:
+ jobs:
+ - openstack-tox-functional-py36
+
+- job:
+ name: tempest-smoke-py3-osprofiler-redis
+ parent: tempest-full-py3
+ voting: false
+ post-run: playbooks/osprofiler-post.yaml
+ description: |
+ Run full tempest on py3 with profiling enabled (redis driver)
+ required-projects:
+ - openstack/osprofiler
+ vars:
+ tox_envlist: smoke
+ devstack_localrc:
+ OSPROFILER_COLLECTOR: redis
+ OSPROFILER_HMAC_KEYS: SECRET_KEY
+ devstack_plugins:
+ osprofiler: https://opendev.org/openstack/osprofiler
+
+- job:
+ name: tempest-smoke-py3-osprofiler-sqlalchemy
+ parent: tempest-full-py3
+ voting: false
+ post-run: playbooks/osprofiler-post.yaml
+ description: |
+ Run full tempest on py3 with profiling enabled (sqlalchemy driver)
+ required-projects:
+ - openstack/osprofiler
+ vars:
+ tox_envlist: smoke
+ devstack_localrc:
+ OSPROFILER_COLLECTOR: sqlalchemy
+ OSPROFILER_HMAC_KEYS: SECRET_KEY
+ devstack_plugins:
+ osprofiler: https://opendev.org/openstack/osprofiler
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0f05d74fdc2527e9753b8f7d99163b0e77edf225
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,16 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps in this page:
+
+ https://docs.openstack.org/infra/manual/developers.html
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at:
+
+ https://docs.openstack.org/infra/manual/developers.html#development-workflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+ https://bugs.launchpad.net/osprofiler
diff --git a/README.rst b/README.rst
index 88aff684722a5664443e02f274b9848e19784827..0ccb70e62dc62bcdd5a20116939e63c16344958b 100644
--- a/README.rst
+++ b/README.rst
@@ -1,342 +1,29 @@
-OSProfiler
-==========
-
-OSProfiler is an OpenStack cross-project profiling library.
-
-
-Background
-----------
-
-OpenStack consists of multiple projects. Each project, in turn, is composed of
-multiple services. To process some request, e.g. to boot a virtual machine,
-OpenStack uses multiple services from different projects. In the case something
-works too slowly, it's extremely complicated to understand what exactly goes
-wrong and to locate the bottleneck.
-
-To resolve this issue, we introduce a tiny but powerful library,
-**osprofiler**, that is going to be used by all OpenStack projects and their
-python clients. To be able to generate 1 trace per request, that goes through
-all involved services, and builds a tree of calls (see an
-`example `_).
-
-
-Why not cProfile and etc?
--------------------------
-
-**The scope of this library is quite different:**
-
-* We are interested in getting one trace of points from different service,
- not tracing all python calls inside one process.
-
-* This library should be easy integratable in OpenStack. This means that:
-
- * It shouldn't require too many changes in code bases of integrating
- projects.
-
- * We should be able to turn it off fully.
-
- * We should be able to keep it turned on in lazy mode in production
- (e.g. admin should be able to "trace" on request).
-
-
-OSprofiler API version 0.3.0
-----------------------------
-
-There are a couple of things that you should know about API before using it.
-
-
-* **4 ways to add a new trace point**
-
- .. parsed-literal::
-
- from osprofiler import profiler
-
- def some_func():
- profiler.start("point_name", {"any_key": "with_any_value"})
- # your code
- profiler.stop({"any_info_about_point": "in_this_dict"})
-
-
- @profiler.trace("point_name",
- info={"any_info_about_point": "in_this_dict"},
- hide_args=False)
- def some_func2(*args, **kwargs):
- # If you need to hide args in profile info, put hide_args=True
- pass
-
- def some_func3():
- with profiler.Trace("point_name",
- info={"any_key": "with_any_value"}):
- # some code here
-
- @profiler.trace_cls("point_name", info={}, hide_args=False,
- trace_private=False)
- class TracedClass(object):
-
- def traced_method(self):
- pass
-
- def _traced_only_if_trace_private_true(self):
- pass
-
-* **How profiler works?**
-
- * **@profiler.Trace()** and **profiler.trace()** are just syntax sugar,
- that just calls **profiler.start()** & **profiler.stop()** methods.
-
- * Every call of **profiler.start()** & **profiler.stop()** sends to
- **collector** 1 message. It means that every trace point creates 2 records
- in the collector. *(more about collector & records later)*
-
- * Nested trace points are supported. The sample below produces 2 trace points:
-
- .. parsed-literal::
-
- profiler.start("parent_point")
- profiler.start("child_point")
- profiler.stop()
- profiler.stop()
-
- The implementation is quite simple. Profiler has one stack that contains
- ids of all trace points. E.g.:
-
- .. parsed-literal::
-
- profiler.start("parent_point") # trace_stack.push()
- # send to collector -> trace_stack[-2:]
-
- profiler.start("parent_point") # trace_stack.push()
- # send to collector -> trace_stack[-2:]
- profiler.stop() # send to collector -> trace_stack[-2:]
- # trace_stack.pop()
-
- profiler.stop() # send to collector -> trace_stack[-2:]
- # trace_stack.pop()
-
- It's simple to build a tree of nested trace points, having
- **(parent_id, point_id)** of all trace points.
-
-* **Process of sending to collector**
-
- Trace points contain 2 messages (start and stop). Messages like below are
- sent to a collector:
-
- .. parsed-literal::
- {
- "name": -(start|stop)
- "base_id": ,
- "parent_id": ,
- "trace_id": ,
- "info":
- }
-
- * base_id - that is equal for all trace points that belong
- to one trace, this is done to simplify the process of retrieving
- all trace points related to one trace from collector
- * parent_id - of parent trace point
- * trace_id - of current trace point
- * info - it's dictionary that contains user information passed via calls of
- profiler **start()** & **stop()** methods.
-
-
-
-* **Setting up the collector.**
-
- The profiler doesn't include a trace point collector. The user/developer
- should instead provide a method that sends messages to a collector. Let's
- take a look at a trivial sample, where the collector is just a file:
-
- .. parsed-literal::
-
- import json
-
- from osprofiler import notifier
-
- def send_info_to_file_collector(info, context=None):
- with open("traces", "a") as f:
- f.write(json.dumps(info))
-
- notifier.set(send_info_to_file_collector)
-
- So now on every **profiler.start()** and **profiler.stop()** call we will
- write info about the trace point to the end of the **traces** file.
-
-
-* **Initialization of profiler.**
-
- If profiler is not initialized, all calls to **profiler.start()** and
- **profiler.stop()** will be ignored.
-
- Initialization is a quite simple procedure.
-
- .. parsed-literal::
-
- from osprofiler import profiler
-
- profiler.init("SECRET_HMAC_KEY", base_id=, parent_id=)
-
- ``SECRET_HMAC_KEY`` - will be discussed later, because it's related to the
- integration of OSprofiler & OpenStack.
-
- **base_id** and **trace_id** will be used to initialize stack_trace in
- profiler, e.g. stack_trace = [base_id, trace_id].
-
-
-* **OSProfiler CLI.**
-
- To make it easier for end users to work with profiler from CLI, osprofiler
- has entry point that allows them to retrieve information about traces and
- present it in human readable from.
-
- Available commands:
-
- * Help message with all available commands and their arguments:
-
- .. parsed-literal::
-
- $ osprofiler -h/--help
-
- * OSProfiler version:
-
- .. parsed-literal::
-
- $ osprofiler -v/--version
-
- * Results of profiling can be obtained in JSON (option: ``--json``) and HTML
- (option: ``--html``) formats:
-
- .. parsed-literal::
-
- $ osprofiler trace show --json/--html
-
- hint: option ``--out`` will redirect result of ``osprofiler trace show``
- in specified file:
-
- .. parsed-literal::
-
- $ osprofiler trace show --json/--html --out /path/to/file
-
-Integration with OpenStack
---------------------------
-
-There are 4 topics related to integration OSprofiler & `OpenStack`_:
-
-* **What we should use as a centralized collector?**
-
- We decided to use `Ceilometer`_, because:
-
- * It's already integrated in OpenStack, so it's quite simple to send
- notifications to it from all projects.
-
- * There is an OpenStack API in Ceilometer that allows us to retrieve all
- messages related to one trace. Take a look at
- *osprofiler.parsers.ceilometer:get_notifications*
-
-
-* **How to setup profiler notifier?**
-
- We decided to use olso.messaging Notifier API, because:
-
- * `oslo.messaging`_ is integrated in all projects
-
- * It's the simplest way to send notification to Ceilometer, take a
- look at: *osprofiler.notifiers.messaging.Messaging:notify* method
-
- * We don't need to add any new `CONF`_ options in projects
-
-
-* **How to initialize profiler, to get one trace across all services?**
-
- To enable cross service profiling we actually need to do send from caller
- to callee (base_id & trace_id). So callee will be able to init its profiler
- with these values.
-
- In case of OpenStack there are 2 kinds of interaction between 2 services:
-
- * REST API
-
- It's well known that there are python clients for every project,
- that generate proper HTTP requests, and parse responses to objects.
-
- These python clients are used in 2 cases:
-
- * User access -> OpenStack
-
- * Service from Project 1 would like to access Service from Project 2
-
-
- So what we need is to:
-
- * Put in python clients headers with trace info (if profiler is inited)
-
- * Add `OSprofiler WSGI middleware`_ to your service, this initializes
- the profiler, if and only if there are special trace headers, that
- are signed by one of the HMAC keys from api-paste.ini (if multiple
- keys exist the signing process will continue to use the key that was
- accepted during validation).
-
- * The common items that are used to configure the middleware are the
- following (these can be provided when initializing the middleware
- object or when setting up the api-paste.ini file)::
-
- hmac_keys = KEY1, KEY2 (can be a single key as well)
-
- Actually the algorithm is a bit more complex. The Python client will
- also sign the trace info with a `HMAC`_ key (lets call that key ``A``)
- passed to profiler.init, and on reception the WSGI middleware will
- check that it's signed with *one of* the HMAC keys (the wsgi
- server should have key ``A`` as well, but may also have keys ``B``
- and ``C``) that are specified in api-paste.ini. This ensures that only
- the user that knows the HMAC key ``A`` in api-paste.ini can init a
- profiler properly and send trace info that will be actually
- processed. This ensures that trace info that is sent in that
- does **not** pass the HMAC validation will be discarded. **NOTE:** The
- application of many possible *validation* keys makes it possible to
- roll out a key upgrade in a non-impactful manner (by adding a key into
- the list and rolling out that change and then removing the older key at
- some time in the future).
-
- * RPC API
-
- RPC calls are used for interaction between services of one project.
- It's well known that projects are using `oslo.messaging`_ to deal with
- RPC. It's very good, because projects deal with RPC in similar way.
-
- So there are 2 required changes:
-
- * On callee side put in request context trace info (if profiler was
- initialized)
-
- * On caller side initialize profiler, if there is trace info in request
- context.
-
- * Trace all methods of callee API (can be done via profiler.trace_cls).
-
-
-* **What points should be tracked by default?**
-
- I think that for all projects we should include by default 5 kinds of points:
-
- * All HTTP calls - helps to get information about: what HTTP requests were
- done, duration of calls (latency of service), information about projects
- involved in request.
-
- * All RPC calls - helps to understand duration of parts of request related
- to different services in one project. This information is essential to
- understand which service produce the bottleneck.
-
- * All DB API calls - in some cases slow DB query can produce bottleneck. So
- it's quite useful to track how much time request spend in DB layer.
-
- * All driver calls - in case of nova, cinder and others we have vendor
- drivers. Duration
-
- * ALL SQL requests (turned off by default, because it produce a lot of
- traffic)
-
-.. _CONF: http://docs.openstack.org/developer/oslo.config/
-.. _HMAC: http://en.wikipedia.org/wiki/Hash-based_message_authentication_code
-.. _OpenStack: http://openstack.org/
-.. _Ceilometer: https://wiki.openstack.org/wiki/Ceilometer
-.. _oslo.messaging: https://pypi.python.org/pypi/oslo.messaging
-.. _OSprofiler WSGI middleware: https://github.com/stackforge/osprofiler/blob/master/osprofiler/web.py
+===================================================
+ OSProfiler -- Library for cross-project profiling
+===================================================
+
+.. image:: https://governance.openstack.org/tc/badges/osprofiler.svg
+ :target: https://governance.openstack.org/tc/reference/tags/index.html
+
+.. Change things from this point on
+
+.. image:: https://img.shields.io/pypi/v/osprofiler.svg
+ :target: https://pypi.org/project/osprofiler/
+ :alt: Latest Version
+
+.. image:: https://img.shields.io/pypi/dm/osprofiler.svg
+ :target: https://pypi.org/project/osprofiler/
+ :alt: Downloads
+
+OSProfiler provides a tiny but powerful library that is used by
+most (soon to be all) OpenStack projects and their python clients. It
+provides functionality to be able to generate 1 trace per request, that goes
+through all involved services. This trace can then be extracted and used
+to build a tree of calls which can be quite handy for a variety of
+reasons (for example in isolating cross-project performance issues).
+
+* Free software: Apache license
+* Documentation: https://docs.openstack.org/osprofiler/latest/
+* Source: https://opendev.org/openstack/osprofiler
+* Bugs: https://bugs.launchpad.net/osprofiler
+* Release notes: https://docs.openstack.org/releasenotes/osprofiler
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 0000000000000000000000000000000000000000..96001d5fceb87cfb6e7006dc678ec90132fdc66b
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,3 @@
+rabbitmq-server [test]
+redis [test platform:rpm]
+redis-server [test platform:dpkg]
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000000000000000000000000000000000000..36f5f0f77dcb929aa1392cc1013c0d937c1fd2af
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,205 @@
+python-osprofiler (3.4.0-4) UNRELEASED; urgency=medium
+
+ * Apply multi-arch hints.
+ + python-osprofiler-doc: Add Multi-Arch: foreign.
+
+ -- Debian Janitor Sun, 22 Aug 2021 02:45:19 -0000
+
+python-osprofiler (3.4.0-3) unstable; urgency=medium
+
+ * Added Restrictions: superficial to d/tests/control (Closes: #974519).
+
+ -- Thomas Goirand Fri, 20 Nov 2020 23:58:47 +0100
+
+python-osprofiler (3.4.0-2) unstable; urgency=medium
+
+ * Uploading to unstable.
+ * Add a debian/salsa-ci.yml.
+
+ -- Thomas Goirand Thu, 15 Oct 2020 13:27:46 +0200
+
+python-osprofiler (3.4.0-1) experimental; urgency=medium
+
+ * New upstream release.
+
+ -- Thomas Goirand Sun, 13 Sep 2020 11:12:47 +0200
+
+python-osprofiler (3.3.0-1) experimental; urgency=medium
+
+ * Fixed homepage field and watch file.
+ * New upstream release.
+ * Fixed (build-)depends for this release.
+
+ -- Thomas Goirand Mon, 07 Sep 2020 17:47:16 +0200
+
+python-osprofiler (3.1.0-2) unstable; urgency=medium
+
+ * Uploading to unstable.
+
+ -- Thomas Goirand Fri, 08 May 2020 18:23:19 +0200
+
+python-osprofiler (3.1.0-1) experimental; urgency=medium
+
+ * New upstream release.
+
+ -- Thomas Goirand Tue, 07 Apr 2020 16:41:21 +0200
+
+python-osprofiler (3.0.0-1) experimental; urgency=medium
+
+ * New upstream release.
+
+ -- Thomas Goirand Mon, 06 Apr 2020 22:44:37 +0200
+
+python-osprofiler (2.8.2-3) unstable; urgency=medium
+
+ * Removed Python 2 autopkgtest (Closes: #937988).
+
+ -- Thomas Goirand Sun, 19 Jan 2020 22:39:09 +0100
+
+python-osprofiler (2.8.2-2) unstable; urgency=medium
+
+ * Uploading to unstable.
+
+ -- Thomas Goirand Mon, 21 Oct 2019 10:06:21 +0200
+
+python-osprofiler (2.8.2-1) experimental; urgency=medium
+
+ * New upstream release.
+
+ -- Thomas Goirand Mon, 23 Sep 2019 17:32:39 +0200
+
+python-osprofiler (2.6.0-2) unstable; urgency=medium
+
+ * Removed python3-osprofiler.postinst which had update-alternatives for dual
+ Python support (Closes: #940313).
+
+ -- Thomas Goirand Mon, 16 Sep 2019 11:55:39 +0200
+
+python-osprofiler (2.6.0-1) unstable; urgency=medium
+
+ [ Ondřej Nový ]
+ * Running wrap-and-sort -bast.
+ * Use debhelper-compat instead of debian/compat.
+
+ [ Thomas Goirand ]
+ * New upstream release.
+ * Removed Python 2 support (Closes: #937988).
+
+ -- Thomas Goirand Thu, 12 Sep 2019 08:48:43 +0200
+
+python-osprofiler (2.3.0-2) unstable; urgency=medium
+
+ * Uploading to unstable.
+
+ -- Thomas Goirand Tue, 04 Sep 2018 15:36:20 +0200
+
+python-osprofiler (2.3.0-1) experimental; urgency=medium
+
+ [ Ondřej Nový ]
+ * d/control: Set Vcs-* to salsa.debian.org
+ * d/tests: Use AUTOPKGTEST_TMP instead of ADTTMP
+ * d/control: Use team+openstack@tracker.debian.org as maintainer
+
+ [ Thomas Goirand ]
+ * New upstream release.
+ * Fixed (build-)depends for this release.
+ * Build doc with Python 3.
+ * Blacklist all test_jaeger test.
+
+ -- Thomas Goirand Wed, 22 Aug 2018 17:36:48 +0200
+
+python-osprofiler (1.11.0-2) unstable; urgency=medium
+
+ * Uploading to unstable.
+
+ -- Thomas Goirand Thu, 02 Nov 2017 00:18:21 +0000
+
+python-osprofiler (1.11.0-1) experimental; urgency=medium
+
+ [ Corey Bryant ]
+ * d/control: Add python-oslo.config to BDs to fix test import error.
+ * d/p/drop-sphinx-git.patch, d/control: Drop use of git from sphinx config.
+
+ [ Ondřej Nový ]
+ * Standards-Version is 3.9.8 now (no change)
+ * d/copyright: Added myself to Debian part
+ * Added simple Debian tests
+ * Fix copyright year in documentation to make build reproducible
+ * d/rules: Changed UPSTREAM_GIT to new URL
+ * d/copyright: Changed source URL to new one
+
+ [ Daniel Baumann ]
+ * Updating vcs fields.
+ * Updating copyright format url.
+ * Running wrap-and-sort -bast.
+ * Updating maintainer field.
+ * Updating standards version to 4.0.0.
+ * Removing gbp.conf, not used anymore or should be specified in the
+ developers dotfiles.
+ * Correcting permissions in debian packaging files.
+ * Updating standards version to 4.0.1.
+ * Updating standards version to 4.1.0.
+
+ [ Thomas Goirand ]
+ * New upstream release.
+ * Fixed (build-)depends for this release.
+ * Using pkgos-dh_auto_{install,test}.
+ * Removed all patches, now useless.
+
+ -- Thomas Goirand Tue, 03 Oct 2017 14:03:29 +0200
+
+python-osprofiler (1.2.0-2) unstable; urgency=medium
+
+ [ Thomas Goirand ]
+ * Uploading to unstable.
+
+ -- Thomas Goirand Mon, 04 Apr 2016 13:13:51 +0000
+
+python-osprofiler (1.2.0-1) experimental; urgency=medium
+
+ [ Ondřej Nový ]
+ * Fixed VCS URLs (https).
+
+ [ Thomas Goirand ]
+ * New upstream release.
+ * Fixed (build-)depends for this release.
+ * Standards-Version: 3.9.7 (no change).
+
+ -- Thomas Goirand Thu, 03 Mar 2016 22:40:52 +0800
+
+python-osprofiler (0.4.0-1) experimental; urgency=medium
+
+ * New upstream release.
+ * Fixed (build-)depends for this release.
+ * Fixed watch file to use github tag rather than broken pypi.
+ * Fixed debian/copyright ordering.
+ * Standards-Version is now 3.9.6 (no change).
+ * Ran wrap-and-sort -t -a.
+
+ -- Thomas Goirand Thu, 21 Jan 2016 03:32:01 +0000
+
+python-osprofiler (0.3.1-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Removed the no-intersphinx patch.
+
+ -- Thomas Goirand Mon, 07 Dec 2015 18:08:09 +0100
+
+python-osprofiler (0.3.0-3) unstable; urgency=medium
+
+ * override_dh_python3 to fix Py3 shebang.
+ * Added dh-python as b-d.
+
+ -- Thomas Goirand Fri, 23 Oct 2015 23:33:21 +0000
+
+python-osprofiler (0.3.0-2) unstable; urgency=medium
+
+ * Made build reproducible (Closes: #788503). Thanks to Juan Picca.
+
+ -- Thomas Goirand Fri, 12 Jun 2015 11:18:04 +0200
+
+python-osprofiler (0.3.0-1) unstable; urgency=medium
+
+ * Initial release. (Closes: #760529)
+
+ -- Thomas Goirand Fri, 05 Sep 2014 09:38:53 +0800
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000000000000000000000000000000000000..97e3a2f04ef3b8e4b7cd09b0bafabf3d75bad35b
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,90 @@
+Source: python-osprofiler
+Section: python
+Priority: optional
+Maintainer: Debian OpenStack
+Uploaders:
+ Thomas Goirand ,
+Build-Depends:
+ debhelper-compat (= 10),
+ dh-python,
+ openstack-pkg-tools,
+ python3-all,
+ python3-pbr,
+ python3-setuptools,
+ python3-sphinx,
+Build-Depends-Indep:
+ python3-ddt,
+ python3-docutils,
+ python3-elasticsearch,
+ python3-hacking,
+ python3-importlib-metadata,
+ python3-netaddr,
+ python3-openstackdocstheme,
+ python3-oslo.concurrency,
+ python3-oslo.config,
+ python3-oslo.serialization,
+ python3-oslo.utils,
+ python3-prettytable,
+ python3-pymongo,
+ python3-redis,
+ python3-requests,
+ python3-six,
+ python3-stestr,
+ python3-testtools,
+ python3-webob,
+ subunit,
+Standards-Version: 4.1.0
+Vcs-Browser: https://salsa.debian.org/openstack-team/libs/python-osprofiler
+Vcs-Git: https://salsa.debian.org/openstack-team/libs/python-osprofiler.git
+Homepage: https://opendev.org/openstack/osprofiler
+
+Package: python-osprofiler-doc
+Section: doc
+Architecture: all
+Depends:
+ ${misc:Depends},
+ ${sphinxdoc:Depends},
+Multi-Arch: foreign
+Description: OpenStack Profiler Library - doc
+ OpenStack consists of multiple projects. Each project, in turn, is composed of
+ multiple services. To process some request, e.g. to boot a virtual machine,
+ OpenStack uses multiple services from different projects. In the case something
+ works too slowly, it's extremely complicated to understand what exactly goes
+ wrong and to locate the bottleneck.
+ .
+ To resolve this issue, a tiny but powerful library, osprofiler, has been
+ introduced, and can be used by all OpenStack projects and their Python
+ clients. To be able to generate one trace per request, that goes through all
+ involved services, and builds a tree of calls (see an example
+ http://pavlovic.me/rally/profiler/).
+ .
+ This package contains the documentation.
+
+Package: python3-osprofiler
+Architecture: all
+Depends:
+ python3-importlib-metadata,
+ python3-netaddr,
+ python3-oslo.concurrency,
+ python3-oslo.serialization,
+ python3-oslo.utils,
+ python3-prettytable,
+ python3-requests,
+ python3-six,
+ python3-webob,
+ ${misc:Depends},
+ ${python3:Depends},
+Description: OpenStack Profiler Library - Python 3.x
+ OpenStack consists of multiple projects. Each project, in turn, is composed of
+ multiple services. To process some request, e.g. to boot a virtual machine,
+ OpenStack uses multiple services from different projects. In the case something
+ works too slowly, it's extremely complicated to understand what exactly goes
+ wrong and to locate the bottleneck.
+ .
+ To resolve this issue, a tiny but powerful library, osprofiler, has been
+ introduced, and can be used by all OpenStack projects and their Python
+ clients. To be able to generate one trace per request, that goes through all
+ involved services, and builds a tree of calls (see an example
+ http://pavlovic.me/rally/profiler/).
+ .
+ This package contains the Python 3.x module.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000000000000000000000000000000000000..8719727d3cac6a767840cd3d60585f0beaf62e9d
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,28 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: osprofiler
+Source: https://github.com/openstack/osprofiler.git
+
+Files: *
+Copyright: (c) 2013-2016, OpenStack Foundation
+License: Apache-2
+
+Files: debian/*
+Copyright: (c) 2014-2016, Thomas Goirand
+ (c) 2016, Ondřej Nový
+License: Apache-2
+
+License: Apache-2
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian-based systems the full text of the Apache version 2.0 license
+ can be found in /usr/share/common-licenses/Apache-2.0.
diff --git a/debian/python-osprofiler-doc.doc-base b/debian/python-osprofiler-doc.doc-base
new file mode 100644
index 0000000000000000000000000000000000000000..930aa64045e8e64d4213ff62ad8e159efe8f4cea
--- /dev/null
+++ b/debian/python-osprofiler-doc.doc-base
@@ -0,0 +1,9 @@
+Document: python-osprofiler-doc
+Title: osprofiler Documentation
+Author: N/A
+Abstract: Sphinx documentation for osprofiler
+Section: Programming/Python
+
+Format: HTML
+Index: /usr/share/doc/python-osprofiler-doc/html/index.html
+Files: /usr/share/doc/python-osprofiler-doc/html/*
diff --git a/debian/python3-osprofiler.install b/debian/python3-osprofiler.install
new file mode 100644
index 0000000000000000000000000000000000000000..9b4d16b635da2b4d8cfa8ca2bd8e97e52fdfdb21
--- /dev/null
+++ b/debian/python3-osprofiler.install
@@ -0,0 +1 @@
+/usr/*
diff --git a/debian/python3-osprofiler.postrm b/debian/python3-osprofiler.postrm
new file mode 100755
index 0000000000000000000000000000000000000000..547310bce4d78a6da9cbc07e57babe290e16f05d
--- /dev/null
+++ b/debian/python3-osprofiler.postrm
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+if [ "$1" = "remove" ] || [ "$1" = "disappear" ]; then
+ update-alternatives --remove osprofiler /usr/bin/python3-osprofiler
+fi
+
+#DEBHELPER#
+
+exit 0
+
diff --git a/debian/python3-osprofiler.prerm b/debian/python3-osprofiler.prerm
new file mode 100755
index 0000000000000000000000000000000000000000..ab6f58a4227c3a6ee19a08fd29653af2569a06c6
--- /dev/null
+++ b/debian/python3-osprofiler.prerm
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+if [ "$1" = "remove" ] ; then
+ update-alternatives --remove osprofiler /usr/bin/python3-osprofiler
+fi
+
+#DEBHELPER#
+
+exit 0
+
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000000000000000000000000000000000000..a12fbb608cc24efcc52d5bb4f6060430c3b6292e
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,35 @@
+#!/usr/bin/make -f
+
+LAST_CHANGE = $(shell dpkg-parsechangelog -S Date)
+BUILD_DATE = $(shell LC_ALL=C date -u "+%B %d, %Y" -d "$(LAST_CHANGE)")
+SPHINXOPTS := -D html_last_updated_fmt="$(BUILD_DATE)"
+
+UPSTREAM_GIT := https://github.com/openstack/osprofiler.git
+include /usr/share/openstack-pkg-tools/pkgos.make
+
+%:
+ dh $@ --buildsystem=python_distutils --with python3,sphinxdoc
+
+override_dh_auto_clean:
+ rm -rf build
+
+override_dh_auto_build:
+ echo "Do nothing..."
+
+override_dh_auto_test:
+ echo "Do nothing..."
+
+override_dh_auto_install:
+ pkgos-dh_auto_install --no-py2 --in-tmp
+ifeq (,$(findstring nocheck, $(DEB_BUILD_OPTIONS)))
+ pkgos-dh_auto_test --no-py2 'osprofiler\.tests\.(?!.*OptsTestCase\.test_entry_point.*|.*unit\.drivers\.test_jaeger.*)'
+endif
+
+override_dh_sphinxdoc:
+ifeq (,$(findstring nodoc, $(DEB_BUILD_OPTIONS)))
+ PYTHONPATH=. python3 -m sphinx $(SPHINXOPTS) -b html doc/source debian/python-osprofiler-doc/usr/share/doc/python-osprofiler-doc/html
+ dh_sphinxdoc
+endif
+
+override_dh_python3:
+ dh_python3 --shebang=/usr/bin/python3
diff --git a/debian/salsa-ci.yml b/debian/salsa-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0c22dc4373420a05999d6f9bc2235f73fc0ffa14
--- /dev/null
+++ b/debian/salsa-ci.yml
@@ -0,0 +1,3 @@
+include:
+ - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml
+ - https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000000000000000000000000000000000000..163aaf8d82b6c54f23c45f32895dbdfdcc27b047
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/debian/source/options b/debian/source/options
new file mode 100644
index 0000000000000000000000000000000000000000..cb61fa5267b6ad8b3bbc2a612754b79dae466292
--- /dev/null
+++ b/debian/source/options
@@ -0,0 +1 @@
+extend-diff-ignore = "^[^/]*[.]egg-info/"
diff --git a/debian/tests/control b/debian/tests/control
new file mode 100644
index 0000000000000000000000000000000000000000..640724bc1404cdc41d194f9e5bfa4256e3553240
--- /dev/null
+++ b/debian/tests/control
@@ -0,0 +1,4 @@
+Restrictions: superficial
+Depends:
+ python3-osprofiler,
+Test-Command: cd "$AUTOPKGTEST_TMP" ; python3 -c "import osprofiler; print(osprofiler.__version__)"
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 0000000000000000000000000000000000000000..619902fbbe09fcb1dffc5b2a37fef730ea811e0e
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,3 @@
+version=3
+opts="uversionmangle=s/\.(b|rc)/~$1/" \
+https://github.com/openstack/osprofiler/tags .*/(\d[\d\.]+)\.tar\.gz
diff --git a/devstack/README.rst b/devstack/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7469f798eaf75fe984ec87399fbc8e6b536a2115
--- /dev/null
+++ b/devstack/README.rst
@@ -0,0 +1,92 @@
+==================================
+Enabling OSProfiler using DevStack
+==================================
+
+This directory contains the files necessary to run OpenStack with enabled
+OSProfiler in DevStack.
+
+OSProfiler can send trace data into different collectors. There are 2 parameters
+that control this:
+
+* ``OSPROFILER_COLLECTOR`` specifies which collector to install in DevStack.
+ By default OSProfiler plugin does not install anything, thus default
+ messaging driver will be used.
+
+ Possible values:
+
+ * ``<empty>`` - default messaging driver is used
+ * ``redis`` - Redis is installed
+ * ``jaeger`` - Jaeger is installed
+ * ``sqlalchemy`` - SQLAlchemy driver is installed
+
+ The default value of ``OSPROFILER_CONNECTION_STRING`` is set automatically
+ depending on ``OSPROFILER_COLLECTOR`` value.
+
+* ``OSPROFILER_CONNECTION_STRING`` specifies which driver is used by OSProfiler.
+
+ Possible values:
+
+ * ``messaging://`` - use messaging as trace collector (with the transport configured by oslo.messaging)
+ * ``redis://[:password]@host[:port][/db]`` - use Redis as trace storage
+ * ``elasticsearch://host:port`` - use Elasticsearch as trace storage
+ * ``mongodb://host:port`` - use MongoDB as trace storage
+ * ``loginsight://username:password@host`` - use LogInsight as trace collector/storage
+ * ``jaeger://host:port`` - use Jaeger as trace collector
+ * ``mysql+pymysql://username:password@host/profiler?charset=utf8`` - use SQLAlchemy driver with MySQL database
+
+
+To configure DevStack and enable OSProfiler edit ``${DEVSTACK_DIR}/local.conf``
+file and add the following to ``[[local|localrc]]`` section:
+
+* to use Redis collector::
+
+ enable_plugin osprofiler https://opendev.org/openstack/osprofiler master
+ OSPROFILER_COLLECTOR=redis
+
+ OSProfiler plugin will install Redis and configure OSProfiler to use Redis driver
+
+* to use specified driver::
+
+ enable_plugin osprofiler https://opendev.org/openstack/osprofiler master
+ OSPROFILER_CONNECTION_STRING=<connection string value>
+
+ the driver is chosen depending on the value of
+ ``OSPROFILER_CONNECTION_STRING`` variable (refer to the next section for
+ details)
+
+
+Run DevStack as normal::
+
+ $ ./stack.sh
+
+
+Config variables
+----------------
+
+**OSPROFILER_HMAC_KEYS** - a set of HMAC secrets, that are used for triggering
+of profiling in OpenStack services: only the requests that specify one of these
+keys in HTTP headers will be profiled. E.g. multiple secrets are specified as
+a comma-separated list of string values::
+
+ OSPROFILER_HMAC_KEYS=swordfish,foxtrot,charlie
+
+**OSPROFILER_CONNECTION_STRING** - connection string to identify the driver.
+Default value is ``messaging://`` refers to messaging driver. For a full
+list of drivers please refer to
+``https://opendev.org/openstack/osprofiler/src/branch/master/osprofiler/drivers``.
+Example: enable ElasticSearch driver with the server running on localhost::
+
+ OSPROFILER_CONNECTION_STRING=elasticsearch://127.0.0.1:9200
+
+**OSPROFILER_COLLECTOR** - controls which collector to install into DevStack.
+The driver is then chosen automatically based on the collector. Empty value assumes
+that the default messaging driver is used.
+Example: enable Redis collector::
+
+ OSPROFILER_COLLECTOR=redis
+
+**OSPROFILER_TRACE_SQLALCHEMY** - controls tracing of SQL statements. If enabled,
+all SQL statements processed by SQL Alchemy are added into traces. By default enabled.
+Example: disable SQL statements tracing::
+
+ OSPROFILER_TRACE_SQLALCHEMY=False
diff --git a/devstack/lib/osprofiler b/devstack/lib/osprofiler
new file mode 100644
index 0000000000000000000000000000000000000000..704b3b60ccad700259f5e776ab36a192558045c6
--- /dev/null
+++ b/devstack/lib/osprofiler
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+# lib/osprofiler
+# Functions to control the configuration and operation of the **OSProfiler**
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+CONF_FILES=(
+ $CINDER_CONF
+ $HEAT_CONF
+ $KEYSTONE_CONF
+ $NOVA_CONF
+ $NEUTRON_CONF
+ $GLANCE_API_CONF
+ $GLANCE_REGISTRY_CONF
+ $TROVE_CONF
+ $TROVE_CONDUCTOR_CONF
+ $TROVE_GUESTAGENT_CONF
+ $TROVE_TASKMANAGER_CONF
+ $SENLIN_CONF
+ $MAGNUM_CONF
+ $ZUN_CONF
+ $PLACEMENT_CONF
+)
+
+# Add config files of Nova Cells
+NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1}
+for i in $(seq 1 ${NOVA_NUM_CELLS}); do
+ # call function `conductor_conf` defined in lib/nova to get file name
+ conf=$(conductor_conf $i)
+ CONF_FILES+=(${conf})
+done
+
+
+# Functions
+# ---------
+
+function install_redis() {
+ if is_fedora; then
+ install_package redis
+ elif is_ubuntu; then
+ install_package redis-server
+ elif is_suse; then
+ install_package redis
+ else
+ exit_distro_not_supported "redis installation"
+ fi
+
+ start_service redis
+
+ pip_install_gr redis
+}
+
+function install_jaeger() {
+ if is_ubuntu; then
+ install_package docker.io
+ start_service docker
+ add_user_to_group $STACK_USER docker
+ sg docker -c "docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one:1.7"
+ else
+ exit_distro_not_supported "docker.io installation"
+ fi
+
+ pip_install jaeger-client
+}
+
+function install_elasticsearch() {
+ if is_ubuntu; then
+ install_package docker.io
+ start_service docker
+ add_user_to_group $STACK_USER docker
+ # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#docker-cli-run-dev-mode
+ sg docker -c 'docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:5.6.14'
+ else
+ exit_distro_not_supported "docker.io installation"
+ fi
+
+ pip_install elasticsearch
+}
+
+function install_mongodb {
+ pip_install pymongo
+ if is_ubuntu; then
+ install_package mongodb-server
+ start_service mongodb
+ elif is_fedora; then
+ install_package mongodb
+ install_package mongodb-server
+ start_service mongod
+ else
+ exit_distro_not_supported "mongodb installation"
+ fi
+}
+
+function install_osprofiler_collector() {
+ if [ -z "$OSPROFILER_COLLECTOR" ]; then
+ OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-"messaging://"}
+ elif [ "$OSPROFILER_COLLECTOR" == "redis" ]; then
+ install_redis
+ OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-"redis://localhost:6379"}
+ elif [ "$OSPROFILER_COLLECTOR" == "jaeger" ]; then
+ install_jaeger
+ OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-"jaeger://localhost:6831"}
+ elif [ "$OSPROFILER_COLLECTOR" == "elasticsearch" ]; then
+ install_elasticsearch
+ OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-"elasticsearch://elastic:changeme@localhost:9200"}
+ elif [ "$OSPROFILER_COLLECTOR" == "mongodb" ]; then
+ install_mongodb
+ OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-"mongodb://localhost:27017"}
+ elif [ "$OSPROFILER_COLLECTOR" == "sqlalchemy" ]; then
+ local db=`database_connection_url osprofiler`
+ OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-${db}}
+ recreate_database osprofiler
+ else
+ die $LINENO "OSProfiler collector $OSPROFILER_COLLECTOR is not supported"
+ fi
+
+ echo ${OSPROFILER_CONNECTION_STRING} > $HOME/.osprofiler_connection_string
+}
+
+function configure_osprofiler() {
+
+ for conf in ${CONF_FILES[@]}; do
+ if [ -f $conf ]
+ then
+ iniset $conf profiler enabled True
+ iniset $conf profiler trace_sqlalchemy $OSPROFILER_TRACE_SQLALCHEMY
+ iniset $conf profiler hmac_keys $OSPROFILER_HMAC_KEYS
+ iniset $conf profiler connection_string $OSPROFILER_CONNECTION_STRING
+ fi
+ done
+
+ # Keystone is already running, should be reloaded to apply osprofiler config
+ reload_service devstack@keystone
+}
+
+function configure_osprofiler_in_tempest() {
+
+ iniset $TEMPEST_CONFIG profiler key $OSPROFILER_HMAC_KEYS
+}
+
+
+# Restore xtrace
+$XTRACE
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
new file mode 100644
index 0000000000000000000000000000000000000000..72c0bca7219041a4f33adba75fdbbb68b17731b7
--- /dev/null
+++ b/devstack/plugin.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# DevStack extras script to install osprofiler
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set -o xtrace
+
+source $DEST/osprofiler/devstack/lib/osprofiler
+
+if [[ "$1" == "stack" && "$2" == "install" ]]; then
+ echo_summary "Configuring system services for OSProfiler"
+ install_osprofiler_collector
+
+elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+ echo_summary "Configuring OSProfiler"
+ configure_osprofiler
+
+elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
+ echo_summary "Configuring Tempest"
+ configure_osprofiler_in_tempest
+
+fi
+
+# Restore xtrace
+$XTRACE
diff --git a/devstack/settings b/devstack/settings
new file mode 100644
index 0000000000000000000000000000000000000000..69eb5cca7cb0eb3c04ff8494f70ee9515215f970
--- /dev/null
+++ b/devstack/settings
@@ -0,0 +1,11 @@
+# Devstack settings
+
+# A comma-separated list of secrets, that will be used for triggering
+# of profiling in OpenStack services: profiling is only performed for
+# requests that specify one of these keys in HTTP headers.
+OSPROFILER_HMAC_KEYS=${OSPROFILER_HMAC_KEYS:-"SECRET_KEY"}
+
+# Set whether tracing of SQL requests is enabled or not
+OSPROFILER_TRACE_SQLALCHEMY=${OSPROFILER_TRACE_SQLALCHEMY:-"True"}
+
+enable_service osprofiler
diff --git a/doc/source/conf.py b/doc/source/conf.py
index fe3a6913a38f13ff67992bf0355d2a526077d31e..67978eb1578b54432815f81615c02770047f76d7 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,7 +11,6 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import datetime
import os
import sys
@@ -33,12 +32,19 @@ sys.path.extend([
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
- 'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
+ 'openstackdocstheme',
]
+
+# openstackdocstheme options
+openstackdocs_repo_name = 'openstack/osprofiler'
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'osprofiler'
+openstackdocs_bug_tag = ''
+
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
@@ -55,16 +61,7 @@ master_doc = 'index'
# General information about the project.
project = u'OSprofiler'
-copyright = u'%d, Mirantis Inc.' % datetime.datetime.now().year
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.2.5'
-# The full version, including alpha/beta/rc tags.
-release = '0.2.5'
+copyright = u'2016, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -95,7 +92,7 @@ add_module_names = True
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
@@ -105,7 +102,7 @@ pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'default'
+html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -136,11 +133,6 @@ html_theme = 'default'
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
-html_last_updated_fmt = os.popen(git_cmd).read()
-
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
@@ -257,7 +249,3 @@ texinfo_documents = [
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 120000
index c768ff7d970600e1009b304bc9bfe15a1064147e..0000000000000000000000000000000000000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../README.rst
\ No newline at end of file
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..01c8b3e323a58b1e9f402722856fab1fd1db5837
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,22 @@
+=============================================
+OSProfiler -- Cross-project profiling library
+=============================================
+
+OSProfiler provides a tiny but powerful library that is used by
+most (soon to be all) OpenStack projects and their python clients. It
+provides functionality to generate 1 trace per request, that goes
+through all involved services. This trace can then be extracted and used
+to build a tree of calls which can be quite handy for a variety of
+reasons (for example in isolating cross-project performance issues).
+
+.. toctree::
+ :maxdepth: 2
+
+ user/index
+
+.. rubric:: Indices and tables
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/doc/source/user/api.rst b/doc/source/user/api.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c449158d5e5ab35d46b4ee47edd3104d3c9ed649
--- /dev/null
+++ b/doc/source/user/api.rst
@@ -0,0 +1,240 @@
+===
+API
+===
+
+There are a few things that you should know about the API before using it.
+
+Five ways to add a new trace point.
+-----------------------------------
+
+.. code-block:: python
+
+ from osprofiler import profiler
+
+ def some_func():
+ profiler.start("point_name", {"any_key": "with_any_value"})
+ # your code
+ profiler.stop({"any_info_about_point": "in_this_dict"})
+
+
+ @profiler.trace("point_name",
+ info={"any_info_about_point": "in_this_dict"},
+ hide_args=False)
+ def some_func2(*args, **kwargs):
+ # If you need to hide args in profile info, put hide_args=True
+ pass
+
+ def some_func3():
+ with profiler.Trace("point_name",
+ info={"any_key": "with_any_value"}):
+ # some code here
+
+ @profiler.trace_cls("point_name", info={}, hide_args=False,
+ trace_private=False)
+ class TracedClass(object):
+
+ def traced_method(self):
+ pass
+
+ def _traced_only_if_trace_private_true(self):
+ pass
+
+ @six.add_metaclass(profiler.TracedMeta)
+ class RpcManagerClass(object):
+ __trace_args__ = {'name': 'rpc',
+ 'info': None,
+ 'hide_args': False,
+ 'trace_private': False}
+
+ def my_method(self, some_args):
+ pass
+
+ def my_method2(self, some_arg1, some_arg2, kw=None, kw2=None):
+ pass
+
+How profiler works?
+-------------------
+
+* **profiler.Trace()** and **@profiler.trace()** are just syntax sugar,
+ that just calls **profiler.start()** & **profiler.stop()** methods.
+
+* Every call of **profiler.start()** & **profiler.stop()** sends to
+ **collector** 1 message. It means that every trace point creates 2 records
+ in the collector. *(more about collector & records later)*
+
+* Nested trace points are supported. The sample below produces 2 trace points:
+
+ .. code-block:: python
+
+ profiler.start("parent_point")
+ profiler.start("child_point")
+ profiler.stop()
+ profiler.stop()
+
+ The implementation is quite simple. Profiler has one stack that contains
+ ids of all trace points. E.g.:
+
+ .. code-block:: python
+
+ profiler.start("parent_point") # trace_stack.push(<new_uuid>)
+ # send to collector -> trace_stack[-2:]
+
+ profiler.start("child_point") # trace_stack.push(<new_uuid>)
+ # send to collector -> trace_stack[-2:]
+ profiler.stop() # send to collector -> trace_stack[-2:]
+ # trace_stack.pop()
+
+ profiler.stop() # send to collector -> trace_stack[-2:]
+ # trace_stack.pop()
+
+ It's simple to build a tree of nested trace points, having
+ **(parent_id, point_id)** of all trace points.
+
+Process of sending to collector.
+--------------------------------
+
+Trace points contain 2 messages (start and stop). Messages like below are
+sent to a collector:
+
+.. parsed-literal::
+
+ {
+ "name": <point_name>-(start|stop)
+ "base_id": <uuid>,
+ "parent_id": <uuid>,
+ "trace_id": <uuid>,
+ "info": <dict>
+ }
+
+The fields are defined as the following:
+
+* base_id - ``<uuid>`` that is equal for all trace points that belong
+ to one trace, this is done to simplify the process of retrieving
+ all trace points related to one trace from collector
+* parent_id - ``<uuid>`` of parent trace point
+* trace_id - ``<uuid>`` of current trace point
+* info - the dictionary that contains user information passed when calling
+ profiler **start()** & **stop()** methods.
+
+Setting up the collector.
+-------------------------
+
+Using OSProfiler notifier.
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. note:: The following way of configuring OSProfiler is deprecated. The new
+ version description is located below - `Using OSProfiler initializer.`_.
+ Don't use OSprofiler notifier directly! Its support will be removed soon
+ from OSProfiler.
+
+The profiler doesn't include a trace point collector. The user/developer
+should instead provide a method that sends messages to a collector. Let's
+take a look at a trivial sample, where the collector is just a file:
+
+.. code-block:: python
+
+ import json
+
+ from osprofiler import notifier
+
+ def send_info_to_file_collector(info, context=None):
+ with open("traces", "a") as f:
+ f.write(json.dumps(info))
+
+ notifier.set(send_info_to_file_collector)
+
+So now on every **profiler.start()** and **profiler.stop()** call we will
+write info about the trace point to the end of the **traces** file.
+
+Using OSProfiler initializer.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+OSProfiler now contains various storage drivers to collect tracing data.
+Information about what driver to use and what options to pass to OSProfiler
+are now stored in OpenStack services configuration files. Example of such
+configuration can be found below:
+
+.. code-block:: bash
+
+ [profiler]
+ enabled = True
+ trace_sqlalchemy = True
+ hmac_keys = SECRET_KEY
+ connection_string = messaging://
+
+If such configuration is provided, OSProfiler setting up can be processed in
+following way:
+
+.. code-block:: python
+
+ if CONF.profiler.enabled:
+ osprofiler_initializer.init_from_conf(
+ conf=CONF,
+ context=context.get_admin_context().to_dict(),
+ project="cinder",
+ service=binary,
+ host=host
+ )
+
+Initialization of profiler.
+---------------------------
+
+If profiler is not initialized, all calls to **profiler.start()** and
+**profiler.stop()** will be ignored.
+
+Initialization is a quite simple procedure.
+
+.. code-block:: python
+
+ from osprofiler import profiler
+
+ profiler.init("SECRET_HMAC_KEY", base_id=<base_id>, parent_id=<parent_id>)
+
+``SECRET_HMAC_KEY`` - will be discussed later, because it's related to the
+integration of OSprofiler & OpenStack.
+
+**base_id** and **trace_id** will be used to initialize stack_trace in
+profiler, e.g. ``stack_trace = [base_id, trace_id]``.
+
+OSProfiler CLI.
+---------------
+
+To make it easier for end users to work with profiler from CLI, OSProfiler
+has entry point that allows them to retrieve information about traces and
+present it in human readable form.
+
+Available commands:
+
+* Help message with all available commands and their arguments:
+
+ .. parsed-literal::
+
+ $ osprofiler -h/--help
+
+* OSProfiler version:
+
+ .. parsed-literal::
+
+ $ osprofiler -v/--version
+
+* Results of profiling can be obtained in JSON (option: ``--json``) and HTML
+ (option: ``--html``) formats:
+
+ .. parsed-literal::
+
+ $ osprofiler trace show <trace_id> --json/--html
+
+ hint: option ``--out`` will redirect result of ``osprofiler trace show``
+ in specified file:
+
+ .. parsed-literal::
+
+ $ osprofiler trace show <trace_id> --json/--html --out /path/to/file
+
+* In latest versions of OSProfiler with storage drivers (e.g. MongoDB (URI:
+ ``mongodb://``), Messaging (URI: ``messaging://``))
+ ``--connection-string`` parameter should be set up:
+
+ .. parsed-literal::
+
+ $ osprofiler trace show <trace_id> --connection-string=<storage> --json/--html
diff --git a/doc/source/user/background.rst b/doc/source/user/background.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5845d9cb1ac093d3d1740829bc4c0f42e68a3d6d
--- /dev/null
+++ b/doc/source/user/background.rst
@@ -0,0 +1,32 @@
+==========
+Background
+==========
+
+OpenStack consists of multiple projects. Each project, in turn, is composed of
+multiple services. To process some request, e.g. to boot a virtual machine,
+OpenStack uses multiple services from different projects. In the case something
+works too slow, it's extremely complicated to understand what exactly goes
+wrong and to locate the bottleneck.
+
+To resolve this issue, we introduce a tiny but powerful library,
+**osprofiler**, that is going to be used by all OpenStack projects and their
+python clients. It generates 1 trace per request, that goes through
+all involved services, and builds a tree of calls.
+
+Why not cProfile and etc?
+-------------------------
+
+**The scope of this library is quite different:**
+
+* We are interested in getting one trace of points from different services,
+ not tracing all Python calls inside one process.
+
+* This library should be easily integrable into OpenStack. This means that:
+
+ * It shouldn't require too many changes in code bases of projects it's
+ integrated with.
+
+ * We should be able to fully turn it off.
+
+ * We should be able to keep it turned on in lazy mode in production
+ (e.g. admin should be able to "trace" on request).
diff --git a/doc/source/user/collectors.rst b/doc/source/user/collectors.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a2c11e3e67adc992e3e3a8bb2cdd37e1718156e4
--- /dev/null
+++ b/doc/source/user/collectors.rst
@@ -0,0 +1,73 @@
+==========
+Collectors
+==========
+
+There are a number of drivers to support different collector backends:
+
+Redis
+-----
+
+* Overview
+
+ The Redis driver allows profiling data to be collected into a redis
+ database instance. The traces are stored as key-value pairs where the
+ key is a string built using trace ids and timestamps and the values
+ are JSON strings containing the trace information. A second driver is
+ included to use Redis Sentinel in addition to single node Redis.
+
+* Capabilities
+
+ * Write trace data to the database.
+ * Query Traces in database: This allows for pulling trace data
+ querying on the keys used to save the data in the database.
+ * Generate a report based on the traces stored in the database.
+ * Supports use of Redis Sentinel for robustness.
+
+* Usage
+
+ The driver is used by OSProfiler when using a connection-string URL
+ of the form redis://[:password]@host[:port][/db]. To use the Sentinel version
+ use a connection-string of the form
+ redissentinel://[:password]@host[:port][/db]
+
+* Configuration
+
+ * No config changes are required for the base Redis driver.
+ * There are two configuration options for the Redis Sentinel driver:
+
+ * socket_timeout: specifies the sentinel connection socket timeout
+ value. Defaults to: 0.1 seconds
+ * sentinel_service_name: The name of the Sentinel service to use.
+ Defaults to: "mymaster"
+
+SQLAlchemy
+----------
+
+The SQLAlchemy collector allows you to store profiling data into a database
+supported by SQLAlchemy.
+
+Usage
+=====
+To use the driver, the `connection_string` in the `[osprofiler]` config section
+needs to be set to a connection string that `SQLAlchemy understands`_
+For example::
+
+ [osprofiler]
+ connection_string = mysql+pymysql://username:password@192.168.192.81/profiler?charset=utf8
+
+where `username` is the database username, `password` is the database password,
+`192.168.192.81` is the database IP address and `profiler` is the database name.
+
+The database (in this example called `profiler`) needs to be created manually and
+the database user (in this example called `username`) needs to have privileges
+to create tables and select and insert rows.
+
+.. note::
+
+ SQLAlchemy collector requires database JSON data type support.
+ This type of data is supported by versions listed below or higher:
+
+ - MariaDB 10.2
+ - MySQL 5.7.8
+
+.. _SQLAlchemy understands: https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
diff --git a/doc/source/user/history.rst b/doc/source/user/history.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f69be70b3cb6fb7e7a4908bbf7ffcc618b86f6b0
--- /dev/null
+++ b/doc/source/user/history.rst
@@ -0,0 +1 @@
+.. include:: ../../../ChangeLog
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fe317da93ac6a79711e7a7aeb258f667f9f1834d
--- /dev/null
+++ b/doc/source/user/index.rst
@@ -0,0 +1,27 @@
+================
+Using OSProfiler
+================
+
+OSProfiler provides a tiny but powerful library that is used by
+most (soon to be all) OpenStack projects and their python clients. It
+provides functionality to generate 1 trace per request, that goes
+through all involved services. This trace can then be extracted and used
+to build a tree of calls which can be quite handy for a variety of
+reasons (for example in isolating cross-project performance issues).
+
+.. toctree::
+ :maxdepth: 2
+
+ background
+ api
+ integration
+ collectors
+ similar_projects
+
+Release Notes
+=============
+
+.. toctree::
+ :maxdepth: 1
+
+ history
diff --git a/doc/source/user/integration.rst b/doc/source/user/integration.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ff2ab51cfcf605c4db7fe7539c82e460570b454c
--- /dev/null
+++ b/doc/source/user/integration.rst
@@ -0,0 +1,134 @@
+===========
+Integration
+===========
+
+There are 4 topics related to the integration of OSprofiler & `OpenStack`_:
+
+What we should use as a centralized collector?
+----------------------------------------------
+
+We primarily decided to use `Ceilometer`_, because:
+
+* It's already integrated in OpenStack, so it's quite simple to send
+ notifications to it from all projects.
+
+* There is an OpenStack API in Ceilometer that allows us to retrieve all
+ messages related to one trace. Take a look at
+ *osprofiler.drivers.ceilometer.Ceilometer:get_report*
+
+In OSProfiler starting with 1.4.0 version other options (MongoDB driver in
+1.4.0 release, Elasticsearch driver added later, etc.) are also available.
+
+
+How to setup profiler notifier?
+-------------------------------
+
+We primarily decided to use oslo.messaging Notifier API, because:
+
+* `oslo.messaging`_ is integrated in all projects
+
+* It's the simplest way to send notification to Ceilometer, take a
+ look at: *osprofiler.drivers.messaging.Messaging:notify* method
+
+* We don't need to add any new `CONF`_ options in projects
+
+In OSProfiler starting with 1.4.0 version other options (MongoDB driver in
+1.4.0 release, Elasticsearch driver added later, etc.) are also available.
+
+How to initialize profiler, to get one trace across all services?
+-----------------------------------------------------------------
+
+To enable cross service profiling we actually need to send from caller
+to callee (base_id & trace_id). So callee will be able to init its profiler
+with these values.
+
+In case of OpenStack there are 2 kinds of interaction between 2 services:
+
+* REST API
+
+ It's well known that there are python clients for every project,
+ that generate proper HTTP requests, and parse responses to objects.
+
+ These python clients are used in 2 cases:
+
+ * User access -> OpenStack
+
+ * Service from Project 1 would like to access Service from Project 2
+
+
+ So what we need is to:
+
+ * Put in python clients headers with trace info (if profiler is inited)
+
+ * Add `OSprofiler WSGI middleware`_ to your service, this initializes
+ the profiler, if and only if there are special trace headers, that
+ are signed by one of the HMAC keys from api-paste.ini (if multiple
+ keys exist the signing process will continue to use the key that was
+ accepted during validation).
+
+ * The common items that are used to configure the middleware are the
+ following (these can be provided when initializing the middleware
+ object or when setting up the api-paste.ini file)::
+
+ hmac_keys = KEY1, KEY2 (can be a single key as well)
+
+ Actually the algorithm is a bit more complex. The Python client will
+ also sign the trace info with a `HMAC`_ key (lets call that key ``A``)
+ passed to profiler.init, and on reception the WSGI middleware will
+ check that it's signed with *one of* the HMAC keys (the wsgi
+ server should have key ``A`` as well, but may also have keys ``B``
+ and ``C``) that are specified in api-paste.ini. This ensures that only
+ the user that knows the HMAC key ``A`` in api-paste.ini can init a
+ profiler properly and send trace info that will be actually
+ processed. This ensures that trace info that is sent in that
+ does **not** pass the HMAC validation will be discarded. **NOTE:** The
+ application of many possible *validation* keys makes it possible to
+ roll out a key upgrade in a non-impactful manner (by adding a key into
+ the list and rolling out that change and then removing the older key at
+ some time in the future).
+
+* RPC API
+
+ RPC calls are used for interaction between services of one project.
+ It's well known that projects are using `oslo.messaging`_ to deal with
+ RPC. It's very good, because projects deal with RPC in similar way.
+
+ So there are 2 required changes:
+
+ * On callee side put in request context trace info (if profiler was
+ initialized)
+
+ * On caller side initialize profiler, if there is trace info in request
+ context.
+
+ * Trace all methods of callee API (can be done via profiler.trace_cls).
+
+
+What points should be tracked by default?
+-----------------------------------------
+
+I think that for all projects we should include by default 5 kinds of points:
+
+* All HTTP calls - helps to get information about: what HTTP requests were
+ done, duration of calls (latency of service), information about projects
+ involved in request.
+
+* All RPC calls - helps to understand duration of parts of request related
+ to different services in one project. This information is essential to
+ understand which service produce the bottleneck.
+
+* All DB API calls - in some cases slow DB query can produce bottleneck. So
+ it's quite useful to track how much time request spend in DB layer.
+
+* All driver calls - in case of nova, cinder and others we have vendor
+ drivers. Duration
+
+* ALL SQL requests (turned off by default, because it produces a lot of
+ traffic)
+
+.. _CONF: https://docs.openstack.org/oslo.config/latest/
+.. _HMAC: https://en.wikipedia.org/wiki/Hash-based_message_authentication_code
+.. _OpenStack: https://www.openstack.org/
+.. _Ceilometer: https://wiki.openstack.org/wiki/Ceilometer
+.. _oslo.messaging: https://pypi.org/project/oslo.messaging
+.. _OSprofiler WSGI middleware: https://github.com/openstack/osprofiler/blob/master/osprofiler/web.py
diff --git a/doc/source/user/similar_projects.rst b/doc/source/user/similar_projects.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7b449ac7224abd073675eca4dedeacd2ed2a35ae
--- /dev/null
+++ b/doc/source/user/similar_projects.rst
@@ -0,0 +1,20 @@
+================
+Similar projects
+================
+
+Other projects (some alive, some abandoned, some research prototypes)
+that are similar (in idea and ideal to OSprofiler).
+
+* `Zipkin`_
+* `Dapper`_
+* `Tomograph`_
+* `HTrace`_
+* `Jaeger`_
+* `OpenTracing`_
+
+.. _Zipkin: https://zipkin.io/
+.. _Dapper: http://research.google.com/pubs/pub36356.html
+.. _Tomograph: https://github.com/stackforge/tomograph
+.. _HTrace: https://htrace.incubator.apache.org/
+.. _Jaeger: https://uber.github.io/jaeger/
+.. _OpenTracing: https://opentracing.io/
diff --git a/doc/specs/README.rst b/doc/specs/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3c22dd39c6d186dec1e2e55cd5b82a112d933d97
--- /dev/null
+++ b/doc/specs/README.rst
@@ -0,0 +1,11 @@
+OSProfiler Specs
+================
+
+Specs are detailed descriptions of proposed changes in a project. Usually they
+answer what, why, and how to change in the project, and who is going to work
+on the change.
+
+This directory contains 2 subdirectories:
+
+- in-progress - These specs are approved, but they are not implemented yet
+- implemented - Implemented specs archive
diff --git a/doc/specs/implemented/README.rst b/doc/specs/implemented/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6d86ff80eae583226214cc8193392be5e18f161a
--- /dev/null
+++ b/doc/specs/implemented/README.rst
@@ -0,0 +1,8 @@
+OSprofiler Implemented Specs
+============================
+
+Specs are detailed descriptions of proposed changes in a project. Usually they
+answer what, why, and how to change in the project, and who is going to work
+on the change.
+
+This directory contains files with implemented specs, 1 file is 1 spec.
diff --git a/doc/specs/implemented/make_paste_ini_config_optional.rst b/doc/specs/implemented/make_paste_ini_config_optional.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1d091d314bc15e4caa25b7583612bde768bb8b7a
--- /dev/null
+++ b/doc/specs/implemented/make_paste_ini_config_optional.rst
@@ -0,0 +1,82 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+..
+ This template should be in ReSTructured text. The filename in the git
+ repository should match the launchpad URL, for example a URL of
+ https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
+ awesome-thing.rst . Please do not delete any of the sections in this
+ template. If you have nothing to say for a whole section, just write: None
+ For help with syntax, see http://www.sphinx-doc.org/en/stable/rest.html
+ To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
+
+======================================
+ Make api-paste.ini Arguments Optional
+======================================
+
+Problem description
+===================
+
+Integration of OSprofiler with OpenStack projects is harder than it should be,
+it requires keeping part of arguments inside api-paste.ini files and part in
+projects.conf file.
+
+We should make all configuration options from api-paste.ini file optional
+and add alternative way to configure osprofiler.web.WsgiMiddleware
+
+
+Proposed change
+===============
+
+Integration of OSprofiler requires 2 changes in api-paste.ini file:
+
+- One is adding osprofiler.web.WsgiMiddleware to pipelines:
+ https://github.com/openstack/cinder/blob/master/etc/cinder/api-paste.ini#L13
+
+- Another is to add its arguments:
+ https://github.com/openstack/cinder/blob/master/etc/cinder/api-paste.ini#L31-L32
+
+ so WsgiMiddleware will be correctly initialized here:
+ https://github.com/openstack/osprofiler/blob/51761f375189bdc03b7e72a266ad0950777f32b1/osprofiler/web.py#L64
+
+We should make the ``hmac_keys`` and ``enabled`` variables optional, create a
+separate method for initialization of the wsgi middleware and cut a new
+release. After that, remove the api-paste.ini arguments from the projects.
+
+
+Alternatives
+------------
+
+None.
+
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+ dbelova
+
+Work Items
+----------
+
+- Modify osprofiler.web.WsgiMiddleware to make ``hmac_keys`` optional (done)
+
+- Add alternative way to setup osprofiler.web.WsgiMiddleware, e.g. extra
+ argument hmac_keys to enable() method (done)
+
+- Cut new release 0.3.1 (tbd)
+
+- Fix the code in all projects: remove api-paste.ini arguments and use
+ osprofiler.web.enable with extra argument (tbd)
+
+
+Dependencies
+============
+
+- Cinder, Glance, Trove - projects should be fixed
diff --git a/doc/specs/implemented/multi_backend_support.rst b/doc/specs/implemented/multi_backend_support.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bd962985fe3f473fe68aa0e0941087cdd82aa18b
--- /dev/null
+++ b/doc/specs/implemented/multi_backend_support.rst
@@ -0,0 +1,91 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+..
+ This template should be in ReSTructured text. The filename in the git
+ repository should match the launchpad URL, for example a URL of
+ https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
+ awesome-thing.rst . Please do not delete any of the sections in this
+ template. If you have nothing to say for a whole section, just write: None
+ For help with syntax, see http://www.sphinx-doc.org/en/stable/rest.html
+ To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
+
+=====================
+Multi backend support
+=====================
+
+Make OSProfiler more flexible and production ready.
+
+Problem description
+===================
+
+Currently OSprofiler works only with one backend, Ceilometer, which actually
+doesn't work well and adds huge overhead. Moreover, Ceilometer is often not
+installed/used at all. To resolve this we should add support for different
+backends like: MongoDB, InfluxDB, ElasticSearch, ...
+
+
+Proposed change
+===============
+
+Add a new osprofiler.drivers mechanism; each driver will do 2 things:
+send notifications and parse all notification in unified tree structure
+that can be processed by the REST lib.
+
+Deprecate osprofiler.notifiers and osprofiler.parsers
+
+Change all projects that are using OSprofiler to new model
+
+Alternatives
+------------
+
+I don't know any good alternative.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignees:
+ dbelova
+ ayelistratov
+
+
+Work Items
+----------
+
+To add support of multi backends we should change few places in osprofiler
+that are hardcoded on Ceilometer:
+
+- CLI command ``show``:
+
+ I believe we should add an extra argument "connection_string" which will
+ allow people to specify where the backend is. So it will look like:
+ ://[[user[:password]]@[address][:port][/database]]
+
+- Merge osprofiler.notifiers and osprofiler.parsers to osprofiler.drivers
+
+ Notifiers and Parsers are tightly related. Like for MongoDB notifier you
+ should use MongoDB parsers, so there is better solution to keep both
+ in the same place.
+
+ This change should be done with keeping backward compatibility,
+ in other words
+ we should create separated directory osprofiler.drivers and put first
+ Ceilometer and then start working on other backends.
+
+ These drivers will be chosen based on connection string
+
+- Deprecate osprofiler.notifiers and osprofiler.parsers
+
+- Switch all projects to new model with connection string
+
+
+Dependencies
+============
+
+- Cinder, Glance, Trove, Heat should be changed
diff --git a/doc/specs/in-progress/README.rst b/doc/specs/in-progress/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b17b7d3b9887d02923e6ae9e5f281437ddaf2f66
--- /dev/null
+++ b/doc/specs/in-progress/README.rst
@@ -0,0 +1,9 @@
+OSprofiler In-Progress Specs
+============================
+
+Specs are detailed descriptions of proposed changes in a project. Usually they
+answer what, why, and how to change in the project, and who is going to work
+on the change.
+
+This directory contains files with accepted but not implemented specs,
+1 file is 1 spec.
diff --git a/doc/specs/in-progress/better_devstack_integration.rst b/doc/specs/in-progress/better_devstack_integration.rst
new file mode 100644
index 0000000000000000000000000000000000000000..22d6a83c5fbf084e0f107ed1c686423bbf4d13a5
--- /dev/null
+++ b/doc/specs/in-progress/better_devstack_integration.rst
@@ -0,0 +1,63 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+..
+ This template should be in ReSTructured text. The filename in the git
+ repository should match the launchpad URL, for example a URL of
+ https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
+ awesome-thing.rst . Please do not delete any of the sections in this
+ template. If you have nothing to say for a whole section, just write: None
+ For help with syntax, see http://www.sphinx-doc.org/en/stable/rest.html
+ To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
+
+============================
+ Better DevStack Integration
+============================
+
+Make it simple to enable OSprofiler like it is simple to enable DEBUG log level
+
+Problem description
+===================
+
+It's hard to turn on OSProfiler in DevStack: you have to change
+notification_topic, enable Ceilometer, and in the future do other magic.
+Also, if something is done wrong, it's hard to debug.
+
+
+Proposed change
+===============
+
+Make a single argument: PROFILING=True/False
+
+Alternatives
+------------
+
+Do nothing and keep things hard.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+ boris-42
+
+
+Work Items
+----------
+
+- Make DevStack plugin for OSprofiler
+
+- Configure Ceilometer
+
+- Configure services that support OSprofiler
+
+
+Dependencies
+============
+
+- DevStack
diff --git a/doc/specs/in-progress/integration_testing.rst b/doc/specs/in-progress/integration_testing.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f48e153fbc6b37e8983c627ed66a246f5554ba15
--- /dev/null
+++ b/doc/specs/in-progress/integration_testing.rst
@@ -0,0 +1,63 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+..
+ This template should be in ReSTructured text. The filename in the git
+ repository should match the launchpad URL, for example a URL of
+ https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
+ awesome-thing.rst . Please do not delete any of the sections in this
+ template. If you have nothing to say for a whole section, just write: None
+ For help with syntax, see http://www.sphinx-doc.org/en/stable/rest.html
+ To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
+
+===================
+Integration Testing
+===================
+
+We should create a DSVM job that checks that proposed changes in OSprofiler
+don't break projects that are using OSProfiler.
+
+
+Problem description
+===================
+
+Currently we don't have CI for testing that OSprofiler changes are backward
+compatible and don't break projects that are using OSprofiler. In other words
+without this job each time when we are releasing OSProfiler we can break
+some of OpenStack projects which is quite bad.
+
+Proposed change
+===============
+
+Create DSVM job that will install OSprofiler with proposed patch instead of
+the latest release and run some basic tests.
+
+Alternatives
+------------
+
+Do nothing and break OpenStack.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Primary assignee:
+
+
+
+Work Items
+----------
+
+- Create DSVM job
+- Run Rally tests to make sure that everything works
+
+
+Dependencies
+============
+
+None
diff --git a/doc/specs/template.rst b/doc/specs/template.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9bb293e9c703e58479b3272da88c7d2cf46447b9
--- /dev/null
+++ b/doc/specs/template.rst
@@ -0,0 +1,78 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+..
+ This template should be in ReSTructured text. The filename in the git
+ repository should match the launchpad URL, for example a URL of
+ https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named
+ awesome-thing.rst . Please do not delete any of the sections in this
+ template. If you have nothing to say for a whole section, just write: None
+ For help with syntax, see http://www.sphinx-doc.org/en/stable/rest.html
+ To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html
+
+=======================
+ The title of your Spec
+=======================
+
+Introduction paragraph -- why are we doing anything?
+
+Problem description
+===================
+
+A detailed description of the problem.
+
+Proposed change
+===============
+
+Here is where you cover the change you propose to make in detail. How do you
+propose to solve this problem?
+
+If this is one part of a larger effort make it clear where this piece ends. In
+other words, what's the scope of this effort?
+
+Include where in the heat tree hierarchy this will reside.
+
+Alternatives
+------------
+
+This is an optional section, where it does apply we'd just like a demonstration
+that some thought has been put into why the proposed approach is the best one.
+
+Implementation
+==============
+
+Assignee(s)
+-----------
+
+Who is leading the writing of the code? Or is this a blueprint where you're
+throwing it out there to see who picks it up?
+
+If more than one person is working on the implementation, please designate the
+primary author and contact.
+
+Primary assignee:
+
+
+Can optionally list additional ids if they intend on doing
+substantial implementation work on this blueprint.
+
+Work Items
+----------
+
+Work items or tasks -- break the feature up into the things that need to be
+done to implement it. Those parts might end up being done by different people,
+but we're mostly trying to understand the timeline for implementation.
+
+
+Dependencies
+============
+
+- Include specific references to specs and/or blueprints in heat, or in other
+ projects, that this one either depends on or is related to.
+
+- Does this feature require any new library dependencies or code otherwise not
+ included in OpenStack? Or does it depend on a specific version of library?
+
diff --git a/lower-constraints.txt b/lower-constraints.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2ced6ac7e1fd47a0ec5d4f57be9899c6680c7ef4
--- /dev/null
+++ b/lower-constraints.txt
@@ -0,0 +1,22 @@
+coverage===4.0
+ddt===1.0.1
+dulwich===0.15.0
+elasticsearch===2.0.0
+importlib_metadata==1.7.0
+jaeger-client==3.8.0
+netaddr===0.7.18
+openstackdocstheme==2.2.1
+oslo.concurrency===3.26.0
+oslo.config===5.2.0
+oslo.serialization===2.18.0
+oslo.utils===3.33.0
+PrettyTable===0.7.2
+pymongo===3.0.2
+redis===2.10.0
+reno==3.1.0
+requests===2.14.2
+six===1.10.0
+Sphinx===2.0.0
+stestr==2.0.0
+testtools===2.2.0
+WebOb===1.7.1
diff --git a/osprofiler/__init__.py b/osprofiler/__init__.py
index 19220d2c44e7d1f3b79407b96bdc7489c0f366af..e9adf59ae88b2f6ad60b4c7ef443aa4aef489c07 100644
--- a/osprofiler/__init__.py
+++ b/osprofiler/__init__.py
@@ -13,19 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-
-from six.moves import configparser
-
-from osprofiler import _utils as utils
-
-
-utils.import_modules_from_package("osprofiler._notifiers")
-
-_conf = configparser.ConfigParser()
-_conf.read(os.path.join(
- os.path.dirname(os.path.dirname(__file__)), 'setup.cfg'))
try:
- __version__ = _conf.get('metadata', 'version')
-except (configparser.NoOptionError, configparser.NoSectionError):
- __version__ = None
+ # For Python 3.8 and later
+ import importlib.metadata as importlib_metadata
+except ImportError:
+ # For everyone else
+ import importlib_metadata
+
+__version__ = importlib_metadata.version("osprofiler")
diff --git a/osprofiler/_notifiers/base.py b/osprofiler/_notifiers/base.py
deleted file mode 100644
index cf0fa64572f12fab69c38811394f3d2dca79a32f..0000000000000000000000000000000000000000
--- a/osprofiler/_notifiers/base.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from osprofiler import _utils as utils
-
-
-class Notifier(object):
-
- def notify(self, info, context=None):
- """This method will be called on each notifier.notify() call.
-
- To add new drivers you should, create new subclass of this class and
- implement notify method.
-
- :param info: Contains information about trace element.
- In payload dict there are always 3 ids:
- "base_id" - uuid that is common for all notifications
- related to one trace. Used to simplify
- retrieving of all trace elements from
- Ceilometer.
- "parent_id" - uuid of parent element in trace
- "trace_id" - uuid of current element in trace
-
- With parent_id and trace_id it's quite simple to build
- tree of trace elements, which simplify analyze of trace.
-
- :param context: request context that is mostly used to specify
- current active user and tenant.
- """
-
- @staticmethod
- def factory(name, *args, **kwargs):
- for driver in utils.itersubclasses(Notifier):
- if name == driver.__name__:
- return driver(*args, **kwargs).notify
-
- raise TypeError("There is no driver, with name: %s" % name)
diff --git a/osprofiler/_notifiers/messaging.py b/osprofiler/_notifiers/messaging.py
deleted file mode 100644
index ee19c83cd64994bbdede5206280124070f844694..0000000000000000000000000000000000000000
--- a/osprofiler/_notifiers/messaging.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from osprofiler._notifiers import base
-
-
-class Messaging(base.Notifier):
-
- def __init__(self, messaging, context, transport, project, service, host):
- """Init Messaging notify driver.
-
- """
- super(Messaging, self).__init__()
- self.messaging = messaging
- self.context = context
- self.project = project
- self.service = service
-
- self.notifier = messaging.Notifier(
- transport, publisher_id=host, driver="messaging",
- topic="profiler", retry=0)
-
- def notify(self, info, context=None):
- """Send notifications to Ceilometer via oslo.messaging notifier API.
-
- :param info: Contains information about trace element.
- In payload dict there are always 3 ids:
- "base_id" - uuid that is common for all notifications
- related to one trace. Used to simplify
- retrieving of all trace elements from
- Ceilometer.
- "parent_id" - uuid of parent element in trace
- "trace_id" - uuid of current element in trace
-
- With parent_id and trace_id it's quite simple to build
- tree of trace elements, which simplify analyze of trace.
-
- :param context: request context that is mostly used to specify
- current active user and tenant.
- """
-
- info["project"] = self.project
- info["service"] = self.service
- self.notifier.info(context or self.context,
- "profiler.%s" % self.service, info)
diff --git a/osprofiler/_utils.py b/osprofiler/_utils.py
index 02fdcb8fdf031c728b61a915a9782e2e65b30bb6..c23d6008be38fcb65873bc78609aaa8b8d5e931b 100644
--- a/osprofiler/_utils.py
+++ b/osprofiler/_utils.py
@@ -18,33 +18,12 @@ import hashlib
import hmac
import json
import os
+import uuid
+from oslo_utils import secretutils
+from oslo_utils import uuidutils
import six
-try:
- # Only in python 2.7.7+ (and python 3.3+)
- # https://docs.python.org/2/library/hmac.html#hmac.compare_digest
- from hmac import compare_digest # noqa
-except (AttributeError, ImportError):
- # Taken/slightly modified from:
- # https://mail.python.org/pipermail/python-checkins/2012-June/114532.html
- def compare_digest(a, b):
- """Returns the equivalent of 'a == b', but avoids content based short
- circuiting to reduce the vulnerability to timing attacks.
- """
- # We assume the length of the expected digest is public knowledge,
- # thus this early return isn't leaking anything an attacker wouldn't
- # already know
- if len(a) != len(b):
- return False
-
- # We assume that integers in the bytes range are all cached,
- # thus timing shouldn't vary much due to integer object creation
- result = 0
- for x, y in zip(a, b):
- result |= ord(x) ^ ord(y)
- return result == 0
-
def split(text, strip=True):
"""Splits a comma separated text blob into its components.
@@ -61,7 +40,7 @@ def split(text, strip=True):
return text.split(",")
-def binary_encode(text, encoding='utf-8'):
+def binary_encode(text, encoding="utf-8"):
"""Converts a string of into a binary type using given encoding.
Does nothing if text not unicode string.
@@ -74,7 +53,7 @@ def binary_encode(text, encoding='utf-8'):
raise TypeError("Expected binary or string type")
-def binary_decode(data, encoding='utf-8'):
+def binary_decode(data, encoding="utf-8"):
"""Converts a binary type into a text type using given encoding.
Does nothing if data is already unicode string.
@@ -126,14 +105,14 @@ def signed_unpack(data, hmac_data, hmac_keys):
for hmac_key in hmac_keys:
try:
user_hmac_data = generate_hmac(data, hmac_key)
- except Exception:
+ except Exception: # nosec
pass
else:
- if compare_digest(hmac_data, user_hmac_data):
+ if secretutils.constant_time_compare(hmac_data, user_hmac_data):
try:
contents = json.loads(
binary_decode(base64.urlsafe_b64decode(data)))
- contents['hmac_key'] = hmac_key
+ contents["hmac_key"] = hmac_key
return contents
except Exception:
return None
@@ -161,12 +140,25 @@ def import_modules_from_package(package):
:param: package - Full package name. For example: rally.deploy.engines
"""
- path = [os.path.dirname(__file__), '..'] + package.split('.')
+ path = [os.path.dirname(__file__), ".."] + package.split(".")
path = os.path.join(*path)
for root, dirs, files in os.walk(path):
for filename in files:
- if filename.startswith('__') or not filename.endswith('.py'):
+ if filename.startswith("__") or not filename.endswith(".py"):
continue
new_package = ".".join(root.split(os.sep)).split("....")[1]
- module_name = '%s.%s' % (new_package, filename[:-3])
+ module_name = "%s.%s" % (new_package, filename[:-3])
__import__(module_name)
+
+
+def shorten_id(span_id):
+ """Convert from uuid4 to 64 bit id for OpenTracing"""
+ int64_max = (1 << 64) - 1
+ if isinstance(span_id, six.integer_types):
+ return span_id & int64_max
+ try:
+ short_id = uuid.UUID(span_id).int & int64_max
+ except ValueError:
+ # Return a new short id for this
+ short_id = shorten_id(uuidutils.generate_uuid())
+ return short_id
diff --git a/osprofiler/cmd/cliutils.py b/osprofiler/cmd/cliutils.py
index ded95bdc31b7b94edcda90e25985f5afd763a6d9..bcd413bbdbecd4e7cd521622017836df263540bd 100644
--- a/osprofiler/cmd/cliutils.py
+++ b/osprofiler/cmd/cliutils.py
@@ -25,7 +25,7 @@ def env(*args, **kwargs):
value = os.environ.get(arg)
if value:
return value
- return kwargs.get('default', '')
+ return kwargs.get("default", "")
def arg(*args, **kwargs):
@@ -46,7 +46,7 @@ def arg(*args, **kwargs):
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
- if not hasattr(func, 'arguments'):
+ if not hasattr(func, "arguments"):
func.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
diff --git a/osprofiler/cmd/commands.py b/osprofiler/cmd/commands.py
index 99a3acb7114effda12da2f94a1357fa784811dcb..c4750a53b7d4787f6ed859fac556de8966045e0d 100644
--- a/osprofiler/cmd/commands.py
+++ b/osprofiler/cmd/commands.py
@@ -16,9 +16,14 @@
import json
import os
+from oslo_utils import encodeutils
+from oslo_utils import uuidutils
+import prettytable
+import six
+
from osprofiler.cmd import cliutils
-from osprofiler.cmd import exc
-from osprofiler.parsers import ceilometer as ceiloparser
+from osprofiler.drivers import base
+from osprofiler import exc
class BaseCommand(object):
@@ -28,57 +33,162 @@ class BaseCommand(object):
class TraceCommands(BaseCommand):
group_name = "trace"
- @cliutils.arg('trace_id', help='trace id')
- @cliutils.arg('--json', dest='use_json', action='store_true',
- help='show trace in JSON')
- @cliutils.arg('--html', dest='use_html', action='store_true',
- help='show trace in HTML')
- @cliutils.arg('--out', dest='file_name', help='save output in file')
+ @cliutils.arg("trace", help="File with trace or trace id")
+ @cliutils.arg("--connection-string", dest="conn_str",
+ default=(cliutils.env("OSPROFILER_CONNECTION_STRING")),
+ help="Storage driver's connection string. Defaults to "
+ "env[OSPROFILER_CONNECTION_STRING] if set")
+ @cliutils.arg("--transport-url", dest="transport_url",
+ help="Oslo.messaging transport URL (for messaging:// driver "
+ "only), e.g. rabbit://user:password@host:5672/")
+ @cliutils.arg("--idle-timeout", dest="idle_timeout", type=int, default=1,
+ help="How long to wait for the trace to finish, in seconds "
+ "(for messaging:// driver only)")
+ @cliutils.arg("--json", dest="use_json", action="store_true",
+ help="show trace in JSON")
+ @cliutils.arg("--html", dest="use_html", action="store_true",
+ help="show trace in HTML")
+ @cliutils.arg("--local-libs", dest="local_libs", action="store_true",
+ help="use local static files of html in /libs/")
+ @cliutils.arg("--dot", dest="use_dot", action="store_true",
+ help="show trace in DOT language")
+ @cliutils.arg("--render-dot", dest="render_dot_filename",
+ help="filename for rendering the dot graph in pdf format")
+ @cliutils.arg("--out", dest="file_name", help="save output in file")
def show(self, args):
- """Displays trace-results by given trace id in HTML or JSON format."""
- try:
- import ceilometerclient.client
- import ceilometerclient.exc
- import ceilometerclient.shell
- except ImportError:
- raise ImportError(
- "To use this command, you should install 'ceilometerclient' "
- "manually. Use command:\n 'pip install ceilometerclient'.")
- try:
- client = ceilometerclient.client.get_client(
- args.ceilometer_api_version, **args.__dict__)
- notifications = ceiloparser.get_notifications(
- client, args.trace_id)
- except Exception as e:
- if hasattr(e, 'http_status') and e.http_status == 401:
- msg = "Invalid OpenStack Identity credentials."
- else:
- msg = "Something has gone wrong. See logs for more details."
+ """Display trace results in HTML, JSON or DOT format."""
- raise exc.CommandError(msg)
+ if not args.conn_str:
+ raise exc.CommandError(
+ "You must provide connection string via"
+ " either --connection-string or "
+ "via env[OSPROFILER_CONNECTION_STRING]")
- if not notifications:
- msg = ("Trace with UUID %s not found. "
- "There are 2 possible reasons: \n"
- " 1) You are using not admin credentials\n"
- " 2) You specified wrong trace id" % args.trace_id)
+ trace = None
+
+ if not uuidutils.is_uuid_like(args.trace):
+ trace = json.load(open(args.trace))
+ else:
+ try:
+ engine = base.get_driver(args.conn_str, **args.__dict__)
+ except Exception as e:
+ raise exc.CommandError(e.message)
+
+ trace = engine.get_report(args.trace)
+
+ if not trace or not trace.get("children"):
+ msg = ("Trace with UUID %s not found. Please check the HMAC key "
+ "used in the command." % args.trace)
raise exc.CommandError(msg)
- parsed_notifications = ceiloparser.parse_notifications(notifications)
+ # Since datetime.datetime is not JSON serializable by default,
+ # this method will handle that.
+ def datetime_json_serialize(obj):
+ if hasattr(obj, "isoformat"):
+ return obj.isoformat()
+ else:
+ return obj
if args.use_json:
- output = json.dumps(parsed_notifications)
+ output = json.dumps(trace, default=datetime_json_serialize,
+ separators=(",", ": "),
+ indent=2)
elif args.use_html:
with open(os.path.join(os.path.dirname(__file__),
"template.html")) as html_template:
output = html_template.read().replace(
- "$DATA", json.dumps(parsed_notifications))
+ "$DATA", json.dumps(trace, indent=4,
+ separators=(",", ": "),
+ default=datetime_json_serialize))
+ if args.local_libs:
+ output = output.replace("$LOCAL", "true")
+ else:
+ output = output.replace("$LOCAL", "false")
+ elif args.use_dot:
+ dot_graph = self._create_dot_graph(trace)
+ output = dot_graph.source
+ if args.render_dot_filename:
+ dot_graph.render(args.render_dot_filename, cleanup=True)
else:
raise exc.CommandError("You should choose one of the following "
- "output-formats: --json or --html.")
+ "output formats: json, html or dot.")
if args.file_name:
- with open(args.file_name, 'w+') as output_file:
+ with open(args.file_name, "w+") as output_file:
output_file.write(output)
else:
- print (output)
+ print(output)
+
+ def _create_dot_graph(self, trace):
+ try:
+ import graphviz
+ except ImportError:
+ raise exc.CommandError(
+ "graphviz library is required to use this option.")
+
+ dot = graphviz.Digraph(format="pdf")
+ next_id = [0]
+
+ def _create_node(info):
+ time_taken = info["finished"] - info["started"]
+ service = info["service"] + ":" if "service" in info else ""
+ name = info["name"]
+ label = "%s%s - %d ms" % (service, name, time_taken)
+
+ if name == "wsgi":
+ req = info["meta.raw_payload.wsgi-start"]["info"]["request"]
+ label = "%s\\n%s %s.." % (label, req["method"],
+ req["path"][:30])
+ elif name == "rpc" or name == "driver":
+ raw = info["meta.raw_payload.%s-start" % name]
+ fn_name = raw["info"]["function"]["name"]
+ label = "%s\\n%s" % (label, fn_name.split(".")[-1])
+
+ node_id = str(next_id[0])
+ next_id[0] += 1
+ dot.node(node_id, label)
+ return node_id
+
+ def _create_sub_graph(root):
+ rid = _create_node(root["info"])
+ for child in root["children"]:
+ cid = _create_sub_graph(child)
+ dot.edge(rid, cid)
+ return rid
+
+ _create_sub_graph(trace)
+ return dot
+
+ @cliutils.arg("--connection-string", dest="conn_str",
+ default=cliutils.env("OSPROFILER_CONNECTION_STRING"),
+ help="Storage driver's connection string. Defaults to "
+ "env[OSPROFILER_CONNECTION_STRING] if set")
+ @cliutils.arg("--error-trace", dest="error_trace",
+ type=bool, default=False,
+ help="List all traces that contain error.")
+ def list(self, args):
+ """List all traces"""
+ if not args.conn_str:
+ raise exc.CommandError(
+ "You must provide connection string via"
+ " either --connection-string or "
+ "via env[OSPROFILER_CONNECTION_STRING]")
+ try:
+ engine = base.get_driver(args.conn_str, **args.__dict__)
+ except Exception as e:
+ raise exc.CommandError(e.message)
+
+ fields = ("base_id", "timestamp")
+ pretty_table = prettytable.PrettyTable(fields)
+ pretty_table.align = "l"
+ if not args.error_trace:
+ traces = engine.list_traces(fields)
+ else:
+ traces = engine.list_error_traces()
+ for trace in traces:
+ row = [trace[field] for field in fields]
+ pretty_table.add_row(row)
+ if six.PY3:
+ print(encodeutils.safe_encode(pretty_table.get_string()).decode())
+ else:
+ print(encodeutils.safe_encode(pretty_table.get_string()))
diff --git a/osprofiler/cmd/shell.py b/osprofiler/cmd/shell.py
index c7113c89fbbb8c0458c66b14179210b9348195a5..628865947ae878fbcb238fe1679e75ad1fde82c7 100644
--- a/osprofiler/cmd/shell.py
+++ b/osprofiler/cmd/shell.py
@@ -18,46 +18,23 @@
Command-line interface to the OpenStack Profiler.
"""
+import argparse
import inspect
import sys
-import argparse
+from oslo_config import cfg
import osprofiler
-from osprofiler.cmd import cliutils
from osprofiler.cmd import commands
-from osprofiler.cmd import exc
+from osprofiler import exc
+from osprofiler import opts
class OSProfilerShell(object):
def __init__(self, argv):
args = self._get_base_parser().parse_args(argv)
-
- if not (args.os_auth_token and args.ceilometer_url):
- if not args.os_username:
- raise exc.CommandError(
- "You must provide a username via either --os-username or "
- "via env[OS_USERNAME]")
-
- if not args.os_password:
- raise exc.CommandError(
- "You must provide a password via either --os-password or "
- "via env[OS_PASSWORD]")
-
- if self._no_project_and_domain_set(args):
- # steer users towards Keystone V3 API
- raise exc.CommandError(
- "You must provide a project_id via either --os-project-id "
- "or via env[OS_PROJECT_ID] and a domain_name via either "
- "--os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or "
- "a domain_id via either --os-user-domain-id or via "
- "env[OS_USER_DOMAIN_ID]")
-
- if not args.os_auth_url:
- raise exc.CommandError(
- "You must provide an auth url via either --os-auth-url or "
- "via env[OS_AUTH_URL]")
+ opts.set_defaults(cfg.CONF)
args.func(args)
@@ -68,150 +45,26 @@ class OSProfilerShell(object):
add_help=True
)
- parser.add_argument('-v', '--version',
- action='version',
+ parser.add_argument("-v", "--version",
+ action="version",
version=osprofiler.__version__)
- self._append_ceilometer_args(parser)
- self._append_identity_args(parser)
self._append_subcommands(parser)
return parser
- def _append_ceilometer_args(self, parent_parser):
- parser = parent_parser.add_argument_group('ceilometer')
- parser.add_argument(
- '--ceilometer-url', default=cliutils.env('CEILOMETER_URL'),
- help='Defaults to env[CEILOMETER_URL].')
- parser.add_argument(
- '--ceilometer-api-version',
- default=cliutils.env('CEILOMETER_API_VERSION', default='2'),
- help='Defaults to env[CEILOMETER_API_VERSION] or 2.')
-
- def _append_identity_args(self, parent_parser):
- # FIXME(fabgia): identity related parameters should be passed by the
- # Keystone client itself to avoid constant update in all the services
- # clients. When this fix is merged this method can be made obsolete.
- # Bug: https://bugs.launchpad.net/python-keystoneclient/+bug/1332337
- parser = parent_parser.add_argument_group('identity')
- parser.add_argument('-k', '--insecure',
- default=False,
- action='store_true',
- help="Explicitly allow osprofiler to "
- "perform \"insecure\" SSL (https) requests. "
- "The server's certificate will "
- "not be verified against any certificate "
- "authorities. This option should be used with "
- "caution.")
-
- # User related options
- parser.add_argument('--os-username',
- default=cliutils.env('OS_USERNAME'),
- help='Defaults to env[OS_USERNAME].')
-
- parser.add_argument('--os-user-id',
- default=cliutils.env('OS_USER_ID'),
- help='Defaults to env[OS_USER_ID].')
-
- parser.add_argument('--os-password',
- default=cliutils.env('OS_PASSWORD'),
- help='Defaults to env[OS_PASSWORD].')
-
- # Domain related options
- parser.add_argument('--os-user-domain-id',
- default=cliutils.env('OS_USER_DOMAIN_ID'),
- help='Defaults to env[OS_USER_DOMAIN_ID].')
-
- parser.add_argument('--os-user-domain-name',
- default=cliutils.env('OS_USER_DOMAIN_NAME'),
- help='Defaults to env[OS_USER_DOMAIN_NAME].')
-
- parser.add_argument('--os-project-domain-id',
- default=cliutils.env('OS_PROJECT_DOMAIN_ID'),
- help='Defaults to env[OS_PROJECT_DOMAIN_ID].')
-
- parser.add_argument('--os-project-domain-name',
- default=cliutils.env('OS_PROJECT_DOMAIN_NAME'),
- help='Defaults to env[OS_PROJECT_DOMAIN_NAME].')
-
- # Project V3 or Tenant V2 related options
- parser.add_argument('--os-project-id',
- default=cliutils.env('OS_PROJECT_ID'),
- help='Another way to specify tenant ID. '
- 'This option is mutually exclusive with '
- ' --os-tenant-id. '
- 'Defaults to env[OS_PROJECT_ID].')
-
- parser.add_argument('--os-project-name',
- default=cliutils.env('OS_PROJECT_NAME'),
- help='Another way to specify tenant name. '
- 'This option is mutually exclusive with '
- ' --os-tenant-name. '
- 'Defaults to env[OS_PROJECT_NAME].')
-
- parser.add_argument('--os-tenant-id',
- default=cliutils.env('OS_TENANT_ID'),
- help='This option is mutually exclusive with '
- ' --os-project-id. '
- 'Defaults to env[OS_PROJECT_ID].')
-
- parser.add_argument('--os-tenant-name',
- default=cliutils.env('OS_TENANT_NAME'),
- help='Defaults to env[OS_TENANT_NAME].')
-
- # Auth related options
- parser.add_argument('--os-auth-url',
- default=cliutils.env('OS_AUTH_URL'),
- help='Defaults to env[OS_AUTH_URL].')
-
- parser.add_argument('--os-auth-token',
- default=cliutils.env('OS_AUTH_TOKEN'),
- help='Defaults to env[OS_AUTH_TOKEN].')
-
- parser.add_argument('--os-cacert',
- metavar='',
- dest='os_cacert',
- default=cliutils.env('OS_CACERT'),
- help='Path of CA TLS certificate(s) used to verify'
- ' the remote server\'s certificate. Without this '
- 'option ceilometer looks for the default system CA'
- ' certificates.')
-
- parser.add_argument('--os-cert',
- help='Path of certificate file to use in SSL '
- 'connection. This file can optionally be '
- 'prepended with the private key.')
-
- parser.add_argument('--os-key',
- help='Path of client key to use in SSL '
- 'connection. This option is not necessary '
- 'if your key is prepended to your cert file.')
-
- # Service Catalog related options
- parser.add_argument('--os-service-type',
- default=cliutils.env('OS_SERVICE_TYPE'),
- help='Defaults to env[OS_SERVICE_TYPE].')
-
- parser.add_argument('--os-endpoint-type',
- default=cliutils.env('OS_ENDPOINT_TYPE'),
- help='Defaults to env[OS_ENDPOINT_TYPE].')
-
- parser.add_argument('--os-region-name',
- default=cliutils.env('OS_REGION_NAME'),
- help='Defaults to env[OS_REGION_NAME].')
-
def _append_subcommands(self, parent_parser):
- subcommands = parent_parser.add_subparsers(help='')
+ subcommands = parent_parser.add_subparsers(help="")
for group_cls in commands.BaseCommand.__subclasses__():
group_parser = subcommands.add_parser(group_cls.group_name)
subcommand_parser = group_parser.add_subparsers()
for name, callback in inspect.getmembers(
group_cls(), predicate=inspect.ismethod):
- command = name.replace('_', '-')
- desc = callback.__doc__ or ''
- help_message = desc.strip().split('\n')[0]
- arguments = getattr(callback, 'arguments', [])
+ command = name.replace("_", "-")
+ desc = callback.__doc__ or ""
+ help_message = desc.strip().split("\n")[0]
+ arguments = getattr(callback, "arguments", [])
command_parser = subcommand_parser.add_parser(
command, help=help_message, description=desc)
@@ -220,9 +73,9 @@ class OSProfilerShell(object):
command_parser.set_defaults(func=callback)
def _no_project_and_domain_set(self, args):
- if not (args.os_project_id or (args.os_project_name and
- (args.os_user_domain_name or args.os_user_domain_id)) or
- (args.os_tenant_id or args.os_tenant_name)):
+ if not (args.os_project_id or (args.os_project_name
+ and (args.os_user_domain_name or args.os_user_domain_id))
+ or (args.os_tenant_id or args.os_tenant_name)):
return True
else:
return False
@@ -235,7 +88,7 @@ def main(args=None):
try:
OSProfilerShell(args)
except exc.CommandError as e:
- print (e.message)
+ print(e.message)
return 1
diff --git a/osprofiler/cmd/template.html b/osprofiler/cmd/template.html
index a63f12dcbaa36b5c004ba0b57ba4ce72dd478ad3..9cf4c90245d7884967f9ce4fba801d6be9d56313 100644
--- a/osprofiler/cmd/template.html
+++ b/osprofiler/cmd/template.html
@@ -1,209 +1,332 @@
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
-
+ .trace tr td {
+ width: 14%;
+ white-space: nowrap;
+ padding: 2px;
+ border-right: 1px solid #EEE;
+ }
-
+
+
+
-
-
-
-
-
-
-
-
-
-
Levels
-
Duration
-
Type
-
Project
-
Service
-
Host
-
Details
-
-
-
+
+
+
+
+
+
+
+
+
+
Levels
+
Duration
+
Type
+
Project
+
Service
+
Host
+
Details
+
+
+
+
+
+
+
+
-
-
-
\ No newline at end of file
+
diff --git a/osprofiler/drivers/__init__.py b/osprofiler/drivers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..022b09408f9525116e500feaa3ecd0c19e00b4ab
--- /dev/null
+++ b/osprofiler/drivers/__init__.py
@@ -0,0 +1,8 @@
+from osprofiler.drivers import base # noqa
+from osprofiler.drivers import elasticsearch_driver # noqa
+from osprofiler.drivers import jaeger # noqa
+from osprofiler.drivers import loginsight # noqa
+from osprofiler.drivers import messaging # noqa
+from osprofiler.drivers import mongodb # noqa
+from osprofiler.drivers import redis_driver # noqa
+from osprofiler.drivers import sqlalchemy_driver # noqa
diff --git a/osprofiler/drivers/base.py b/osprofiler/drivers/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb0dc7ea6e539962cf3caade95411a501f8bd4b1
--- /dev/null
+++ b/osprofiler/drivers/base.py
@@ -0,0 +1,277 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import logging
+
+import six.moves.urllib.parse as urlparse
+
+from osprofiler import _utils
+
+LOG = logging.getLogger(__name__)
+
+
+def get_driver(connection_string, *args, **kwargs):
+ """Create driver's instance according to specified connection string"""
+ # NOTE(ayelistratov) Backward compatibility with old Messaging notation
+ # Remove after patching all OS services
+ # NOTE(ishakhat) Raise exception when ParsedResult.scheme is empty
+ if "://" not in connection_string:
+ connection_string += "://"
+
+ parsed_connection = urlparse.urlparse(connection_string)
+ LOG.debug("String %s looks like a connection string, trying it.",
+ connection_string)
+
+ backend = parsed_connection.scheme
+ # NOTE(toabctl): To be able to use the connection_string for as sqlalchemy
+ # connection string, transform the backend to the correct driver
+ # See https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
+ if backend in ["mysql", "mysql+pymysql", "mysql+mysqldb",
+ "postgresql", "postgresql+psycopg2"]:
+ backend = "sqlalchemy"
+ for driver in _utils.itersubclasses(Driver):
+ if backend == driver.get_name():
+ return driver(connection_string, *args, **kwargs)
+
+ raise ValueError("Driver not found for connection string: "
+ "%s" % connection_string)
+
+
+class Driver(object):
+ """Base Driver class.
+
+ This class provides protected common methods that
+ do not rely on a specific storage backend. Public methods notify() and/or
+ get_report(), which require using storage backend API, must be overridden
+ and implemented by any class derived from this class.
+ """
+
+ default_trace_fields = {"base_id", "timestamp"}
+
+ def __init__(self, connection_str, project=None, service=None, host=None,
+ **kwargs):
+ self.connection_str = connection_str
+ self.project = project
+ self.service = service
+ self.host = host
+ self.result = {}
+ self.started_at = None
+ self.finished_at = None
+ # Last trace started time
+ self.last_started_at = None
+
+ profiler_config = kwargs.get("conf", {}).get("profiler", {})
+ if hasattr(profiler_config, "filter_error_trace"):
+ self.filter_error_trace = profiler_config.filter_error_trace
+ else:
+ self.filter_error_trace = False
+
+ def notify(self, info, **kwargs):
+ """This method will be called on each notifier.notify() call.
+
+ To add new drivers you should, create new subclass of this class and
+ implement notify method.
+
+ :param info: Contains information about trace element.
+ In payload dict there are always 3 ids:
+ "base_id" - uuid that is common for all notifications
+ related to one trace. Used to simplify
+ retrieving of all trace elements from
+ the backend.
+ "parent_id" - uuid of parent element in trace
+ "trace_id" - uuid of current element in trace
+
+ With parent_id and trace_id it's quite simple to build
+ tree of trace elements, which simplifies analysis of the trace.
+
+ """
+ raise NotImplementedError("{0}: This method is either not supported "
+ "or has to be overridden".format(
+ self.get_name()))
+
+ def get_report(self, base_id):
+ """Forms and returns report composed from the stored notifications.
+
+ :param base_id: Base id of trace elements.
+ """
+ raise NotImplementedError("{0}: This method is either not supported "
+ "or has to be overridden".format(
+ self.get_name()))
+
+ @classmethod
+ def get_name(cls):
+ """Returns backend specific name for the driver."""
+ return cls.__name__
+
+ def list_traces(self, fields=None):
+ """Query all traces from the storage.
+
+ :param fields: Set of trace fields to return. Defaults to 'base_id'
+ and 'timestamp'
+ :return List of traces, where each trace is a dictionary containing
+ at least `base_id` and `timestamp`.
+ """
+ raise NotImplementedError("{0}: This method is either not supported "
+ "or has to be overridden".format(
+ self.get_name()))
+
+ def list_error_traces(self):
+ """Query all error traces from the storage.
+
+ :return List of traces, where each trace is a dictionary containing
+ `base_id` and `timestamp`.
+ """
+ raise NotImplementedError("{0}: This method is either not supported "
+ "or has to be overridden".format(
+ self.get_name()))
+
+ @staticmethod
+ def _build_tree(nodes):
+ """Builds the tree (forest) data structure based on the list of nodes.
+
+ Tree building works in O(n*log(n)).
+
+ :param nodes: dict of nodes, where each node is a dictionary with fields
+ "parent_id", "trace_id", "info"
+ :returns: list of top level ("root") nodes in form of dictionaries,
+ each containing the "info" and "children" fields, where
+ "children" is the list of child nodes ("children" will be
+ empty for leafs)
+ """
+
+ tree = []
+
+ for trace_id in nodes:
+ node = nodes[trace_id]
+ node.setdefault("children", [])
+ parent_id = node["parent_id"]
+ if parent_id in nodes:
+ nodes[parent_id].setdefault("children", [])
+ nodes[parent_id]["children"].append(node)
+ else:
+ tree.append(node) # no parent => top-level node
+
+ for trace_id in nodes:
+ nodes[trace_id]["children"].sort(
+ key=lambda x: x["info"]["started"])
+
+ return sorted(tree, key=lambda x: x["info"]["started"])
+
+ def _append_results(self, trace_id, parent_id, name, project, service,
+ host, timestamp, raw_payload=None):
+ """Appends the notification to the dictionary of notifications.
+
+ :param trace_id: UUID of current trace point
+ :param parent_id: UUID of parent trace point
+ :param name: name of operation
+ :param project: project name
+ :param service: service name
+ :param host: host name or FQDN
+ :param timestamp: Unicode-style timestamp matching the pattern
+ "%Y-%m-%dT%H:%M:%S.%f" , e.g. 2016-04-18T17:42:10.77
+ :param raw_payload: raw notification without any filtering, with all
+ fields included
+ """
+ timestamp = datetime.datetime.strptime(timestamp,
+ "%Y-%m-%dT%H:%M:%S.%f")
+ if trace_id not in self.result:
+ self.result[trace_id] = {
+ "info": {
+ "name": name.split("-")[0],
+ "project": project,
+ "service": service,
+ "host": host,
+ },
+ "trace_id": trace_id,
+ "parent_id": parent_id,
+ }
+
+ self.result[trace_id]["info"]["meta.raw_payload.%s"
+ % name] = raw_payload
+
+ if name.endswith("stop"):
+ self.result[trace_id]["info"]["finished"] = timestamp
+ self.result[trace_id]["info"]["exception"] = "None"
+ if raw_payload and "info" in raw_payload:
+ exc = raw_payload["info"].get("etype", "None")
+ self.result[trace_id]["info"]["exception"] = exc
+ else:
+ self.result[trace_id]["info"]["started"] = timestamp
+ if not self.last_started_at or self.last_started_at < timestamp:
+ self.last_started_at = timestamp
+
+ if not self.started_at or self.started_at > timestamp:
+ self.started_at = timestamp
+
+ if not self.finished_at or self.finished_at < timestamp:
+ self.finished_at = timestamp
+
+ def _parse_results(self):
+ """Parses Driver's notifications placed by _append_results() .
+
+ :returns: full profiling report
+ """
+
+ def msec(dt):
+ # NOTE(boris-42): Unfortunately this is the simplest way that works
+ # in py26 and py27
+ microsec = (dt.microseconds + (dt.seconds + dt.days * 24 * 3600)
+ * 1e6)
+ return int(microsec / 1000.0)
+
+ stats = {}
+
+ for r in self.result.values():
+ # NOTE(boris-42): We are not able to guarantee that the backend
+ # consumed all messages => so we should at least make duration 0ms.
+
+ if "started" not in r["info"]:
+ r["info"]["started"] = r["info"]["finished"]
+ if "finished" not in r["info"]:
+ r["info"]["finished"] = r["info"]["started"]
+
+ op_type = r["info"]["name"]
+ op_started = msec(r["info"]["started"] - self.started_at)
+ op_finished = msec(r["info"]["finished"]
+ - self.started_at)
+ duration = op_finished - op_started
+
+ r["info"]["started"] = op_started
+ r["info"]["finished"] = op_finished
+
+ if op_type not in stats:
+ stats[op_type] = {
+ "count": 1,
+ "duration": duration
+ }
+ else:
+ stats[op_type]["count"] += 1
+ stats[op_type]["duration"] += duration
+
+ return {
+ "info": {
+ "name": "total",
+ "started": 0,
+ "finished": msec(
+ self.finished_at - self.started_at
+ ) if self.started_at else None,
+ "last_trace_started": msec(
+ self.last_started_at - self.started_at
+ ) if self.started_at else None
+ },
+ "children": self._build_tree(self.result),
+ "stats": stats
+ }
diff --git a/osprofiler/drivers/elasticsearch_driver.py b/osprofiler/drivers/elasticsearch_driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e80fe057c2eb1ffbafc9ca210e5e8c6114c2548
--- /dev/null
+++ b/osprofiler/drivers/elasticsearch_driver.py
@@ -0,0 +1,169 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import six.moves.urllib.parse as parser
+
+from osprofiler.drivers import base
+from osprofiler import exc
+
+
+class ElasticsearchDriver(base.Driver):
+ def __init__(self, connection_str, index_name="osprofiler-notifications",
+ project=None, service=None, host=None, conf=cfg.CONF,
+ **kwargs):
+ """Elasticsearch driver for OSProfiler."""
+
+ super(ElasticsearchDriver, self).__init__(connection_str,
+ project=project,
+ service=service,
+ host=host,
+ conf=conf,
+ **kwargs)
+ try:
+ from elasticsearch import Elasticsearch
+ except ImportError:
+ raise exc.CommandError(
+ "To use OSProfiler with ElasticSearch driver, "
+ "please install `elasticsearch` library. "
+ "To install with pip:\n `pip install elasticsearch`.")
+
+ client_url = parser.urlunparse(parser.urlparse(self.connection_str)
+ ._replace(scheme="http"))
+ self.conf = conf
+ self.client = Elasticsearch(client_url)
+ self.index_name = index_name
+ self.index_name_error = "osprofiler-notifications-error"
+
+ @classmethod
+ def get_name(cls):
+ return "elasticsearch"
+
+ def notify(self, info):
+ """Send notifications to Elasticsearch.
+
+ :param info: Contains information about trace element.
+ In payload dict there are always 3 ids:
+ "base_id" - uuid that is common for all notifications
+ related to one trace. Used to simplify
+ retrieving of all trace elements from
+ Elasticsearch.
+ "parent_id" - uuid of parent element in trace
+ "trace_id" - uuid of current element in trace
+
+ With parent_id and trace_id it's quite simple to build
+ tree of trace elements, which simplifies analysis of the trace.
+
+ """
+
+ info = info.copy()
+ info["project"] = self.project
+ info["service"] = self.service
+ self.client.index(index=self.index_name,
+ doc_type=self.conf.profiler.es_doc_type, body=info)
+
+ if (self.filter_error_trace
+ and info.get("info", {}).get("etype") is not None):
+ self.notify_error_trace(info)
+
+ def notify_error_trace(self, info):
+ """Store base_id and timestamp of error trace to a separate index."""
+ self.client.index(
+ index=self.index_name_error,
+ doc_type=self.conf.profiler.es_doc_type,
+ body={"base_id": info["base_id"], "timestamp": info["timestamp"]}
+ )
+
+ def _hits(self, response):
+ """Returns all hits of search query using scrolling
+
+ :param response: ElasticSearch query response
+ """
+ scroll_id = response["_scroll_id"]
+ scroll_size = len(response["hits"]["hits"])
+ result = []
+
+ while scroll_size > 0:
+ for hit in response["hits"]["hits"]:
+ result.append(hit["_source"])
+ response = self.client.scroll(scroll_id=scroll_id,
+ scroll=self.conf.profiler.
+ es_scroll_time)
+ scroll_id = response["_scroll_id"]
+ scroll_size = len(response["hits"]["hits"])
+
+ return result
+
+ def list_traces(self, fields=None):
+ """Query all traces from the storage.
+
+ :param fields: Set of trace fields to return. Defaults to 'base_id'
+ and 'timestamp'
+ :return List of traces, where each trace is a dictionary containing
+ at least `base_id` and `timestamp`.
+ """
+ query = {"match_all": {}}
+ fields = set(fields or self.default_trace_fields)
+
+ response = self.client.search(index=self.index_name,
+ doc_type=self.conf.profiler.es_doc_type,
+ size=self.conf.profiler.es_scroll_size,
+ scroll=self.conf.profiler.es_scroll_time,
+ body={"_source": fields, "query": query,
+ "sort": [{"timestamp": "asc"}]})
+
+ return self._hits(response)
+
+ def list_error_traces(self):
+ """Returns all traces that have error/exception."""
+ response = self.client.search(
+ index=self.index_name_error,
+ doc_type=self.conf.profiler.es_doc_type,
+ size=self.conf.profiler.es_scroll_size,
+ scroll=self.conf.profiler.es_scroll_time,
+ body={
+ "_source": self.default_trace_fields,
+ "query": {"match_all": {}},
+ "sort": [{"timestamp": "asc"}]
+ }
+ )
+
+ return self._hits(response)
+
+ def get_report(self, base_id):
+ """Retrieves and parses notification from Elasticsearch.
+
+ :param base_id: Base id of trace elements.
+ """
+ response = self.client.search(index=self.index_name,
+ doc_type=self.conf.profiler.es_doc_type,
+ size=self.conf.profiler.es_scroll_size,
+ scroll=self.conf.profiler.es_scroll_time,
+ body={"query": {
+ "match": {"base_id": base_id}}})
+
+ for n in self._hits(response):
+ trace_id = n["trace_id"]
+ parent_id = n["parent_id"]
+ name = n["name"]
+ project = n["project"]
+ service = n["service"]
+ host = n["info"]["host"]
+ timestamp = n["timestamp"]
+
+ self._append_results(trace_id, parent_id, name, project, service,
+ host, timestamp, n)
+
+ return self._parse_results()
diff --git a/osprofiler/drivers/jaeger.py b/osprofiler/drivers/jaeger.py
new file mode 100644
index 0000000000000000000000000000000000000000..262f6a693d25f644f9ed8f8fb0fb38a9015e03e3
--- /dev/null
+++ b/osprofiler/drivers/jaeger.py
@@ -0,0 +1,149 @@
+# Copyright 2018 Fujitsu Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import datetime
+import time
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+import six.moves.urllib.parse as parser
+
+from osprofiler import _utils as utils
+from osprofiler.drivers import base
+from osprofiler import exc
+
+
+class Jaeger(base.Driver):
+ def __init__(self, connection_str, project=None, service=None, host=None,
+ conf=cfg.CONF, **kwargs):
+ """Jaeger driver for OSProfiler."""
+
+ super(Jaeger, self).__init__(connection_str, project=project,
+ service=service, host=host,
+ conf=conf, **kwargs)
+ try:
+ import jaeger_client
+ self.jaeger_client = jaeger_client
+ except ImportError:
+ raise exc.CommandError(
+ "To use OSProfiler with Uber Jaeger tracer, "
+ "please install `jaeger-client` library. "
+ "To install with pip:\n `pip install jaeger-client`."
+ )
+
+ parsed_url = parser.urlparse(connection_str)
+ cfg = {
+ "local_agent": {
+ "reporting_host": parsed_url.hostname,
+ "reporting_port": parsed_url.port,
+ }
+ }
+
+ # Initialize tracer for each profiler
+ service_name = "{}-{}".format(project, service)
+ config = jaeger_client.Config(cfg, service_name=service_name)
+ self.tracer = config.initialize_tracer()
+
+ self.spans = collections.deque()
+
+ @classmethod
+ def get_name(cls):
+ return "jaeger"
+
+ def notify(self, payload):
+ if payload["name"].endswith("start"):
+ timestamp = datetime.datetime.strptime(payload["timestamp"],
+ "%Y-%m-%dT%H:%M:%S.%f")
+ epoch = datetime.datetime.utcfromtimestamp(0)
+ start_time = (timestamp - epoch).total_seconds()
+
+ # Create parent span
+ child_of = self.jaeger_client.SpanContext(
+ trace_id=utils.shorten_id(payload["base_id"]),
+ span_id=utils.shorten_id(payload["parent_id"]),
+ parent_id=None,
+ flags=self.jaeger_client.span.SAMPLED_FLAG
+ )
+
+ # Create Jaeger Tracing span
+ span = self.tracer.start_span(
+ operation_name=payload["name"].rstrip("-start"),
+ child_of=child_of,
+ tags=self.create_span_tags(payload),
+ start_time=start_time
+ )
+
+ # Replace Jaeger Tracing span_id (random id) to OSProfiler span_id
+ span.context.span_id = utils.shorten_id(payload["trace_id"])
+ self.spans.append(span)
+ else:
+ span = self.spans.pop()
+
+ # Store result of db call and function call
+ for call in ("db", "function"):
+ if payload.get("info", {}).get(call) is not None:
+ span.set_tag("result", payload["info"][call]["result"])
+
+ # Span error tag and log
+ if payload["info"].get("etype") is not None:
+ span.set_tag("error", True)
+ span.log_kv({"error.kind": payload["info"]["etype"]})
+ span.log_kv({"message": payload["info"]["message"]})
+
+ span.finish(finish_time=time.time())
+
+ def get_report(self, base_id):
+ """Please use Jaeger Tracing UI for this task."""
+ return self._parse_results()
+
+ def list_traces(self, fields=None):
+ """Please use Jaeger Tracing UI for this task."""
+ return []
+
+ def list_error_traces(self):
+ """Please use Jaeger Tracing UI for this task."""
+ return []
+
+ def create_span_tags(self, payload):
+ """Create tags for OpenTracing span.
+
+ :param info: Information from OSProfiler trace.
+ :returns tags: A dictionary contains standard tags
+ from OpenTracing semantic conventions,
+ and some other custom tags related to http, db calls.
+ """
+ tags = {}
+ info = payload["info"]
+
+ if info.get("db"):
+ # DB calls
+ tags["db.statement"] = info["db"]["statement"]
+ tags["db.params"] = jsonutils.dumps(info["db"]["params"])
+ elif info.get("request"):
+ # WSGI call
+ tags["http.path"] = info["request"]["path"]
+ tags["http.query"] = info["request"]["query"]
+ tags["http.method"] = info["request"]["method"]
+ tags["http.scheme"] = info["request"]["scheme"]
+ elif info.get("function"):
+ # RPC, function calls
+ if "args" in info["function"]:
+ tags["args"] = info["function"]["args"]
+ if "kwargs" in info["function"]:
+ tags["kwargs"] = info["function"]["kwargs"]
+ tags["name"] = info["function"]["name"]
+
+ return tags
diff --git a/osprofiler/drivers/loginsight.py b/osprofiler/drivers/loginsight.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e875aec17b7eae2f2d8f342fbab951d061b44bf
--- /dev/null
+++ b/osprofiler/drivers/loginsight.py
@@ -0,0 +1,263 @@
+# Copyright (c) 2016 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Classes to use VMware vRealize Log Insight as the trace data store.
+"""
+
+import json
+import logging as log
+
+import netaddr
+from oslo_concurrency.lockutils import synchronized
+import requests
+import six.moves.urllib.parse as urlparse
+
+from osprofiler.drivers import base
+from osprofiler import exc
+
+LOG = log.getLogger(__name__)
+
+
+class LogInsightDriver(base.Driver):
+ """Driver for storing trace data in VMware vRealize Log Insight.
+
+ The driver uses Log Insight ingest service to store trace data and uses
+ the query service to retrieve it. The minimum required Log Insight version
+ is 3.3.
+
+ The connection string to initialize the driver should be of the format:
+ loginsight://<username>:<password>@<host>
+
+ If the username or password contains the character ':' or '@', it must be
+ escaped using URL encoding. For example, the connection string to connect
+ to Log Insight server at 10.1.2.3 using username "osprofiler" and password
+ "p@ssword" is:
+ loginsight://osprofiler:p%40ssword@10.1.2.3
+ """
+ def __init__(
+ self, connection_str, project=None, service=None, host=None,
+ **kwargs):
+ super(LogInsightDriver, self).__init__(connection_str,
+ project=project,
+ service=service,
+ host=host)
+
+ parsed_connection = urlparse.urlparse(connection_str)
+ try:
+ creds, host = parsed_connection.netloc.split("@")
+ username, password = creds.split(":")
+ except ValueError:
+ raise ValueError("Connection string format is: loginsight://"
+ ":@. If the "
+ "username or password contains the character '@' "
+ "or ':', it must be escaped using URL encoding.")
+
+ username = urlparse.unquote(username)
+ password = urlparse.unquote(password)
+ self._client = LogInsightClient(host, username, password)
+
+ self._client.login()
+
+ @classmethod
+ def get_name(cls):
+ return "loginsight"
+
+ def notify(self, info):
+ """Send trace to Log Insight server."""
+
+ trace = info.copy()
+ trace["project"] = self.project
+ trace["service"] = self.service
+
+ event = {"text": "OSProfiler trace"}
+
+ def _create_field(name, content):
+ return {"name": name, "content": content}
+
+ event["fields"] = [_create_field("base_id", trace["base_id"]),
+ _create_field("trace_id", trace["trace_id"]),
+ _create_field("project", trace["project"]),
+ _create_field("service", trace["service"]),
+ _create_field("name", trace["name"]),
+ _create_field("trace", json.dumps(trace))]
+
+ self._client.send_event(event)
+
+ def get_report(self, base_id):
+ """Retrieves and parses trace data from Log Insight.
+
+ :param base_id: Trace base ID
+ """
+ response = self._client.query_events({"base_id": base_id})
+
+ if "events" in response:
+ for event in response["events"]:
+ if "fields" not in event:
+ continue
+
+ for field in event["fields"]:
+ if field["name"] == "trace":
+ trace = json.loads(field["content"])
+ trace_id = trace["trace_id"]
+ parent_id = trace["parent_id"]
+ name = trace["name"]
+ project = trace["project"]
+ service = trace["service"]
+ host = trace["info"]["host"]
+ timestamp = trace["timestamp"]
+
+ self._append_results(
+ trace_id, parent_id, name, project, service, host,
+ timestamp, trace)
+ break
+
+ return self._parse_results()
+
+
+class LogInsightClient(object):
+ """A minimal Log Insight client."""
+
+ LI_OSPROFILER_AGENT_ID = "F52D775B-6017-4787-8C8A-F21AE0AEC057"
+
+ # API paths
+ SESSIONS_PATH = "api/v1/sessions"
+ CURRENT_SESSIONS_PATH = "api/v1/sessions/current"
+ EVENTS_INGEST_PATH = "api/v1/events/ingest/%s" % LI_OSPROFILER_AGENT_ID
+ QUERY_EVENTS_BASE_PATH = "api/v1/events"
+
+ def __init__(self, host, username, password, api_port=9000,
+ api_ssl_port=9543, query_timeout=60000):
+ self._host = host
+ self._username = username
+ self._password = password
+ self._api_port = api_port
+ self._api_ssl_port = api_ssl_port
+ self._query_timeout = query_timeout
+ self._session = requests.Session()
+ self._session_id = None
+
+ def _build_base_url(self, scheme):
+ proto_str = "%s://" % scheme
+ host_str = ("[%s]" % self._host if netaddr.valid_ipv6(self._host)
+ else self._host)
+ port_str = ":%d" % (self._api_ssl_port if scheme == "https"
+ else self._api_port)
+ return proto_str + host_str + port_str
+
+ def _check_response(self, resp):
+ if resp.status_code == 440:
+ raise exc.LogInsightLoginTimeout()
+
+ if not resp.ok:
+ msg = "n/a"
+ if resp.text:
+ try:
+ body = json.loads(resp.text)
+ msg = body.get("errorMessage", msg)
+ except ValueError:
+ pass
+ else:
+ msg = resp.reason
+ raise exc.LogInsightAPIError(msg)
+
+ def _send_request(
+ self, method, scheme, path, headers=None, body=None, params=None):
+ url = "%s/%s" % (self._build_base_url(scheme), path)
+
+ headers = headers or {}
+ headers["content-type"] = "application/json"
+ body = body or {}
+ params = params or {}
+
+ req = requests.Request(
+ method, url, headers=headers, data=json.dumps(body), params=params)
+ req = req.prepare()
+ resp = self._session.send(req, verify=False)
+
+ self._check_response(resp)
+ return resp.json()
+
+ def _get_auth_header(self):
+ return {"X-LI-Session-Id": self._session_id}
+
+ def _trunc_session_id(self):
+ if self._session_id:
+ return self._session_id[-5:]
+
+ def _is_current_session_active(self):
+ try:
+ self._send_request("get",
+ "https",
+ self.CURRENT_SESSIONS_PATH,
+ headers=self._get_auth_header())
+ LOG.debug("Current session %s is active.",
+ self._trunc_session_id())
+ return True
+ except (exc.LogInsightLoginTimeout, exc.LogInsightAPIError):
+ LOG.debug("Current session %s is not active.",
+ self._trunc_session_id())
+ return False
+
+ @synchronized("li_login_lock")
+ def login(self):
+ # Another thread might have created the session while the current
+ # thread was waiting for the lock.
+ if self._session_id and self._is_current_session_active():
+ return
+
+ LOG.info("Logging into Log Insight server: %s.", self._host)
+ resp = self._send_request("post",
+ "https",
+ self.SESSIONS_PATH,
+ body={"username": self._username,
+ "password": self._password})
+
+ self._session_id = resp["sessionId"]
+ LOG.debug("Established session %s.", self._trunc_session_id())
+
+ def send_event(self, event):
+ events = {"events": [event]}
+ self._send_request("post",
+ "http",
+ self.EVENTS_INGEST_PATH,
+ body=events)
+
+ def query_events(self, params):
+ # Assumes that the keys and values in the params are strings and
+ # the operator is "CONTAINS".
+ constraints = []
+ for field, value in params.items():
+ constraints.append("%s/CONTAINS+%s" % (field, value))
+ constraints.append("timestamp/GT+0")
+
+ path = "%s/%s" % (self.QUERY_EVENTS_BASE_PATH, "/".join(constraints))
+
+ def _query_events():
+ return self._send_request("get",
+ "https",
+ path,
+ headers=self._get_auth_header(),
+ params={"limit": 20000,
+ "timeout": self._query_timeout})
+ try:
+ resp = _query_events()
+ except exc.LogInsightLoginTimeout:
+ # Login again and re-try.
+ LOG.debug("Current session timed out.")
+ self.login()
+ resp = _query_events()
+
+ return resp
diff --git a/osprofiler/drivers/messaging.py b/osprofiler/drivers/messaging.py
new file mode 100644
index 0000000000000000000000000000000000000000..3381cf64cd19fb309d149e61b6d2fae3908bda59
--- /dev/null
+++ b/osprofiler/drivers/messaging.py
@@ -0,0 +1,196 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import signal
+import time
+
+from oslo_utils import importutils
+
+from osprofiler.drivers import base
+
+
+class Messaging(base.Driver):
+ def __init__(self, connection_str, project=None, service=None, host=None,
+ context=None, conf=None, transport_url=None,
+ idle_timeout=1, **kwargs):
+ """Driver that uses messaging as transport for notifications
+
+ :param connection_str: OSProfiler driver connection string,
+ equals to messaging://
+ :param project: project name that will be included into notification
+ :param service: service name that will be included into notification
+ :param host: host name that will be included into notification
+ :param context: oslo.messaging context
+ :param conf: oslo.config CONF object
+ :param transport_url: oslo.messaging transport, e.g.
+ rabbit://rabbit:password@devstack:5672/
+ :param idle_timeout: how long to wait for new notifications after
+ the last one seen in the trace; this parameter is useful to
+ collect full trace of asynchronous commands, e.g. when user
+ runs `osprofiler` right after `openstack server create`
+ :param kwargs: black hole for any other parameters
+ """
+
+ self.oslo_messaging = importutils.try_import("oslo_messaging")
+ if not self.oslo_messaging:
+ raise ValueError("Oslo.messaging library is required for "
+ "messaging driver")
+
+ super(Messaging, self).__init__(connection_str, project=project,
+ service=service, host=host)
+
+ self.context = context
+
+ if not conf:
+ oslo_config = importutils.try_import("oslo_config")
+ if not oslo_config:
+ raise ValueError("Oslo.config library is required for "
+ "messaging driver")
+ conf = oslo_config.cfg.CONF
+
+ transport_kwargs = {}
+ if transport_url:
+ transport_kwargs["url"] = transport_url
+
+ self.transport = self.oslo_messaging.get_notification_transport(
+ conf, **transport_kwargs)
+ self.client = self.oslo_messaging.Notifier(
+ self.transport, publisher_id=self.host, driver="messaging",
+ topics=["profiler"], retry=0)
+
+ self.idle_timeout = idle_timeout
+
+ @classmethod
+ def get_name(cls):
+ return "messaging"
+
+ def notify(self, info, context=None):
+ """Send notifications to backend via oslo.messaging notifier API.
+
+ :param info: Contains information about trace element.
+ In payload dict there are always 3 ids:
+ "base_id" - uuid that is common for all notifications
+ related to one trace.
+ "parent_id" - uuid of parent element in trace
+ "trace_id" - uuid of current element in trace
+
+ With parent_id and trace_id it's quite simple to build
+            tree of trace elements, which simplifies analysis of the trace.
+
+ :param context: request context that is mostly used to specify
+ current active user and tenant.
+ """
+
+ info["project"] = self.project
+ info["service"] = self.service
+ self.client.info(context or self.context,
+ "profiler.%s" % info["service"],
+ info)
+
+ def get_report(self, base_id):
+ notification_endpoint = NotifyEndpoint(self.oslo_messaging, base_id)
+ endpoints = [notification_endpoint]
+ targets = [self.oslo_messaging.Target(topic="profiler")]
+ server = self.oslo_messaging.notify.get_notification_listener(
+ self.transport, targets, endpoints, executor="threading")
+
+ state = dict(running=False)
+ sfn = functools.partial(signal_handler, state=state)
+
+ # modify signal handlers to handle interruption gracefully
+ old_sigterm_handler = signal.signal(signal.SIGTERM, sfn)
+ old_sigint_handler = signal.signal(signal.SIGINT, sfn)
+
+ try:
+ server.start()
+ except self.oslo_messaging.server.ServerListenError:
+ # failed to start the server
+ raise
+ except SignalExit:
+ print("Execution interrupted while trying to connect to "
+ "messaging server. No data was collected.")
+ return {}
+
+ # connected to server, now read the data
+ try:
+ # run until the trace is complete
+ state["running"] = True
+
+ while state["running"]:
+ last_read_time = notification_endpoint.get_last_read_time()
+ wait = self.idle_timeout - (time.time() - last_read_time)
+ if wait < 0:
+ state["running"] = False
+ else:
+ time.sleep(wait)
+ except SignalExit:
+ print("Execution interrupted. Terminating")
+ finally:
+ server.stop()
+ server.wait()
+
+ # restore original signal handlers
+ signal.signal(signal.SIGTERM, old_sigterm_handler)
+ signal.signal(signal.SIGINT, old_sigint_handler)
+
+ events = notification_endpoint.get_messages()
+
+ if not events:
+ print("No events are collected for Trace UUID %s. Please note "
+ "that osprofiler has read ALL events from profiler topic, "
+ "but has not found any for specified Trace UUID." % base_id)
+
+ for n in events:
+ trace_id = n["trace_id"]
+ parent_id = n["parent_id"]
+ name = n["name"]
+ project = n["project"]
+ service = n["service"]
+ host = n["info"]["host"]
+ timestamp = n["timestamp"]
+
+ self._append_results(trace_id, parent_id, name, project, service,
+ host, timestamp, n)
+
+ return self._parse_results()
+
+
+class NotifyEndpoint(object):
+
+ def __init__(self, oslo_messaging, base_id):
+ self.received_messages = []
+ self.last_read_time = time.time()
+ self.filter_rule = oslo_messaging.NotificationFilter(
+ payload={"base_id": base_id})
+
+ def info(self, ctxt, publisher_id, event_type, payload, metadata):
+ self.received_messages.append(payload)
+ self.last_read_time = time.time()
+
+ def get_messages(self):
+ return self.received_messages
+
+ def get_last_read_time(self):
+ return self.last_read_time # time when the latest event was received
+
+
+class SignalExit(BaseException):
+ pass
+
+
+def signal_handler(signum, frame, state):
+ state["running"] = False
+ raise SignalExit()
diff --git a/osprofiler/drivers/mongodb.py b/osprofiler/drivers/mongodb.py
new file mode 100644
index 0000000000000000000000000000000000000000..86119e4f0d1c81f869c10759c54adaa587626893
--- /dev/null
+++ b/osprofiler/drivers/mongodb.py
@@ -0,0 +1,112 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from osprofiler.drivers import base
+from osprofiler import exc
+
+
+class MongoDB(base.Driver):
+ def __init__(self, connection_str, db_name="osprofiler", project=None,
+ service=None, host=None, **kwargs):
+ """MongoDB driver for OSProfiler."""
+
+ super(MongoDB, self).__init__(connection_str, project=project,
+ service=service, host=host, **kwargs)
+ try:
+ from pymongo import MongoClient
+ except ImportError:
+ raise exc.CommandError(
+ "To use OSProfiler with MongoDB driver, "
+ "please install `pymongo` library. "
+ "To install with pip:\n `pip install pymongo`.")
+
+ client = MongoClient(self.connection_str, connect=False)
+ self.db = client[db_name]
+
+ @classmethod
+ def get_name(cls):
+ return "mongodb"
+
+ def notify(self, info):
+ """Send notifications to MongoDB.
+
+ :param info: Contains information about trace element.
+ In payload dict there are always 3 ids:
+ "base_id" - uuid that is common for all notifications
+ related to one trace. Used to simplify
+ retrieving of all trace elements from
+ MongoDB.
+ "parent_id" - uuid of parent element in trace
+ "trace_id" - uuid of current element in trace
+
+ With parent_id and trace_id it's quite simple to build
+            tree of trace elements, which simplifies analysis of the trace.
+
+ """
+ data = info.copy()
+ data["project"] = self.project
+ data["service"] = self.service
+ self.db.profiler.insert_one(data)
+
+ if (self.filter_error_trace
+ and data.get("info", {}).get("etype") is not None):
+ self.notify_error_trace(data)
+
+ def notify_error_trace(self, data):
+ """Store base_id and timestamp of error trace to a separate db."""
+ self.db.profiler_error.update(
+ {"base_id": data["base_id"]},
+ {"base_id": data["base_id"], "timestamp": data["timestamp"]},
+ upsert=True
+ )
+
+ def list_traces(self, fields=None):
+ """Query all traces from the storage.
+
+ :param fields: Set of trace fields to return. Defaults to 'base_id'
+ and 'timestamp'
+        :returns: List of traces, where each trace is a dictionary containing
+ at least `base_id` and `timestamp`.
+ """
+ fields = set(fields or self.default_trace_fields)
+ ids = self.db.profiler.find({}).distinct("base_id")
+ out_format = {"base_id": 1, "timestamp": 1, "_id": 0}
+ out_format.update({i: 1 for i in fields})
+ return [self.db.profiler.find(
+ {"base_id": i}, out_format).sort("timestamp")[0] for i in ids]
+
+ def list_error_traces(self):
+ """Returns all traces that have error/exception."""
+ out_format = {"base_id": 1, "timestamp": 1, "_id": 0}
+ return self.db.profiler_error.find({}, out_format)
+
+ def get_report(self, base_id):
+ """Retrieves and parses notification from MongoDB.
+
+ :param base_id: Base id of trace elements.
+ """
+ for n in self.db.profiler.find({"base_id": base_id}, {"_id": 0}):
+ trace_id = n["trace_id"]
+ parent_id = n["parent_id"]
+ name = n["name"]
+ project = n["project"]
+ service = n["service"]
+ host = n["info"]["host"]
+ timestamp = n["timestamp"]
+
+ self._append_results(trace_id, parent_id, name, project, service,
+ host, timestamp, n)
+
+ return self._parse_results()
diff --git a/osprofiler/drivers/redis_driver.py b/osprofiler/drivers/redis_driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c1fc6246e2413b9b6a382f99c20b97136527c8f
--- /dev/null
+++ b/osprofiler/drivers/redis_driver.py
@@ -0,0 +1,204 @@
+# Copyright 2016 Mirantis Inc.
+# Copyright 2016 IBM Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from debtcollector import removals
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+import six.moves.urllib.parse as parser
+
+from osprofiler.drivers import base
+from osprofiler import exc
+
+
+class Redis(base.Driver):
+ @removals.removed_kwarg("db", message="'db' parameter is deprecated "
+ "and will be removed in future. "
+ "Please specify 'db' in "
+ "'connection_string' instead.")
+ def __init__(self, connection_str, db=0, project=None,
+ service=None, host=None, conf=cfg.CONF, **kwargs):
+ """Redis driver for OSProfiler."""
+
+ super(Redis, self).__init__(connection_str, project=project,
+ service=service, host=host,
+ conf=conf, **kwargs)
+ try:
+ from redis import StrictRedis
+ except ImportError:
+ raise exc.CommandError(
+ "To use OSProfiler with Redis driver, "
+ "please install `redis` library. "
+ "To install with pip:\n `pip install redis`.")
+
+ # only connection over network is supported with schema
+ # redis://[:password]@host[:port][/db]
+ self.db = StrictRedis.from_url(self.connection_str)
+ self.namespace_opt = "osprofiler_opt:"
+ self.namespace = "osprofiler:" # legacy
+ self.namespace_error = "osprofiler_error:"
+
+ @classmethod
+ def get_name(cls):
+ return "redis"
+
+ def notify(self, info):
+ """Send notifications to Redis.
+
+ :param info: Contains information about trace element.
+ In payload dict there are always 3 ids:
+ "base_id" - uuid that is common for all notifications
+ related to one trace. Used to simplify
+ retrieving of all trace elements from
+ Redis.
+ "parent_id" - uuid of parent element in trace
+ "trace_id" - uuid of current element in trace
+
+ With parent_id and trace_id it's quite simple to build
+            tree of trace elements, which simplifies analysis of the trace.
+
+ """
+ data = info.copy()
+ data["project"] = self.project
+ data["service"] = self.service
+ key = self.namespace_opt + data["base_id"]
+ self.db.lpush(key, jsonutils.dumps(data))
+
+ if (self.filter_error_trace
+ and data.get("info", {}).get("etype") is not None):
+ self.notify_error_trace(data)
+
+ def notify_error_trace(self, data):
+ """Store base_id and timestamp of error trace to a separate key."""
+ key = self.namespace_error + data["base_id"]
+ value = jsonutils.dumps({
+ "base_id": data["base_id"],
+ "timestamp": data["timestamp"]
+ })
+ self.db.set(key, value)
+
+ def list_traces(self, fields=None):
+ """Query all traces from the storage.
+
+ :param fields: Set of trace fields to return. Defaults to 'base_id'
+ and 'timestamp'
+        :returns: List of traces, where each trace is a dictionary containing
+ at least `base_id` and `timestamp`.
+ """
+ fields = set(fields or self.default_trace_fields)
+
+ # first get legacy events
+ result = self._list_traces_legacy(fields)
+
+ # with optimized schema trace events are stored in a list
+ ids = self.db.scan_iter(match=self.namespace_opt + "*")
+ for i in ids:
+ # for each trace query the first event to have a timestamp
+ first_event = jsonutils.loads(self.db.lindex(i, 1))
+ result.append({key: value for key, value in first_event.items()
+ if key in fields})
+ return result
+
+ def _list_traces_legacy(self, fields):
+ # With current schema every event is stored under its own unique key
+ # To query all traces we first need to get all keys, then
+ # get all events, sort them and pick up only the first one
+ ids = self.db.scan_iter(match=self.namespace + "*")
+ traces = [jsonutils.loads(self.db.get(i)) for i in ids]
+ traces.sort(key=lambda x: x["timestamp"])
+ seen_ids = set()
+ result = []
+ for trace in traces:
+ if trace["base_id"] not in seen_ids:
+ seen_ids.add(trace["base_id"])
+ result.append({key: value for key, value in trace.items()
+ if key in fields})
+ return result
+
+ def list_error_traces(self):
+ """Returns all traces that have error/exception."""
+ ids = self.db.scan_iter(match=self.namespace_error + "*")
+ traces = [jsonutils.loads(self.db.get(i)) for i in ids]
+ traces.sort(key=lambda x: x["timestamp"])
+ seen_ids = set()
+ result = []
+ for trace in traces:
+ if trace["base_id"] not in seen_ids:
+ seen_ids.add(trace["base_id"])
+ result.append(trace)
+
+ return result
+
+ def get_report(self, base_id):
+ """Retrieves and parses notification from Redis.
+
+ :param base_id: Base id of trace elements.
+ """
+ def iterate_events():
+ for key in self.db.scan_iter(
+ match=self.namespace + base_id + "*"): # legacy
+ yield self.db.get(key)
+
+ for event in self.db.lrange(self.namespace_opt + base_id, 0, -1):
+ yield event
+
+ for data in iterate_events():
+ n = jsonutils.loads(data)
+ trace_id = n["trace_id"]
+ parent_id = n["parent_id"]
+ name = n["name"]
+ project = n["project"]
+ service = n["service"]
+ host = n["info"]["host"]
+ timestamp = n["timestamp"]
+
+ self._append_results(trace_id, parent_id, name, project, service,
+ host, timestamp, n)
+
+ return self._parse_results()
+
+
+class RedisSentinel(Redis, base.Driver):
+ @removals.removed_kwarg("db", message="'db' parameter is deprecated "
+ "and will be removed in future. "
+ "Please specify 'db' in "
+ "'connection_string' instead.")
+ def __init__(self, connection_str, db=0, project=None,
+ service=None, host=None, conf=cfg.CONF, **kwargs):
+ """Redis driver for OSProfiler."""
+
+ super(RedisSentinel, self).__init__(connection_str, project=project,
+ service=service, host=host,
+ conf=conf, **kwargs)
+ try:
+ from redis.sentinel import Sentinel
+ except ImportError:
+ raise exc.CommandError(
+ "To use this command, you should install "
+ "'redis' manually. Use command:\n "
+ "'pip install redis'.")
+
+ self.conf = conf
+ socket_timeout = self.conf.profiler.socket_timeout
+ parsed_url = parser.urlparse(self.connection_str)
+ sentinel = Sentinel([(parsed_url.hostname, int(parsed_url.port))],
+ password=parsed_url.password,
+ socket_timeout=socket_timeout)
+ self.db = sentinel.master_for(self.conf.profiler.sentinel_service_name,
+ socket_timeout=socket_timeout)
+
+ @classmethod
+ def get_name(cls):
+ return "redissentinel"
diff --git a/osprofiler/drivers/sqlalchemy_driver.py b/osprofiler/drivers/sqlalchemy_driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..daab1d052695503255712ece58c77f9b2f2fdf95
--- /dev/null
+++ b/osprofiler/drivers/sqlalchemy_driver.py
@@ -0,0 +1,142 @@
+# Copyright 2019 SUSE Linux GmbH
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from oslo_serialization import jsonutils
+
+from osprofiler.drivers import base
+from osprofiler import exc
+
+LOG = logging.getLogger(__name__)
+
+
+class SQLAlchemyDriver(base.Driver):
+ def __init__(self, connection_str, project=None, service=None, host=None,
+ **kwargs):
+ super(SQLAlchemyDriver, self).__init__(connection_str, project=project,
+ service=service, host=host)
+
+ try:
+ from sqlalchemy import create_engine
+ from sqlalchemy import Table, MetaData, Column
+ from sqlalchemy import String, JSON, Integer
+ except ImportError:
+ LOG.exception("To use this command, install 'SQLAlchemy'")
+ else:
+ self._metadata = MetaData()
+ self._data_table = Table(
+ "data", self._metadata,
+ Column("id", Integer, primary_key=True),
+ # timestamp - date/time of the trace point
+ Column("timestamp", String(26), index=True),
+ # base_id - uuid common for all notifications related to
+ # one trace
+ Column("base_id", String(255), index=True),
+ # parent_id - uuid of parent element in trace
+ Column("parent_id", String(255), index=True),
+ # trace_id - uuid of current element in trace
+ Column("trace_id", String(255), index=True),
+ Column("project", String(255), index=True),
+ Column("host", String(255), index=True),
+ Column("service", String(255), index=True),
+ # name - trace point name
+ Column("name", String(255), index=True),
+ Column("data", JSON)
+ )
+
+ # we don't want to kill any service that does use osprofiler
+ try:
+ self._engine = create_engine(connection_str)
+ self._conn = self._engine.connect()
+
+ # FIXME(toabctl): Not the best idea to create the table on every
+ # startup when using the sqlalchemy driver...
+ self._metadata.create_all(self._engine, checkfirst=True)
+ except Exception:
+ LOG.exception("Failed to create engine/connection and setup "
+ "intial database tables")
+
+ @classmethod
+ def get_name(cls):
+ return "sqlalchemy"
+
+ def notify(self, info, context=None):
+ """Write a notification the the database"""
+ data = info.copy()
+ base_id = data.pop("base_id", None)
+ timestamp = data.pop("timestamp", None)
+ parent_id = data.pop("parent_id", None)
+ trace_id = data.pop("trace_id", None)
+ project = data.pop("project", self.project)
+ host = data.pop("host", self.host)
+ service = data.pop("service", self.service)
+ name = data.pop("name", None)
+
+ try:
+ ins = self._data_table.insert().values(
+ timestamp=timestamp,
+ base_id=base_id,
+ parent_id=parent_id,
+ trace_id=trace_id,
+ project=project,
+ service=service,
+ host=host,
+ name=name,
+ data=jsonutils.dumps(data)
+ )
+ self._conn.execute(ins)
+ except Exception:
+ LOG.exception("Can not store osprofiler tracepoint {} "
+ "(base_id {})".format(trace_id, base_id))
+
+ def list_traces(self, fields=None):
+ try:
+ from sqlalchemy.sql import select
+ except ImportError:
+ raise exc.CommandError(
+ "To use this command, you should install 'SQLAlchemy'")
+ stmt = select([self._data_table])
+ seen_ids = set()
+ result = []
+ traces = self._conn.execute(stmt).fetchall()
+ for trace in traces:
+ if trace["base_id"] not in seen_ids:
+ seen_ids.add(trace["base_id"])
+ result.append({key: value for key, value in trace.items()
+ if key in fields})
+ return result
+
+ def get_report(self, base_id):
+ try:
+ from sqlalchemy.sql import select
+ except ImportError:
+ raise exc.CommandError(
+ "To use this command, you should install 'SQLAlchemy'")
+ stmt = select([self._data_table]).where(
+ self._data_table.c.base_id == base_id)
+ results = self._conn.execute(stmt).fetchall()
+ for n in results:
+ timestamp = n["timestamp"]
+ trace_id = n["trace_id"]
+ parent_id = n["parent_id"]
+ name = n["name"]
+ project = n["project"]
+ service = n["service"]
+ host = n["host"]
+ data = jsonutils.loads(n["data"])
+ self._append_results(trace_id, parent_id, name, project, service,
+ host, timestamp, data)
+ return self._parse_results()
diff --git a/osprofiler/cmd/exc.py b/osprofiler/exc.py
similarity index 89%
rename from osprofiler/cmd/exc.py
rename to osprofiler/exc.py
index 0ffc9c9b61a5697cd70115cafda5fdf34898b870..0f2fa33acb6eea2a347272914ef95f184dd43bbc 100644
--- a/osprofiler/cmd/exc.py
+++ b/osprofiler/exc.py
@@ -22,3 +22,11 @@ class CommandError(Exception):
def __str__(self):
return self.message or self.__class__.__doc__
+
+
+class LogInsightAPIError(Exception):
+ pass
+
+
+class LogInsightLoginTimeout(Exception):
+ pass
diff --git a/osprofiler/_notifiers/__init__.py b/osprofiler/hacking/__init__.py
similarity index 100%
rename from osprofiler/_notifiers/__init__.py
rename to osprofiler/hacking/__init__.py
diff --git a/osprofiler/hacking/checks.py b/osprofiler/hacking/checks.py
new file mode 100644
index 0000000000000000000000000000000000000000..2723210d7e380244db70b6edf52aa853046b29d2
--- /dev/null
+++ b/osprofiler/hacking/checks.py
@@ -0,0 +1,382 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Guidelines for writing new hacking checks
+
+ - Use only for OSProfiler specific tests. OpenStack general tests
+ should be submitted to the common 'hacking' module.
+ - Pick numbers in the range N3xx. Find the current test with
+ the highest allocated number and then pick the next value.
+ - Keep the test method code in the source file ordered based
+ on the N3xx value.
+ - List the new rule in the top level HACKING.rst file
+ - Add test cases for each new rule to tests/unit/test_hacking.py
+
+"""
+
+import functools
+import re
+import tokenize
+
+from hacking import core
+
+re_assert_true_instance = re.compile(
+ r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
+ r"(\w|\.|\'|\"|\[|\])+\)\)")
+re_assert_equal_type = re.compile(
+ r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
+ r"(\w|\.|\'|\"|\[|\])+\)")
+re_assert_equal_end_with_none = re.compile(r"assertEqual\(.*?,\s+None\)$")
+re_assert_equal_start_with_none = re.compile(r"assertEqual\(None,")
+re_assert_true_false_with_in_or_not_in = re.compile(
+ r"assert(True|False)\("
+ r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
+re_assert_true_false_with_in_or_not_in_spaces = re.compile(
+ r"assert(True|False)\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
+ r"[\[|'|\"](, .*)?\)")
+re_assert_equal_in_end_with_true_or_false = re.compile(
+ r"assertEqual\((\w|[][.'\"])+( not)? in (\w|[][.'\", ])+, (True|False)\)")
+re_assert_equal_in_start_with_true_or_false = re.compile(
+ r"assertEqual\((True|False), (\w|[][.'\"])+( not)? in (\w|[][.'\", ])+\)")
+re_no_construct_dict = re.compile(
+ r"\sdict\(\)")
+re_no_construct_list = re.compile(
+ r"\slist\(\)")
+re_str_format = re.compile(r"""
+% # start of specifier
+\(([^)]+)\) # mapping key, in group 1
+[#0 +\-]? # optional conversion flag
+(?:-?\d*)? # optional minimum field width
+(?:\.\d*)? # optional precision
+[hLl]? # optional length modifier
+[A-z%] # conversion modifier
+""", re.X)
+re_raises = re.compile(
+ r"\s:raise[^s] *.*$|\s:raises *:.*$|\s:raises *[^:]+$")
+
+
+@core.flake8ext
+def skip_ignored_lines(func):
+
+ @functools.wraps(func)
+ def wrapper(logical_line, filename):
+ line = logical_line.strip()
+ if not line or line.startswith("#") or line.endswith("# noqa"):
+ return
+ try:
+ yield next(func(logical_line, filename))
+ except StopIteration:
+ return
+
+ return wrapper
+
+
+def _parse_assert_mock_str(line):
+ point = line.find(".assert_")
+
+ if point != -1:
+ end_pos = line[point:].find("(") + point
+ return point, line[point + 1: end_pos], line[: point]
+ else:
+ return None, None, None
+
+
+@skip_ignored_lines
+@core.flake8ext
+def check_assert_methods_from_mock(logical_line, filename):
+ """Ensure that ``assert_*`` methods from ``mock`` library is used correctly
+
+ N301 - base error number
+ N302 - related to nonexistent "assert_called"
+ N303 - related to nonexistent "assert_called_once"
+ """
+
+ correct_names = ["assert_any_call", "assert_called_once_with",
+ "assert_called_with", "assert_has_calls"]
+ ignored_files = ["./tests/unit/test_hacking.py"]
+
+ if filename.startswith("./tests") and filename not in ignored_files:
+ pos, method_name, obj_name = _parse_assert_mock_str(logical_line)
+
+ if pos:
+ if method_name not in correct_names:
+ error_number = "N301"
+ msg = ("%(error_number)s:'%(method)s' is not present in `mock`"
+ " library. %(custom_msg)s For more details, visit "
+ "http://www.voidspace.org.uk/python/mock/ .")
+
+ if method_name == "assert_called":
+ error_number = "N302"
+ custom_msg = ("Maybe, you should try to use "
+ "'assertTrue(%s.called)' instead." %
+ obj_name)
+ elif method_name == "assert_called_once":
+ # For more details, see a bug in Rally:
+ # https://bugs.launchpad.net/rally/+bug/1305991
+ error_number = "N303"
+ custom_msg = ("Maybe, you should try to use "
+ "'assertEqual(1, %s.call_count)' "
+ "or '%s.assert_called_once_with()'"
+ " instead." % (obj_name, obj_name))
+ else:
+ custom_msg = ("Correct 'assert_*' methods: '%s'."
+ % "', '".join(correct_names))
+
+ yield (pos, msg % {
+ "error_number": error_number,
+ "method": method_name,
+ "custom_msg": custom_msg})
+
+
+@skip_ignored_lines
+@core.flake8ext
+def assert_true_instance(logical_line, filename):
+ """Check for assertTrue(isinstance(a, b)) sentences
+
+ N320
+ """
+ if re_assert_true_instance.match(logical_line):
+ yield (0, "N320 assertTrue(isinstance(a, b)) sentences not allowed, "
+ "you should use assertIsInstance(a, b) instead.")
+
+
+@skip_ignored_lines
+@core.flake8ext
+def assert_equal_type(logical_line, filename):
+ """Check for assertEqual(type(A), B) sentences
+
+ N321
+ """
+ if re_assert_equal_type.match(logical_line):
+ yield (0, "N321 assertEqual(type(A), B) sentences not allowed, "
+ "you should use assertIsInstance(a, b) instead.")
+
+
+@skip_ignored_lines
+@core.flake8ext
+def assert_equal_none(logical_line, filename):
+ """Check for assertEqual(A, None) or assertEqual(None, A) sentences
+
+ N322
+ """
+ res = (re_assert_equal_start_with_none.search(logical_line)
+ or re_assert_equal_end_with_none.search(logical_line))
+ if res:
+ yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) "
+ "sentences not allowed, you should use assertIsNone(A) "
+ "instead.")
+
+
+@skip_ignored_lines
+@core.flake8ext
+def assert_true_or_false_with_in(logical_line, filename):
+ """Check assertTrue/False(A in/not in B) with collection contents
+
+ Check for assertTrue/False(A in B), assertTrue/False(A not in B),
+ assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
+ sentences.
+
+ N323
+ """
+ res = (re_assert_true_false_with_in_or_not_in.search(logical_line)
+ or re_assert_true_false_with_in_or_not_in_spaces.search(
+ logical_line))
+ if res:
+ yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not "
+ "allowed, you should use assertIn(A, B) or assertNotIn(A, B)"
+ " instead.")
+
+
+@skip_ignored_lines
+@core.flake8ext
+def assert_equal_in(logical_line, filename):
+ """Check assertEqual(A in/not in B, True/False) with collection contents
+
+ Check for assertEqual(A in B, True/False), assertEqual(True/False, A in B),
+ assertEqual(A not in B, True/False) or assertEqual(True/False, A not in B)
+ sentences.
+
+ N324
+ """
+ res = (re_assert_equal_in_end_with_true_or_false.search(logical_line)
+ or re_assert_equal_in_start_with_true_or_false.search(logical_line))
+ if res:
+ yield (0, "N324: Use assertIn/NotIn(A, B) rather than "
+ "assertEqual(A in/not in B, True/False) when checking "
+ "collection contents.")
+
+
+@skip_ignored_lines
+@core.flake8ext
+def check_quotes(logical_line, filename):
+ """Check that single quotation marks are not used
+
+ N350
+ """
+
+ in_string = False
+ in_multiline_string = False
+ single_quotas_are_used = False
+
+ check_tripple = (
+ lambda line, i, char: (
+ i + 2 < len(line)
+ and (char == line[i] == line[i + 1] == line[i + 2])
+ )
+ )
+
+ i = 0
+ while i < len(logical_line):
+ char = logical_line[i]
+
+ if in_string:
+ if char == "\"":
+ in_string = False
+ if char == "\\":
+ i += 1 # ignore next char
+
+ elif in_multiline_string:
+ if check_tripple(logical_line, i, "\""):
+ i += 2 # skip next 2 chars
+ in_multiline_string = False
+
+ elif char == "#":
+ break
+
+ elif char == "'":
+ single_quotas_are_used = True
+ break
+
+ elif char == "\"":
+ if check_tripple(logical_line, i, "\""):
+ in_multiline_string = True
+ i += 3
+ continue
+ in_string = True
+
+ i += 1
+
+ if single_quotas_are_used:
+ yield (i, "N350 Remove Single quotes")
+
+
+@skip_ignored_lines
+@core.flake8ext
+def check_no_constructor_data_struct(logical_line, filename):
+ """Check that data structs (lists, dicts) are declared using literals
+
+ N351
+ """
+
+ match = re_no_construct_dict.search(logical_line)
+ if match:
+ yield (0, "N351 Remove dict() construct and use literal {}")
+ match = re_no_construct_list.search(logical_line)
+ if match:
+ yield (0, "N351 Remove list() construct and use literal []")
+
+
+@core.flake8ext
+def check_dict_formatting_in_string(logical_line, tokens):
+ """Check that strings do not use dict-formatting with a single replacement
+
+ N352
+ """
+ # NOTE(stpierre): Can't use @skip_ignored_lines here because it's
+ # a stupid decorator that only works on functions that take
+ # (logical_line, filename) as arguments.
+ if (not logical_line
+ or logical_line.startswith("#")
+ or logical_line.endswith("# noqa")):
+ return
+
+ current_string = ""
+ in_string = False
+ for token_type, text, start, end, line in tokens:
+ if token_type == tokenize.STRING:
+ if not in_string:
+ current_string = ""
+ in_string = True
+ current_string += text.strip("\"")
+ elif token_type == tokenize.OP:
+ if not current_string:
+ continue
+ # NOTE(stpierre): The string formatting operator % has
+ # lower precedence than +, so we assume that the logical
+ # string has concluded whenever we hit an operator of any
+ # sort. (Most operators don't work for strings anyway.)
+ # Some string operators do have higher precedence than %,
+ # though, so you can technically trick this check by doing
+ # things like:
+ #
+ # "%(foo)s" * 1 % {"foo": 1}
+ # "%(foo)s"[:] % {"foo": 1}
+ #
+ # It also will produce false positives if you use explicit
+ # parenthesized addition for two strings instead of
+ # concatenation by juxtaposition, e.g.:
+ #
+ # ("%(foo)s" + "%(bar)s") % vals
+ #
+ # But if you do any of those things, then you deserve all
+ # of the horrible things that happen to you, and probably
+ # many more.
+ in_string = False
+ if text == "%":
+ format_keys = set()
+ for match in re_str_format.finditer(current_string):
+ format_keys.add(match.group(1))
+ if len(format_keys) == 1:
+ yield (0,
+ "N352 Do not use mapping key string formatting "
+ "with a single key")
+ if text != ")":
+ # NOTE(stpierre): You can have a parenthesized string
+ # followed by %, so a closing paren doesn't obviate
+ # the possibility for a substitution operator like
+ # every other operator does.
+ current_string = ""
+ elif token_type in (tokenize.NL, tokenize.COMMENT):
+ continue
+ else:
+ in_string = False
+ if token_type == tokenize.NEWLINE:
+ current_string = ""
+
+
+@skip_ignored_lines
+@core.flake8ext
+def check_using_unicode(logical_line, filename):
+ """Check crosspython unicode usage
+
+ N353
+ """
+
+ if re.search(r"\bunicode\(", logical_line):
+ yield (0, "N353 'unicode' function is absent in python3. Please "
+ "use 'six.text_type' instead.")
+
+
+@core.flake8ext
+def check_raises(physical_line, filename):
+ """Check raises usage
+
+ N354
+ """
+
+ ignored_files = ["./tests/unit/test_hacking.py",
+ "./tests/hacking/checks.py"]
+ if filename not in ignored_files:
+ if re_raises.search(physical_line):
+ return (0, "N354 ':Please use ':raises Exception: conditions' "
+ "in docstrings.")
diff --git a/osprofiler/initializer.py b/osprofiler/initializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4befdd6b4bed17e65c58cf2afd7a216a6af59450
--- /dev/null
+++ b/osprofiler/initializer.py
@@ -0,0 +1,41 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from osprofiler import notifier
+from osprofiler import web
+
+
+def init_from_conf(conf, context, project, service, host, **kwargs):
+ """Initialize notifier from service configuration
+
+ :param conf: service configuration
+ :param context: request context
+ :param project: project name (keystone, cinder etc.)
+ :param service: service name that will be profiled
+ :param host: hostname or host IP address that the service will be
+ running on.
+ :param kwargs: other arguments for notifier creation
+ """
+ connection_str = conf.profiler.connection_string
+ _notifier = notifier.create(
+ connection_str,
+ context=context,
+ project=project,
+ service=service,
+ host=host,
+ conf=conf,
+ **kwargs)
+ notifier.set(_notifier)
+ web.enable(conf.profiler.hmac_keys)
diff --git a/osprofiler/notifier.py b/osprofiler/notifier.py
index 97efc91c4ece2a1710ef34fc9e35daf762aa9acc..8b909f0d3626c276a7ae0d63facd069655bd9d77 100644
--- a/osprofiler/notifier.py
+++ b/osprofiler/notifier.py
@@ -13,7 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from osprofiler._notifiers import base
+import logging
+
+from osprofiler.drivers import base
+
+
+LOG = logging.getLogger(__name__)
def _noop_notifier(info, context=None):
@@ -22,6 +27,7 @@ def _noop_notifier(info, context=None):
# NOTE(boris-42): By default we are using noop notifier.
__notifier = _noop_notifier
+__notifier_cache = {} # map: connection-string -> notifier
def notify(info):
@@ -48,13 +54,29 @@ def set(notifier):
__notifier = notifier
-def create(plugin_name, *args, **kwargs):
+def create(connection_string, *args, **kwargs):
"""Create notifier based on specified plugin_name
- :param plugin_name: Name of plugin that creates notifier
- :param *args: args that will be passed to plugin init method
- :param **kwargs: kwargs that will be passed to plugin init method
+ :param connection_string: connection string which specifies the storage
+ driver for notifier
+ :param args: args that will be passed to the driver's __init__ method
+ :param kwargs: kwargs that will be passed to the driver's __init__ method
:returns: Callable notifier method
- :raise TypeError: In case of invalid name of plugin raises TypeError
"""
- return base.Notifier.factory(plugin_name, *args, **kwargs)
+ global __notifier_cache
+ if connection_string not in __notifier_cache:
+ try:
+ driver = base.get_driver(connection_string, *args, **kwargs)
+ __notifier_cache[connection_string] = driver.notify
+ LOG.info("osprofiler is enabled with connection string: %s",
+ connection_string)
+ except Exception:
+ LOG.exception("Could not initialize driver for connection string "
+ "%s, osprofiler is disabled", connection_string)
+ __notifier_cache[connection_string] = _noop_notifier
+
+ return __notifier_cache[connection_string]
+
+
+def clear_notifier_cache():
+ __notifier_cache.clear()
diff --git a/osprofiler/opts.py b/osprofiler/opts.py
new file mode 100644
index 0000000000000000000000000000000000000000..612a6c874e99ab07b59126a6add5d677eeb37e8f
--- /dev/null
+++ b/osprofiler/opts.py
@@ -0,0 +1,242 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from osprofiler import web
+
+__all__ = [
+ "list_opts",
+ "set_defaults",
+]
+
+_profiler_opt_group = cfg.OptGroup(
+ "profiler",
+ title="OpenStack cross-service profiling",
+ help="""
+OSprofiler library makes it possible to trace requests going through various OpenStack
+services and create the accumulated report of what time was spent on each
+request processing step.
+""")
+
+_enabled_opt = cfg.BoolOpt(
+ "enabled",
+ default=False,
+ deprecated_name="profiler_enabled",
+ help="""
+Enable the profiling for all services on this node.
+
+Default value is False (fully disable the profiling feature).
+
+Possible values:
+
+* True: Enables the feature
+* False: Disables the feature. The profiling cannot be started via this project
+ operations. If the profiling is triggered by another project, this project
+ part will be empty.
+""")
+
+_trace_sqlalchemy_opt = cfg.BoolOpt(
+ "trace_sqlalchemy",
+ default=False,
+ help="""
+Enable SQL requests profiling in services.
+
+Default value is False (SQL requests won't be traced).
+
+Possible values:
+
+* True: Enables SQL requests profiling. Each SQL query will be part of the
+ trace and can then be analyzed by how much time was spent for that.
+* False: Disables SQL requests profiling. The spent time is only shown on a
+ higher level of operations. Single SQL queries cannot be analyzed this way.
+""")
+
+_hmac_keys_opt = cfg.StrOpt(
+ "hmac_keys",
+ default="SECRET_KEY",
+ help="""
+Secret key(s) to use for encrypting context data for performance profiling.
+
+This string value should have the following format: key1[,key2,...],
+where each key is some random string. A user who triggers the profiling via
+the REST API has to set one of these keys in the headers of the REST API call
+to include profiling results of this node for this particular project.
+
+Both "enabled" flag and "hmac_keys" config options should be set to enable
+profiling. Also, to generate correct profiling information across all services
+at least one key needs to be consistent between OpenStack projects. This
+ensures it can be used from client side to generate the trace, containing
+information from all possible resources.
+""")
+
+_connection_string_opt = cfg.StrOpt(
+ "connection_string",
+ default="messaging://",
+ help="""
+Connection string for a notifier backend.
+
+Default value is ``messaging://`` which sets the notifier to oslo_messaging.
+
+Examples of possible values:
+
+* ``messaging://`` - use oslo_messaging driver for sending spans.
+* ``redis://127.0.0.1:6379`` - use redis driver for sending spans.
+* ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans.
+* ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending
+ spans.
+* ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending spans.
+""")
+
+_es_doc_type_opt = cfg.StrOpt(
+ "es_doc_type",
+ default="notification",
+ help="""
+Document type for notification indexing in elasticsearch.
+""")
+
+_es_scroll_time_opt = cfg.StrOpt(
+ "es_scroll_time",
+ default="2m",
+ help="""
+This parameter is a time value parameter (for example: es_scroll_time=2m),
+indicating for how long the nodes that participate in the search will maintain
+relevant resources in order to continue and support it.
+""")
+
+_es_scroll_size_opt = cfg.IntOpt(
+ "es_scroll_size",
+ default=10000,
+ help="""
+Elasticsearch splits large requests in batches. This parameter defines
+maximum size of each batch (for example: es_scroll_size=10000).
+""")
+
+_socket_timeout_opt = cfg.FloatOpt(
+ "socket_timeout",
+ default=0.1,
+ help="""
+Redissentinel provides a timeout option on the connections.
+This parameter defines that timeout (for example: socket_timeout=0.1).
+""")
+
+_sentinel_service_name_opt = cfg.StrOpt(
+ "sentinel_service_name",
+ default="mymaster",
+ help="""
+Redissentinel uses a service name to identify a master redis service.
+This parameter defines the name (for example:
+``sentinel_service_name=mymaster``).
+""")
+
+_filter_error_trace = cfg.BoolOpt(
+ "filter_error_trace",
+ default=False,
+ help="""
+Enable filtering of traces that contain an error/exception into a separate place.
+
+Default value is set to False.
+
+Possible values:
+
+* True: Enable filtering of traces that contain an error/exception.
+* False: Disable the filter.
+""")
+
+_PROFILER_OPTS = [
+ _enabled_opt,
+ _trace_sqlalchemy_opt,
+ _hmac_keys_opt,
+ _connection_string_opt,
+ _es_doc_type_opt,
+ _es_scroll_time_opt,
+ _es_scroll_size_opt,
+ _socket_timeout_opt,
+ _sentinel_service_name_opt,
+ _filter_error_trace
+]
+
+cfg.CONF.register_opts(_PROFILER_OPTS, group=_profiler_opt_group)
+
+
+def set_defaults(conf, enabled=None, trace_sqlalchemy=None, hmac_keys=None,
+ connection_string=None, es_doc_type=None,
+ es_scroll_time=None, es_scroll_size=None,
+ socket_timeout=None, sentinel_service_name=None):
+ conf.register_opts(_PROFILER_OPTS, group=_profiler_opt_group)
+
+ if enabled is not None:
+ conf.set_default("enabled", enabled,
+ group=_profiler_opt_group.name)
+ if trace_sqlalchemy is not None:
+ conf.set_default("trace_sqlalchemy", trace_sqlalchemy,
+ group=_profiler_opt_group.name)
+ if hmac_keys is not None:
+ conf.set_default("hmac_keys", hmac_keys,
+ group=_profiler_opt_group.name)
+
+ if connection_string is not None:
+ conf.set_default("connection_string", connection_string,
+ group=_profiler_opt_group.name)
+
+ if es_doc_type is not None:
+ conf.set_default("es_doc_type", es_doc_type,
+ group=_profiler_opt_group.name)
+
+ if es_scroll_time is not None:
+ conf.set_default("es_scroll_time", es_scroll_time,
+ group=_profiler_opt_group.name)
+
+ if es_scroll_size is not None:
+ conf.set_default("es_scroll_size", es_scroll_size,
+ group=_profiler_opt_group.name)
+
+ if socket_timeout is not None:
+ conf.set_default("socket_timeout", socket_timeout,
+ group=_profiler_opt_group.name)
+
+ if sentinel_service_name is not None:
+ conf.set_default("sentinel_service_name", sentinel_service_name,
+ group=_profiler_opt_group.name)
+
+
+def is_trace_enabled(conf=None):
+ if conf is None:
+ conf = cfg.CONF
+ return conf.profiler.enabled
+
+
+def is_db_trace_enabled(conf=None):
+ if conf is None:
+ conf = cfg.CONF
+ return conf.profiler.enabled and conf.profiler.trace_sqlalchemy
+
+
+def enable_web_trace(conf=None):
+ if conf is None:
+ conf = cfg.CONF
+ if conf.profiler.enabled:
+ web.enable(conf.profiler.hmac_keys)
+
+
+def disable_web_trace(conf=None):
+ if conf is None:
+ conf = cfg.CONF
+ if conf.profiler.enabled:
+ web.disable()
+
+
+def list_opts():
+ return [(_profiler_opt_group.name, _PROFILER_OPTS)]
diff --git a/osprofiler/parsers/ceilometer.py b/osprofiler/parsers/ceilometer.py
deleted file mode 100644
index a49bedef5cb74893d8f252e9628f53b51ab2b75a..0000000000000000000000000000000000000000
--- a/osprofiler/parsers/ceilometer.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import datetime
-
-
-def _build_tree(nodes):
- """Builds the tree (forest) data structure based on the list of nodes.
-
- Works in O(n).
-
- :param nodes: list of nodes, where each node is a dictionary with fields
- "parent_id", "trace_id", "info"
- :returns: list of top level ("root") nodes in form of dictionaries,
- each containing the "info" and "children" fields, where
- "children" is the list of child nodes ("children" will be
- empty for leafs)
- """
-
- tree = []
-
- for trace_id in nodes:
- node = nodes[trace_id]
- node.setdefault("children", [])
- parent_id = node["parent_id"]
- if parent_id in nodes:
- nodes[parent_id].setdefault("children", [])
- nodes[parent_id]["children"].append(node)
- else:
- tree.append(node) # no parent => top-level node
-
- for node in nodes:
- nodes[node]["children"].sort(key=lambda x: x["info"]["started"])
-
- return sorted(tree, key=lambda x: x["info"]["started"])
-
-
-def parse_notifications(notifications):
- """Parse & builds tree structure from list of ceilometer notifications."""
-
- result = {}
- started_at = 0
- finished_at = 0
-
- for n in notifications:
- meta = n["metadata"]
- key = meta["trace_id"]
-
- if key not in result:
- result[key] = {
- "info": {
- "name": meta["name"].split("-")[0]
- },
- "parent_id": meta["parent_id"],
- "trace_id": meta["trace_id"]
- }
-
- skip_keys = ["base_id", "trace_id", "parent_id", "name", "event_type"]
-
- for k in meta:
- if k not in skip_keys:
- result[key]["info"][k] = meta[k]
-
- timestamp = datetime.datetime.strptime(n["timestamp"],
- "%Y-%m-%dT%H:%M:%S.%f")
-
- if meta["name"].endswith("stop"):
- result[key]["info"]["finished"] = timestamp
- else:
- result[key]["info"]["started"] = timestamp
-
- if not started_at or started_at > timestamp:
- started_at = timestamp
-
- if not finished_at or finished_at < timestamp:
- finished_at = timestamp
-
- def msec(dt):
- # NOTE(boris-42): Unfortunately this is the simplest way that works in
- # py26 and py27
- microsec = (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 1e6)
- return (int)(microsec / 1000.0)
-
- for r in result.values():
- # NOTE(boris-42): We are not able to guarantee that ceilometer consumed
- # all messages => so we should at make duration 0ms.
- if "started" not in r["info"]:
- r["info"]["started"] = r["info"]["finished"]
- if "finished" not in r["info"]:
- r["info"]["finished"] = r["info"]["started"]
-
- r["info"]["started"] = msec(r["info"]["started"] - started_at)
- r["info"]["finished"] = msec(r["info"]["finished"] - started_at)
-
- return {
- "info": {
- "name": "total",
- "started": 0,
- "finished": msec(finished_at - started_at) if started_at else 0
- },
- "children": _build_tree(result)
- }
-
-
-def get_notifications(ceilometer, base_id):
- """Retrieves and parses notification from ceilometer.
-
- :param ceilometer: Initialized ceilometer client.
- :param base_id: Base id of trace elements.
- """
-
- _filter = '{"=": {"resource_id": "profiler-%s"}}' % base_id
- return [n.to_dict()
- for n in ceilometer.query_samples.query(_filter, None, None)]
diff --git a/osprofiler/profiler.py b/osprofiler/profiler.py
index b66b4f1ce6ca11ac7aa0ec8801e31fa4a575d0f6..7949f0e4ee9c10b0a0a3900abf472fcd7e987dda 100644
--- a/osprofiler/profiler.py
+++ b/osprofiler/profiler.py
@@ -14,11 +14,17 @@
# under the License.
import collections
+import datetime
import functools
import inspect
+import socket
import threading
-import uuid
+from oslo_utils import reflection
+from oslo_utils import uuidutils
+import six
+
+from osprofiler import _utils as utils
from osprofiler import notifier
@@ -26,10 +32,20 @@ from osprofiler import notifier
__local_ctx = threading.local()
-def _clean():
+def clean():
__local_ctx.profiler = None
+def _ensure_no_multiple_traced(traceable_attrs):
+ for attr_name, attr in traceable_attrs:
+ traced_times = getattr(attr, "__traced__", 0)
+ if traced_times:
+ raise ValueError("Can not apply new trace on top of"
+ " previously traced attribute '%s' since"
+ " it has been traced %s times previously"
+ % (attr_name, traced_times))
+
+
def init(hmac_key, base_id=None, parent_id=None):
"""Init profiler instance for current thread.
@@ -41,8 +57,9 @@ def init(hmac_key, base_id=None, parent_id=None):
:param parent_id: Used to build tree of traces.
:returns: Profiler instance
"""
- __local_ctx.profiler = _Profiler(hmac_key, base_id=base_id,
- parent_id=parent_id)
+ if get() is None:
+ __local_ctx.profiler = _Profiler(hmac_key, base_id=base_id,
+ parent_id=parent_id)
return __local_ctx.profiler
@@ -73,7 +90,8 @@ def stop(info=None):
profiler.stop(info=info)
-def trace(name, info=None, hide_args=False):
+def trace(name, info=None, hide_args=False, hide_result=True,
+ allow_multiple_trace=True):
"""Trace decorator for functions.
Very useful if you would like to add trace point on existing function:
@@ -88,28 +106,82 @@ def trace(name, info=None, hide_args=False):
:param hide_args: Don't push to trace info args and kwargs. Quite useful
if you have some info in args that you wont to share,
e.g. passwords.
+ :param hide_result: Boolean value to hide/show function result in trace.
+ True - hide function result (default).
+ False - show function result in trace.
+ :param allow_multiple_trace: If the wrapped function has already been
+ traced either allow the new trace to occur
+ or raise a value error denoting that multiple
+ tracing is not allowed (by default allow).
"""
- info = info or {}
+ if not info:
+ info = {}
+ else:
+ info = info.copy()
+ info["function"] = {}
def decorator(f):
+ trace_times = getattr(f, "__traced__", 0)
+ if not allow_multiple_trace and trace_times:
+ raise ValueError("Function '%s' has already"
+ " been traced %s times" % (f, trace_times))
+
+ try:
+ f.__traced__ = trace_times + 1
+ except AttributeError:
+ # Tries to work around the following:
+ #
+ # AttributeError: 'instancemethod' object has no
+ # attribute '__traced__'
+ try:
+ f.im_func.__traced__ = trace_times + 1
+ except AttributeError: # nosec
+ pass
@functools.wraps(f)
def wrapper(*args, **kwargs):
- info["function"] = {"name": _get_full_func_name(f)}
+ # NOTE(tovin07): Workaround for this issue
+ # F823 local variable 'info'
+ # (defined in enclosing scope on line xxx)
+ # referenced before assignment
+ info_ = info
+ if "name" not in info_["function"]:
+ # Get this once (as it should **not** be changing in
+ # subsequent calls).
+ info_["function"]["name"] = reflection.get_callable_name(f)
if not hide_args:
- info["function"]["args"] = str(args)
- info["function"]["kwargs"] = str(kwargs)
-
- with Trace(name, info=info):
- return f(*args, **kwargs)
+ info_["function"]["args"] = str(args)
+ info_["function"]["kwargs"] = str(kwargs)
+
+ stop_info = None
+ try:
+ start(name, info=info_)
+ result = f(*args, **kwargs)
+ except Exception as ex:
+ stop_info = {
+ "etype": reflection.get_class_name(ex),
+ "message": six.text_type(ex)
+ }
+ raise
+ else:
+ if not hide_result:
+ stop_info = {"function": {"result": repr(result)}}
+ return result
+ finally:
+ if stop_info:
+ stop(info=stop_info)
+ else:
+ stop()
return wrapper
return decorator
-def trace_cls(name, info=None, hide_args=False, trace_private=False):
+def trace_cls(name, info=None, hide_args=False, hide_result=True,
+ trace_private=False, allow_multiple_trace=True,
+ trace_class_methods=False, trace_static_methods=False):
"""Trace decorator for instances of class .
Very useful if you would like to add trace point on existing method:
@@ -130,27 +202,127 @@ def trace_cls(name, info=None, hide_args=False, trace_private=False):
:param hide_args: Don't push to trace info args and kwargs. Quite useful
if you have some info in args that you wont to share,
e.g. passwords.
-
+ :param hide_result: Boolean value to hide/show function result in trace.
+ True - hide function result (default).
+ False - show function result in trace.
:param trace_private: Trace methods that starts with "_". It wont trace
methods that starts "__" even if it is turned on.
+ :param trace_static_methods: Trace staticmethods. This may be prone to
+ issues so careful usage is recommended (this
+ is also why this defaults to false).
+ :param trace_class_methods: Trace classmethods. This may be prone to
+ issues so careful usage is recommended (this
+ is also why this defaults to false).
+ :param allow_multiple_trace: If wrapped attributes have already been
+ traced either allow the new trace to occur
+ or raise a value error denoting that multiple
+ tracing is not allowed (by default allow).
"""
+ def trace_checker(attr_name, to_be_wrapped):
+ if attr_name.startswith("__"):
+ # Never trace really private methods.
+ return (False, None)
+ if not trace_private and attr_name.startswith("_"):
+ return (False, None)
+ if isinstance(to_be_wrapped, staticmethod):
+ if not trace_static_methods:
+ return (False, None)
+ return (True, staticmethod)
+ if isinstance(to_be_wrapped, classmethod):
+ if not trace_class_methods:
+ return (False, None)
+ return (True, classmethod)
+ return (True, None)
+
def decorator(cls):
+ clss = cls if inspect.isclass(cls) else cls.__class__
+ mro_dicts = [c.__dict__ for c in inspect.getmro(clss)]
+ traceable_attrs = []
+ traceable_wrappers = []
for attr_name, attr in inspect.getmembers(cls):
if not (inspect.ismethod(attr) or inspect.isfunction(attr)):
continue
- if attr_name.startswith("__"):
+ wrapped_obj = None
+ for cls_dict in mro_dicts:
+ if attr_name in cls_dict:
+ wrapped_obj = cls_dict[attr_name]
+ break
+ should_wrap, wrapper = trace_checker(attr_name, wrapped_obj)
+ if not should_wrap:
continue
- if not trace_private and attr_name.startswith("_"):
- continue
-
- setattr(cls, attr_name,
- trace(name, info=info, hide_args=hide_args)(attr))
+ traceable_attrs.append((attr_name, attr))
+ traceable_wrappers.append(wrapper)
+ if not allow_multiple_trace:
+ # Check before doing any other further work (so we don't
+ # halfway trace this class).
+ _ensure_no_multiple_traced(traceable_attrs)
+ for i, (attr_name, attr) in enumerate(traceable_attrs):
+ wrapped_method = trace(name, info=info, hide_args=hide_args,
+ hide_result=hide_result)(attr)
+ wrapper = traceable_wrappers[i]
+ if wrapper is not None:
+ wrapped_method = wrapper(wrapped_method)
+ setattr(cls, attr_name, wrapped_method)
return cls
return decorator
+class TracedMeta(type):
+ """Metaclass to comfortably trace all children of a specific class.
+
+ Possible usage:
+
+ >>> @six.add_metaclass(profiler.TracedMeta)
+ >>> class RpcManagerClass(object):
+ >>> __trace_args__ = {'name': 'rpc',
+ >>> 'info': None,
+ >>> 'hide_args': False,
+ >>> 'hide_result': True,
+ >>> 'trace_private': False}
+ >>>
+ >>> def my_method(self, some_args):
+ >>> pass
+ >>>
+ >>> def my_method2(self, some_arg1, some_arg2, kw=None, kw2=None)
+ >>> pass
+
+ Adding of this metaclass requires to set __trace_args__ attribute to the
+ class we want to modify. __trace_args__ is the dictionary with one
+ mandatory key included - "name", that will define name of action to be
+ traced - E.g. wsgi, rpc, db, etc...
+ """
+ def __init__(cls, cls_name, bases, attrs):
+ super(TracedMeta, cls).__init__(cls_name, bases, attrs)
+
+ trace_args = dict(getattr(cls, "__trace_args__", {}))
+ trace_private = trace_args.pop("trace_private", False)
+ allow_multiple_trace = trace_args.pop("allow_multiple_trace", True)
+ if "name" not in trace_args:
+ raise TypeError("Please specify __trace_args__ class level "
+ "dictionary attribute with mandatory 'name' key - "
+ "e.g. __trace_args__ = {'name': 'rpc'}")
+
+ traceable_attrs = []
+ for attr_name, attr_value in attrs.items():
+ if not (inspect.ismethod(attr_value)
+ or inspect.isfunction(attr_value)):
+ continue
+ if attr_name.startswith("__"):
+ continue
+ if not trace_private and attr_name.startswith("_"):
+ continue
+ traceable_attrs.append((attr_name, attr_value))
+ if not allow_multiple_trace:
+ # Check before doing any other further work (so we don't
+ # halfway trace this class).
+ _ensure_no_multiple_traced(traceable_attrs)
+ for attr_name, attr_value in traceable_attrs:
+ setattr(cls, attr_name, trace(**trace_args)(getattr(cls,
+ attr_name)))
+
+
class Trace(object):
def __init__(self, name, info=None):
@@ -175,18 +347,14 @@ class Trace(object):
start(self._name, info=self._info)
def __exit__(self, etype, value, traceback):
- stop()
-
-
-def _get_full_func_name(f):
- if hasattr(f, "__qualname__"):
- # NOTE(boris-42): Most proper way to get full name in py33
- return ".".join([f.__module__, f.__qualname__])
-
- if inspect.ismethod(f):
- return ".".join([f.__module__, f.im_class.__name__, f.__name__])
-
- return ".".join([f.__module__, f.__name__])
+ if etype:
+ info = {
+ "etype": reflection.get_class_name(etype),
+ "message": value.args[0] if value.args else None
+ }
+ stop(info=info)
+ else:
+ stop()
class _Profiler(object):
@@ -194,12 +362,21 @@ class _Profiler(object):
def __init__(self, hmac_key, base_id=None, parent_id=None):
self.hmac_key = hmac_key
if not base_id:
- base_id = str(uuid.uuid4())
+ base_id = str(uuidutils.generate_uuid())
self._trace_stack = collections.deque([base_id, parent_id or base_id])
self._name = collections.deque()
+ self._host = socket.gethostname()
+
+ def get_shorten_id(self, uuid_id):
+ """Return shorten id of a uuid that will be used in OpenTracing drivers
+
+ :param uuid_id: A string of uuid that was generated by uuidutils
+ :returns: A shorter 64-bit long id
+ """
+ return format(utils.shorten_id(uuid_id), "x")
def get_base_id(self):
- """Return base if of trace.
+ """Return base id of a trace.
Base id is the same for all elements in one trace. It's main goal is
to be able to retrieve by one request all trace elements from storage.
@@ -218,42 +395,44 @@ class _Profiler(object):
"""Start new event.
Adds new trace_id to trace stack and sends notification
- to collector (may be ceilometer). With "info" and 3 ids:
+ to collector. With "info" and 3 ids:
base_id - to be able to retrieve all trace elements by one query
parent_id - to build tree of events (not just a list)
trace_id - current event id.
- As we are writing this code special for OpenStack, and there will be
- only one implementation of notifier based on ceilometer notifer api.
- That already contains timestamps, so we don't measure time by hand.
-
:param name: name of trace element (db, wsgi, rpc, etc..)
:param info: Dictionary with any useful information related to this
trace element. (sql request, rpc message or url...)
"""
+ info = info or {}
+ info["host"] = self._host
self._name.append(name)
- self._trace_stack.append(str(uuid.uuid4()))
- self._notify('%s-start' % name, info)
+ self._trace_stack.append(str(uuidutils.generate_uuid()))
+ self._notify("%s-start" % name, info)
def stop(self, info=None):
- """Finish latests event.
+ """Finish latest event.
Same as a start, but instead of pushing trace_id to stack it pops it.
:param info: Dict with useful info. It will be send in notification.
"""
- self._notify('%s-stop' % self._name.pop(), info)
+ info = info or {}
+ info["host"] = self._host
+ self._notify("%s-stop" % self._name.pop(), info)
self._trace_stack.pop()
def _notify(self, name, info):
payload = {
- 'name': name,
- 'base_id': self.get_base_id(),
- 'trace_id': self.get_id(),
- 'parent_id': self.get_parent_id()
+ "name": name,
+ "base_id": self.get_base_id(),
+ "trace_id": self.get_id(),
+ "parent_id": self.get_parent_id(),
+ "timestamp": datetime.datetime.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%S.%f"),
}
if info:
- payload['info'] = info
+ payload["info"] = info
notifier.notify(payload)
diff --git a/osprofiler/sqlalchemy.py b/osprofiler/sqlalchemy.py
index 4120a1fcaa268ca08bad228da4fe11bd8841d66e..5e2fc3cfa22e98e0ee90d50b0302ce022c49d49a 100644
--- a/osprofiler/sqlalchemy.py
+++ b/osprofiler/sqlalchemy.py
@@ -13,8 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+import contextlib
+import logging as log
+
+from oslo_utils import reflection
+
from osprofiler import profiler
+LOG = log.getLogger(__name__)
_DISABLED = False
@@ -32,30 +38,79 @@ def enable():
_DISABLED = False
-def add_tracing(sqlalchemy, engine, name):
+def add_tracing(sqlalchemy, engine, name, hide_result=True):
"""Add tracing to all sqlalchemy calls."""
if not _DISABLED:
- sqlalchemy.event.listen(engine, 'before_cursor_execute',
+ sqlalchemy.event.listen(engine, "before_cursor_execute",
_before_cursor_execute(name))
- sqlalchemy.event.listen(engine, 'after_cursor_execute',
- _after_cursor_execute())
+ sqlalchemy.event.listen(
+ engine, "after_cursor_execute",
+ _after_cursor_execute(hide_result=hide_result)
+ )
+ sqlalchemy.event.listen(engine, "handle_error", handle_error)
+
+
+@contextlib.contextmanager
+def wrap_session(sqlalchemy, sess):
+ with sess as s:
+ if not getattr(s.bind, "traced", False):
+ add_tracing(sqlalchemy, s.bind, "db")
+ s.bind.traced = True
+ yield s
def _before_cursor_execute(name):
"""Add listener that will send trace info before query is executed."""
def handler(conn, cursor, statement, params, context, executemany):
- info = {"db.statement": statement, "db.params": params}
+ info = {"db": {
+ "statement": statement,
+ "params": params}
+ }
profiler.start(name, info=info)
return handler
-def _after_cursor_execute():
- """Add listener that will send trace info after query is executed."""
+def _after_cursor_execute(hide_result=True):
+ """Add listener that will send trace info after query is executed.
+
+ :param hide_result: Boolean value to hide or show SQL result in trace.
+ True - hide SQL result (default).
+ False - show SQL result in trace.
+ """
def handler(conn, cursor, statement, params, context, executemany):
- profiler.stop()
+ if not hide_result:
+ # Add SQL result to trace info in *-stop phase
+ info = {
+ "db": {
+ "result": str(cursor._rows)
+ }
+ }
+ profiler.stop(info=info)
+ else:
+ profiler.stop()
return handler
+
+
+def handle_error(exception_context):
+ """Handle SQLAlchemy errors"""
+ exception_class_name = reflection.get_class_name(
+ exception_context.original_exception)
+ original_exception = str(exception_context.original_exception)
+ chained_exception = str(exception_context.chained_exception)
+
+ info = {
+ "etype": exception_class_name,
+ "message": original_exception,
+ "db": {
+ "original_exception": original_exception,
+ "chained_exception": chained_exception
+ }
+ }
+ profiler.stop(info=info)
+ LOG.debug("OSProfiler has handled SQLAlchemy error: %s",
+ original_exception)
diff --git a/osprofiler/parsers/__init__.py b/osprofiler/tests/__init__.py
similarity index 100%
rename from osprofiler/parsers/__init__.py
rename to osprofiler/tests/__init__.py
diff --git a/tests/__init__.py b/osprofiler/tests/functional/__init__.py
similarity index 100%
rename from tests/__init__.py
rename to osprofiler/tests/functional/__init__.py
diff --git a/osprofiler/tests/functional/config.cfg b/osprofiler/tests/functional/config.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..32219ada51fb2ee83b3b4a73e3a3a7d809c0f906
--- /dev/null
+++ b/osprofiler/tests/functional/config.cfg
@@ -0,0 +1,5 @@
+[DEFAULT]
+transport_url=rabbit://localhost:5672/
+
+[profiler]
+connection_string="messaging://"
diff --git a/osprofiler/tests/functional/test_driver.py b/osprofiler/tests/functional/test_driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..c29064ac7504d2006d2264a8ff930bc9895f4c4d
--- /dev/null
+++ b/osprofiler/tests/functional/test_driver.py
@@ -0,0 +1,155 @@
+# Copyright (c) 2016 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+
+from oslo_config import cfg
+
+from osprofiler.drivers import base
+from osprofiler import initializer
+from osprofiler import opts
+from osprofiler import profiler
+from osprofiler.tests import test
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+@profiler.trace_cls("rpc", hide_args=True)
+class Foo(object):
+ def bar(self, x):
+ return self.baz(x, x)
+
+ def baz(self, x, y):
+ return x * y
+
+
+class DriverTestCase(test.FunctionalTestCase):
+
+ SERVICE = "service"
+ PROJECT = "project"
+
+ def setUp(self):
+ super(DriverTestCase, self).setUp()
+ CONF(["--config-file", os.path.dirname(__file__) + "/config.cfg"])
+ opts.set_defaults(CONF,
+ enabled=True,
+ trace_sqlalchemy=False,
+ hmac_keys="SECRET_KEY")
+
+ def _assert_dict(self, info, **kwargs):
+ for key in kwargs:
+ self.assertEqual(kwargs[key], info[key])
+
+ def _assert_child_dict(self, child, base_id, parent_id, name, fn_name):
+ self.assertEqual(parent_id, child["parent_id"])
+
+ exp_info = {"name": "rpc",
+ "service": self.SERVICE,
+ "project": self.PROJECT}
+ self._assert_dict(child["info"], **exp_info)
+
+ raw_start = child["info"]["meta.raw_payload.%s-start" % name]
+ self.assertEqual(fn_name, raw_start["info"]["function"]["name"])
+ exp_raw = {"name": "%s-start" % name,
+ "service": self.SERVICE,
+ "trace_id": child["trace_id"],
+ "project": self.PROJECT,
+ "base_id": base_id}
+ self._assert_dict(raw_start, **exp_raw)
+
+ raw_stop = child["info"]["meta.raw_payload.%s-stop" % name]
+ exp_raw["name"] = "%s-stop" % name
+ self._assert_dict(raw_stop, **exp_raw)
+
+ def test_get_report(self):
+ # initialize profiler notifier (the same way as in services)
+ initializer.init_from_conf(
+ CONF, {}, self.PROJECT, self.SERVICE, "host")
+ profiler.init("SECRET_KEY")
+
+ # grab base_id
+ base_id = profiler.get().get_base_id()
+
+ # execute profiled code
+ foo = Foo()
+ foo.bar(1)
+
+ # instantiate report engine (the same way as in osprofiler CLI)
+ engine = base.get_driver(CONF.profiler.connection_string,
+ project=self.PROJECT,
+ service=self.SERVICE,
+ host="host",
+ conf=CONF)
+
+ # generate the report
+ report = engine.get_report(base_id)
+ LOG.debug("OSProfiler report: %s", report)
+
+ # verify the report
+ self.assertEqual("total", report["info"]["name"])
+ self.assertEqual(2, report["stats"]["rpc"]["count"])
+ self.assertEqual(1, len(report["children"]))
+
+ cbar = report["children"][0]
+ self._assert_child_dict(
+ cbar, base_id, base_id, "rpc",
+ "osprofiler.tests.functional.test_driver.Foo.bar")
+
+ self.assertEqual(1, len(cbar["children"]))
+ cbaz = cbar["children"][0]
+ self._assert_child_dict(
+ cbaz, base_id, cbar["trace_id"], "rpc",
+ "osprofiler.tests.functional.test_driver.Foo.baz")
+
+
+class RedisDriverTestCase(DriverTestCase):
+ def setUp(self):
+ super(DriverTestCase, self).setUp()
+ CONF([])
+ opts.set_defaults(CONF,
+ connection_string="redis://localhost:6379",
+ enabled=True,
+ trace_sqlalchemy=False,
+ hmac_keys="SECRET_KEY")
+
+ def test_list_traces(self):
+ # initialize profiler notifier (the same way as in services)
+ initializer.init_from_conf(
+ CONF, {}, self.PROJECT, self.SERVICE, "host")
+ profiler.init("SECRET_KEY")
+
+ # grab base_id
+ base_id = profiler.get().get_base_id()
+
+ # execute profiled code
+ foo = Foo()
+ foo.bar(1)
+
+ # instantiate report engine (the same way as in osprofiler CLI)
+ engine = base.get_driver(CONF.profiler.connection_string,
+ project=self.PROJECT,
+ service=self.SERVICE,
+ host="host",
+ conf=CONF)
+
+ # generate the report
+ traces = engine.list_traces()
+ LOG.debug("Collected traces: %s", traces)
+
+ # ensure trace with base_id is in the list of traces
+ self.assertIn(base_id, [t["base_id"] for t in traces])
diff --git a/tests/test.py b/osprofiler/tests/test.py
similarity index 76%
rename from tests/test.py
rename to osprofiler/tests/test.py
index fd337c935422fe1adf2ae0b5a0b8d60956e0fb52..3e865422efd023fda0a2eed2b350fecb292a6fa4 100644
--- a/tests/test.py
+++ b/osprofiler/tests/test.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+import sys
from testtools import testcase
@@ -20,3 +22,12 @@ from testtools import testcase
class TestCase(testcase.TestCase):
"""Test case base class for all osprofiler unit tests."""
pass
+
+
+class FunctionalTestCase(TestCase):
+ """Base for functional tests"""
+
+ def setUp(self):
+ super(FunctionalTestCase, self).setUp()
+
+ logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
diff --git a/tests/cmd/__init__.py b/osprofiler/tests/unit/__init__.py
similarity index 100%
rename from tests/cmd/__init__.py
rename to osprofiler/tests/unit/__init__.py
diff --git a/tests/notifiers/__init__.py b/osprofiler/tests/unit/cmd/__init__.py
similarity index 100%
rename from tests/notifiers/__init__.py
rename to osprofiler/tests/unit/cmd/__init__.py
diff --git a/osprofiler/tests/unit/cmd/test_shell.py b/osprofiler/tests/unit/cmd/test_shell.py
new file mode 100644
index 0000000000000000000000000000000000000000..845d8d94020e68588f9c50e5507d47bb9b0714b8
--- /dev/null
+++ b/osprofiler/tests/unit/cmd/test_shell.py
@@ -0,0 +1,155 @@
+# Copyright 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import os
+import sys
+from unittest import mock
+
+import ddt
+import six
+
+from osprofiler.cmd import shell
+from osprofiler import exc
+from osprofiler.tests import test
+
+
+@ddt.ddt
+class ShellTestCase(test.TestCase):
+
+ TRACE_ID = "c598094d-bbee-40b6-b317-d76003b679d3"
+
+ def setUp(self):
+ super(ShellTestCase, self).setUp()
+ self.old_environment = os.environ.copy()
+
+ def tearDown(self):
+ super(ShellTestCase, self).tearDown()
+ os.environ = self.old_environment
+
+ def _trace_show_cmd(self, format_=None):
+ cmd = "trace show --connection-string redis:// %s" % self.TRACE_ID
+ return cmd if format_ is None else "%s --%s" % (cmd, format_)
+
+ @mock.patch("sys.stdout", six.StringIO())
+ @mock.patch("osprofiler.cmd.shell.OSProfilerShell")
+ def test_shell_main(self, mock_shell):
+ mock_shell.side_effect = exc.CommandError("some_message")
+ shell.main()
+ self.assertEqual("some_message\n", sys.stdout.getvalue())
+
+ def run_command(self, cmd):
+ shell.OSProfilerShell(cmd.split())
+
+ def _test_with_command_error(self, cmd, expected_message):
+ try:
+ self.run_command(cmd)
+ except exc.CommandError as actual_error:
+ self.assertEqual(str(actual_error), expected_message)
+ else:
+ raise ValueError(
+ "Expected: `osprofiler.exc.CommandError` is raised with "
+ "message: '%s'." % expected_message)
+
+ @mock.patch("osprofiler.drivers.redis_driver.Redis.get_report")
+ def test_trace_show_no_selected_format(self, mock_get):
+ mock_get.return_value = self._create_mock_notifications()
+ msg = ("You should choose one of the following output formats: "
+ "json, html or dot.")
+ self._test_with_command_error(self._trace_show_cmd(), msg)
+
+ @mock.patch("osprofiler.drivers.redis_driver.Redis.get_report")
+ @ddt.data(None, {"info": {"started": 0, "finished": 1, "name": "total"},
+ "children": []})
+ def test_trace_show_trace_id_not_found(self, notifications, mock_get):
+ mock_get.return_value = notifications
+
+ msg = ("Trace with UUID %s not found. Please check the HMAC key "
+ "used in the command." % self.TRACE_ID)
+
+ self._test_with_command_error(self._trace_show_cmd(), msg)
+
+ def _create_mock_notifications(self):
+ notifications = {
+ "info": {
+ "started": 0,
+ "finished": 1,
+ "name": "total"
+ },
+ "children": [{
+ "info": {
+ "started": 0,
+ "finished": 1,
+ "name": "total"
+ },
+ "children": []
+ }]
+ }
+ return notifications
+
+ @mock.patch("sys.stdout", six.StringIO())
+ @mock.patch("osprofiler.drivers.redis_driver.Redis.get_report")
+ def test_trace_show_in_json(self, mock_get):
+ notifications = self._create_mock_notifications()
+ mock_get.return_value = notifications
+
+ self.run_command(self._trace_show_cmd(format_="json"))
+ self.assertEqual("%s\n" % json.dumps(notifications, indent=2,
+ separators=(",", ": "),),
+ sys.stdout.getvalue())
+
+ @mock.patch("sys.stdout", six.StringIO())
+ @mock.patch("osprofiler.drivers.redis_driver.Redis.get_report")
+ def test_trace_show_in_html(self, mock_get):
+ notifications = self._create_mock_notifications()
+ mock_get.return_value = notifications
+
+        # NOTE(akurilin): to simplify the assert statement, the real
+        # html-template is replaced with a short fake one.
+ html_template = (
+ "A long time ago in a galaxy far, far away..."
+ " some_data = $DATA"
+ "It is a period of civil war. Rebel"
+ "spaceships, striking from a hidden"
+ "base, have won their first victory"
+ "against the evil Galactic Empire.")
+
+ with mock.patch("osprofiler.cmd.commands.open",
+ mock.mock_open(read_data=html_template), create=True):
+ self.run_command(self._trace_show_cmd(format_="html"))
+ self.assertEqual("A long time ago in a galaxy far, far away..."
+ " some_data = %s"
+ "It is a period of civil war. Rebel"
+ "spaceships, striking from a hidden"
+ "base, have won their first victory"
+ "against the evil Galactic Empire."
+ "\n" % json.dumps(notifications, indent=4,
+ separators=(",", ": ")),
+ sys.stdout.getvalue())
+
+ @mock.patch("sys.stdout", six.StringIO())
+ @mock.patch("osprofiler.drivers.redis_driver.Redis.get_report")
+ def test_trace_show_write_to_file(self, mock_get):
+ notifications = self._create_mock_notifications()
+ mock_get.return_value = notifications
+
+ with mock.patch("osprofiler.cmd.commands.open",
+ mock.mock_open(), create=True) as mock_open:
+ self.run_command("%s --out='/file'" %
+ self._trace_show_cmd(format_="json"))
+
+ output = mock_open.return_value.__enter__.return_value
+ output.write.assert_called_once_with(
+ json.dumps(notifications, indent=2, separators=(",", ": ")))
diff --git a/tests/parsers/__init__.py b/osprofiler/tests/unit/doc/__init__.py
similarity index 100%
rename from tests/parsers/__init__.py
rename to osprofiler/tests/unit/doc/__init__.py
diff --git a/osprofiler/tests/unit/doc/test_specs.py b/osprofiler/tests/unit/doc/test_specs.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe2b86743cce3febb398e35cca133e569addac7c
--- /dev/null
+++ b/osprofiler/tests/unit/doc/test_specs.py
@@ -0,0 +1,119 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import glob
+import os
+import re
+
+import docutils.core
+
+from osprofiler.tests import test
+
+
+class TitlesTestCase(test.TestCase):
+
+ specs_path = os.path.join(
+ os.path.dirname(__file__),
+ os.pardir, os.pardir, os.pardir, os.pardir,
+ "doc", "specs")
+
+ def _get_title(self, section_tree):
+ section = {"subtitles": []}
+ for node in section_tree:
+ if node.tagname == "title":
+ section["name"] = node.rawsource
+ elif node.tagname == "section":
+ subsection = self._get_title(node)
+ section["subtitles"].append(subsection["name"])
+ return section
+
+ def _get_titles(self, spec):
+ titles = {}
+ for node in spec:
+ if node.tagname == "section":
+                # NOTE: subsection subtitles are thrown away
+ section = self._get_title(node)
+ titles[section["name"]] = section["subtitles"]
+ return titles
+
+ def _check_titles(self, filename, expect, actual):
+ missing_sections = [x for x in expect.keys() if x not in actual.keys()]
+ extra_sections = [x for x in actual.keys() if x not in expect.keys()]
+
+ msgs = []
+ if len(missing_sections) > 0:
+ msgs.append("Missing sections: %s" % missing_sections)
+ if len(extra_sections) > 0:
+ msgs.append("Extra sections: %s" % extra_sections)
+
+ for section in expect.keys():
+ missing_subsections = [x for x in expect[section]
+ if x not in actual.get(section, {})]
+ # extra subsections are allowed
+ if len(missing_subsections) > 0:
+ msgs.append("Section '%s' is missing subsections: %s"
+ % (section, missing_subsections))
+
+ if len(msgs) > 0:
+ self.fail("While checking '%s':\n %s"
+ % (filename, "\n ".join(msgs)))
+
+ def _check_lines_wrapping(self, tpl, raw):
+ for i, line in enumerate(raw.split("\n")):
+ if "http://" in line or "https://" in line:
+ continue
+ self.assertTrue(
+ len(line) < 80,
+ msg="%s:%d: Line limited to a maximum of 79 characters." %
+ (tpl, i+1))
+
+ def _check_no_cr(self, tpl, raw):
+ matches = re.findall("\r", raw)
+ self.assertEqual(
+ len(matches), 0,
+ "Found %s literal carriage returns in file %s" %
+ (len(matches), tpl))
+
+ def _check_trailing_spaces(self, tpl, raw):
+ for i, line in enumerate(raw.split("\n")):
+ trailing_spaces = re.findall(" +$", line)
+ self.assertEqual(
+ len(trailing_spaces), 0,
+ "Found trailing spaces on line %s of %s" % (i+1, tpl))
+
+ def test_template(self):
+ with open(os.path.join(self.specs_path, "template.rst")) as f:
+ template = f.read()
+
+ spec = docutils.core.publish_doctree(template)
+ template_titles = self._get_titles(spec)
+
+ for d in ["implemented", "in-progress"]:
+ spec_dir = "%s/%s" % (self.specs_path, d)
+
+ self.assertTrue(os.path.isdir(spec_dir),
+ "%s is not a directory" % spec_dir)
+ for filename in glob.glob(spec_dir + "/*"):
+ if filename.endswith("README.rst"):
+ continue
+
+ self.assertTrue(
+ filename.endswith(".rst"),
+ "spec's file must have .rst ext. Found: %s" % filename)
+ with open(filename) as f:
+ data = f.read()
+
+ titles = self._get_titles(docutils.core.publish_doctree(data))
+ self._check_titles(filename, template_titles, titles)
+ self._check_lines_wrapping(filename, data)
+ self._check_no_cr(filename, data)
+ self._check_trailing_spaces(filename, data)
diff --git a/osprofiler/tests/unit/drivers/__init__.py b/osprofiler/tests/unit/drivers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/osprofiler/tests/unit/drivers/test_base.py b/osprofiler/tests/unit/drivers/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bb56741a34c54f09b9aa13cfbfadace48dff8c0
--- /dev/null
+++ b/osprofiler/tests/unit/drivers/test_base.py
@@ -0,0 +1,119 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from osprofiler.drivers import base
+from osprofiler.tests import test
+
+
+class NotifierBaseTestCase(test.TestCase):
+
+ def test_factory(self):
+
+ class A(base.Driver):
+ @classmethod
+ def get_name(cls):
+ return "a"
+
+ def notify(self, a):
+ return a
+
+ self.assertEqual(10, base.get_driver("a://").notify(10))
+
+ def test_factory_with_args(self):
+
+ class B(base.Driver):
+
+ def __init__(self, c_str, a, b=10):
+ self.a = a
+ self.b = b
+
+ @classmethod
+ def get_name(cls):
+ return "b"
+
+ def notify(self, c):
+ return self.a + self.b + c
+
+ self.assertEqual(22, base.get_driver("b://", 5, b=7).notify(10))
+
+ def test_driver_not_found(self):
+ self.assertRaises(ValueError, base.get_driver,
+ "Driver not found for connection string: "
+ "nonexisting://")
+
+ def test_build_empty_tree(self):
+ class C(base.Driver):
+ @classmethod
+ def get_name(cls):
+ return "c"
+
+ self.assertEqual([], base.get_driver("c://")._build_tree({}))
+
+ def test_build_complex_tree(self):
+ class D(base.Driver):
+ @classmethod
+ def get_name(cls):
+ return "d"
+
+ test_input = {
+ "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}},
+ "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}},
+ "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}},
+ "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}},
+ "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}},
+ "113": {"parent_id": "11", "trace_id": "113",
+ "info": {"started": 3}},
+ "112": {"parent_id": "11", "trace_id": "112",
+ "info": {"started": 2}},
+ "114": {"parent_id": "11", "trace_id": "114",
+ "info": {"started": 5}}
+ }
+
+ expected_output = [
+ {
+ "parent_id": "0",
+ "trace_id": "1",
+ "info": {"started": 0},
+ "children": [
+ {
+ "parent_id": "1",
+ "trace_id": "11",
+ "info": {"started": 1},
+ "children": [
+ {"parent_id": "11", "trace_id": "112",
+ "info": {"started": 2}, "children": []},
+ {"parent_id": "11", "trace_id": "113",
+ "info": {"started": 3}, "children": []},
+ {"parent_id": "11", "trace_id": "114",
+ "info": {"started": 5}, "children": []}
+ ]
+ }
+ ]
+ },
+ {
+ "parent_id": "0",
+ "trace_id": "2",
+ "info": {"started": 1},
+ "children": [
+ {"parent_id": "2", "trace_id": "21",
+ "info": {"started": 6}, "children": []},
+ {"parent_id": "2", "trace_id": "22",
+ "info": {"started": 7}, "children": []}
+ ]
+ }
+ ]
+
+ self.assertEqual(
+ expected_output, base.get_driver("d://")._build_tree(test_input))
diff --git a/osprofiler/tests/unit/drivers/test_elasticsearch.py b/osprofiler/tests/unit/drivers/test_elasticsearch.py
new file mode 100644
index 0000000000000000000000000000000000000000..a73b9bd35583ec0da797fcc3f9f772ca6aac8f24
--- /dev/null
+++ b/osprofiler/tests/unit/drivers/test_elasticsearch.py
@@ -0,0 +1,114 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from osprofiler.drivers.elasticsearch_driver import ElasticsearchDriver
+from osprofiler.tests import test
+
+
+class ElasticsearchTestCase(test.TestCase):
+
+ def setUp(self):
+ super(ElasticsearchTestCase, self).setUp()
+ self.elasticsearch = ElasticsearchDriver("elasticsearch://localhost")
+ self.elasticsearch.project = "project"
+ self.elasticsearch.service = "service"
+
+ def test_init_and_notify(self):
+ self.elasticsearch.client = mock.MagicMock()
+ self.elasticsearch.client.reset_mock()
+ project = "project"
+ service = "service"
+ host = "host"
+
+ info = {
+ "a": 10,
+ "project": project,
+ "service": service,
+ "host": host
+ }
+ self.elasticsearch.notify(info)
+
+ self.elasticsearch.client\
+ .index.assert_called_once_with(index="osprofiler-notifications",
+ doc_type="notification",
+ body=info)
+
+ def test_get_empty_report(self):
+ self.elasticsearch.client = mock.MagicMock()
+ self.elasticsearch.client.search = mock\
+ .MagicMock(return_value={"_scroll_id": "1", "hits": {"hits": []}})
+ self.elasticsearch.client.reset_mock()
+
+ get_report = self.elasticsearch.get_report
+ base_id = "abacaba"
+
+ get_report(base_id)
+
+ self.elasticsearch.client\
+ .search.assert_called_once_with(index="osprofiler-notifications",
+ doc_type="notification",
+ size=10000,
+ scroll="2m",
+ body={"query": {
+ "match": {"base_id": base_id}}
+ })
+
+ def test_get_non_empty_report(self):
+ base_id = "1"
+ elasticsearch_first_response = {
+ "_scroll_id": "1",
+ "hits": {
+ "hits": [
+ {
+ "_source": {
+ "timestamp": "2016-08-10T16:58:03.064438",
+ "base_id": base_id,
+ "project": "project",
+ "service": "service",
+ "parent_id": "0",
+ "name": "test",
+ "info": {
+ "host": "host"
+ },
+ "trace_id": "1"
+ }
+ }
+ ]}}
+ elasticsearch_second_response = {
+ "_scroll_id": base_id,
+ "hits": {"hits": []}}
+ self.elasticsearch.client = mock.MagicMock()
+ self.elasticsearch.client.search = \
+ mock.MagicMock(return_value=elasticsearch_first_response)
+ self.elasticsearch.client.scroll = \
+ mock.MagicMock(return_value=elasticsearch_second_response)
+
+ self.elasticsearch.client.reset_mock()
+
+ self.elasticsearch.get_report(base_id)
+
+ self.elasticsearch.client\
+ .search.assert_called_once_with(index="osprofiler-notifications",
+ doc_type="notification",
+ size=10000,
+ scroll="2m",
+ body={"query": {
+ "match": {"base_id": base_id}}
+ })
+
+ self.elasticsearch.client\
+ .scroll.assert_called_once_with(scroll_id=base_id, scroll="2m")
diff --git a/osprofiler/tests/unit/drivers/test_jaeger.py b/osprofiler/tests/unit/drivers/test_jaeger.py
new file mode 100644
index 0000000000000000000000000000000000000000..e59e69b791267691f5ddec0a64547e1d95ecff20
--- /dev/null
+++ b/osprofiler/tests/unit/drivers/test_jaeger.py
@@ -0,0 +1,78 @@
+# Copyright 2018 Fujitsu Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from osprofiler.drivers import jaeger
+from osprofiler.tests import test
+
+
+class JaegerTestCase(test.TestCase):
+
+ def setUp(self):
+ super(JaegerTestCase, self).setUp()
+ self.payload_start = {
+ "name": "api-start",
+ "base_id": "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee",
+ "trace_id": "1c089ea8-28fe-4f3d-8c00-f6daa2bc32f1",
+ "parent_id": "e2715537-3d1c-4f0c-b3af-87355dc5fc5b",
+ "timestamp": "2018-05-03T04:31:51.781381",
+ "info": {
+ "host": "test"
+ }
+ }
+
+ self.payload_stop = {
+ "name": "api-stop",
+ "base_id": "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee",
+ "trace_id": "1c089ea8-28fe-4f3d-8c00-f6daa2bc32f1",
+ "parent_id": "e2715537-3d1c-4f0c-b3af-87355dc5fc5b",
+ "timestamp": "2018-05-03T04:31:51.781381",
+ "info": {
+ "host": "test",
+ "function": {
+ "result": 1
+ }
+ }
+ }
+
+ self.driver = jaeger.Jaeger("jaeger://127.0.0.1:6831",
+ project="nova", service="api")
+
+ @mock.patch("osprofiler._utils.shorten_id")
+ def test_notify_start(self, mock_shorten_id):
+ self.driver.notify(self.payload_start)
+ calls = [
+ mock.call(self.payload_start["base_id"]),
+ mock.call(self.payload_start["parent_id"]),
+ mock.call(self.payload_start["trace_id"])
+ ]
+ mock_shorten_id.assert_has_calls(calls, any_order=True)
+
+ @mock.patch("jaeger_client.span.Span")
+ @mock.patch("time.time")
+ def test_notify_stop(self, mock_time, mock_span):
+ fake_time = 1525416065.5958152
+ mock_time.return_value = fake_time
+
+ span = mock_span()
+ self.driver.spans.append(mock_span())
+
+ self.driver.notify(self.payload_stop)
+
+ mock_time.assert_called_once()
+ mock_time.reset_mock()
+
+ span.finish.assert_called_once_with(finish_time=fake_time)
diff --git a/osprofiler/tests/unit/drivers/test_loginsight.py b/osprofiler/tests/unit/drivers/test_loginsight.py
new file mode 100644
index 0000000000000000000000000000000000000000..936b7e4ea1f80ad9a52bc97571e20f6166e6851d
--- /dev/null
+++ b/osprofiler/tests/unit/drivers/test_loginsight.py
@@ -0,0 +1,296 @@
+# Copyright (c) 2016 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+from unittest import mock
+
+import ddt
+
+from osprofiler.drivers import loginsight
+from osprofiler import exc
+from osprofiler.tests import test
+
+
+@ddt.ddt
+class LogInsightDriverTestCase(test.TestCase):
+
+ BASE_ID = "8d28af1e-acc0-498c-9890-6908e33eff5f"
+
+ def setUp(self):
+ super(LogInsightDriverTestCase, self).setUp()
+ self._client = mock.Mock(spec=loginsight.LogInsightClient)
+ self._project = "cinder"
+ self._service = "osapi_volume"
+ self._host = "ubuntu"
+ with mock.patch.object(loginsight, "LogInsightClient",
+ return_value=self._client):
+ self._driver = loginsight.LogInsightDriver(
+ "loginsight://username:password@host",
+ project=self._project,
+ service=self._service,
+ host=self._host)
+
+ @mock.patch.object(loginsight, "LogInsightClient")
+ def test_init(self, client_class):
+ client = mock.Mock()
+ client_class.return_value = client
+
+ loginsight.LogInsightDriver("loginsight://username:password@host")
+ client_class.assert_called_once_with("host", "username", "password")
+ client.login.assert_called_once_with()
+
+ @ddt.data("loginsight://username@host",
+ "loginsight://username:p@ssword@host",
+ "loginsight://us:rname:password@host")
+ def test_init_with_invalid_connection_string(self, conn_str):
+ self.assertRaises(ValueError, loginsight.LogInsightDriver, conn_str)
+
+ @mock.patch.object(loginsight, "LogInsightClient")
+ def test_init_with_special_chars_in_conn_str(self, client_class):
+ client = mock.Mock()
+ client_class.return_value = client
+
+ loginsight.LogInsightDriver("loginsight://username:p%40ssword@host")
+ client_class.assert_called_once_with("host", "username", "p@ssword")
+ client.login.assert_called_once_with()
+
+ def test_get_name(self):
+ self.assertEqual("loginsight", self._driver.get_name())
+
+ def _create_trace(self,
+ name,
+ timestamp,
+ parent_id="8d28af1e-acc0-498c-9890-6908e33eff5f",
+ base_id=BASE_ID,
+ trace_id="e465db5c-9672-45a1-b90b-da918f30aef6"):
+ return {"parent_id": parent_id,
+ "name": name,
+ "base_id": base_id,
+ "trace_id": trace_id,
+ "timestamp": timestamp,
+ "info": {"host": self._host}}
+
+ def _create_start_trace(self):
+ return self._create_trace("wsgi-start", "2016-10-04t11:50:21.902303")
+
+ def _create_stop_trace(self):
+ return self._create_trace("wsgi-stop", "2016-10-04t11:50:30.123456")
+
+ @mock.patch("json.dumps")
+ def test_notify(self, dumps):
+ json_str = mock.sentinel.json_str
+ dumps.return_value = json_str
+
+ trace = self._create_stop_trace()
+ self._driver.notify(trace)
+
+ trace["project"] = self._project
+ trace["service"] = self._service
+ exp_event = {"text": "OSProfiler trace",
+ "fields": [{"name": "base_id",
+ "content": trace["base_id"]},
+ {"name": "trace_id",
+ "content": trace["trace_id"]},
+ {"name": "project",
+ "content": trace["project"]},
+ {"name": "service",
+ "content": trace["service"]},
+ {"name": "name",
+ "content": trace["name"]},
+ {"name": "trace",
+ "content": json_str}]
+ }
+ self._client.send_event.assert_called_once_with(exp_event)
+
+ @mock.patch.object(loginsight.LogInsightDriver, "_append_results")
+ @mock.patch.object(loginsight.LogInsightDriver, "_parse_results")
+ def test_get_report(self, parse_results, append_results):
+ start_trace = self._create_start_trace()
+ start_trace["project"] = self._project
+ start_trace["service"] = self._service
+
+ stop_trace = self._create_stop_trace()
+ stop_trace["project"] = self._project
+ stop_trace["service"] = self._service
+
+ resp = {"events": [{"text": "OSProfiler trace",
+ "fields": [{"name": "trace",
+ "content": json.dumps(start_trace)
+ }
+ ]
+ },
+ {"text": "OSProfiler trace",
+ "fields": [{"name": "trace",
+ "content": json.dumps(stop_trace)
+ }
+ ]
+ }
+ ]
+ }
+ self._client.query_events = mock.Mock(return_value=resp)
+
+ self._driver.get_report(self.BASE_ID)
+ self._client.query_events.assert_called_once_with({"base_id":
+ self.BASE_ID})
+ append_results.assert_has_calls(
+ [mock.call(start_trace["trace_id"], start_trace["parent_id"],
+ start_trace["name"], start_trace["project"],
+ start_trace["service"], start_trace["info"]["host"],
+ start_trace["timestamp"], start_trace),
+ mock.call(stop_trace["trace_id"], stop_trace["parent_id"],
+ stop_trace["name"], stop_trace["project"],
+ stop_trace["service"], stop_trace["info"]["host"],
+ stop_trace["timestamp"], stop_trace)
+ ])
+ parse_results.assert_called_once_with()
+
+
+class LogInsightClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(LogInsightClientTestCase, self).setUp()
+ self._host = "localhost"
+ self._username = "username"
+ self._password = "password"
+ self._client = loginsight.LogInsightClient(
+ self._host, self._username, self._password)
+ self._client._session_id = "4ff800d1-3175-4b49-9209-39714ea56416"
+
+ def test_check_response_login_timeout(self):
+ resp = mock.Mock(status_code=440)
+ self.assertRaises(
+ exc.LogInsightLoginTimeout, self._client._check_response, resp)
+
+ def test_check_response_api_error(self):
+ resp = mock.Mock(status_code=401, ok=False)
+ resp.text = json.dumps(
+ {"errorMessage": "Invalid username or password.",
+ "errorCode": "FIELD_ERROR"})
+ e = self.assertRaises(
+ exc.LogInsightAPIError, self._client._check_response, resp)
+ self.assertEqual("Invalid username or password.", str(e))
+
+ @mock.patch("requests.Request")
+ @mock.patch("json.dumps")
+ @mock.patch.object(loginsight.LogInsightClient, "_check_response")
+ def test_send_request(self, check_resp, json_dumps, request_class):
+ req = mock.Mock()
+ request_class.return_value = req
+ prep_req = mock.sentinel.prep_req
+ req.prepare = mock.Mock(return_value=prep_req)
+
+ data = mock.sentinel.data
+ json_dumps.return_value = data
+
+ self._client._session = mock.Mock()
+ resp = mock.Mock()
+ self._client._session.send = mock.Mock(return_value=resp)
+ resp_json = mock.sentinel.resp_json
+ resp.json = mock.Mock(return_value=resp_json)
+
+ header = {"X-LI-Session-Id": "foo"}
+ body = mock.sentinel.body
+ params = mock.sentinel.params
+ ret = self._client._send_request(
+ "get", "https", "api/v1/events", header, body, params)
+
+ self.assertEqual(resp_json, ret)
+ exp_headers = {"X-LI-Session-Id": "foo",
+ "content-type": "application/json"}
+ request_class.assert_called_once_with(
+ "get", "https://localhost:9543/api/v1/events", headers=exp_headers,
+ data=data, params=mock.sentinel.params)
+ self._client._session.send.assert_called_once_with(prep_req,
+ verify=False)
+ check_resp.assert_called_once_with(resp)
+
+ @mock.patch.object(loginsight.LogInsightClient, "_send_request")
+ def test_is_current_session_active_with_active_session(self, send_request):
+ self.assertTrue(self._client._is_current_session_active())
+ exp_header = {"X-LI-Session-Id": self._client._session_id}
+ send_request.assert_called_once_with(
+ "get", "https", "api/v1/sessions/current", headers=exp_header)
+
+ @mock.patch.object(loginsight.LogInsightClient, "_send_request")
+ def test_is_current_session_active_with_expired_session(self,
+ send_request):
+ send_request.side_effect = exc.LogInsightLoginTimeout
+
+ self.assertFalse(self._client._is_current_session_active())
+ send_request.assert_called_once_with(
+ "get", "https", "api/v1/sessions/current",
+ headers={"X-LI-Session-Id": self._client._session_id})
+
+ @mock.patch.object(loginsight.LogInsightClient,
+ "_is_current_session_active", return_value=True)
+ @mock.patch.object(loginsight.LogInsightClient, "_send_request")
+ def test_login_with_current_session_active(self, send_request,
+ is_current_session_active):
+ self._client.login()
+ is_current_session_active.assert_called_once_with()
+ send_request.assert_not_called()
+
+ @mock.patch.object(loginsight.LogInsightClient,
+ "_is_current_session_active", return_value=False)
+ @mock.patch.object(loginsight.LogInsightClient, "_send_request")
+ def test_login(self, send_request, is_current_session_active):
+ new_session_id = "569a80aa-be5c-49e5-82c1-bb62392d2667"
+ resp = {"sessionId": new_session_id}
+ send_request.return_value = resp
+
+ self._client.login()
+ is_current_session_active.assert_called_once_with()
+ exp_body = {"username": self._username, "password": self._password}
+ send_request.assert_called_once_with(
+ "post", "https", "api/v1/sessions", body=exp_body)
+ self.assertEqual(new_session_id, self._client._session_id)
+
+ @mock.patch.object(loginsight.LogInsightClient, "_send_request")
+ def test_send_event(self, send_request):
+ event = mock.sentinel.event
+ self._client.send_event(event)
+
+ exp_body = {"events": [event]}
+ exp_path = ("api/v1/events/ingest/%s" %
+ self._client.LI_OSPROFILER_AGENT_ID)
+ send_request.assert_called_once_with(
+ "post", "http", exp_path, body=exp_body)
+
+ @mock.patch.object(loginsight.LogInsightClient, "_send_request")
+ def test_query_events(self, send_request):
+ resp = mock.sentinel.response
+ send_request.return_value = resp
+
+ self.assertEqual(resp, self._client.query_events({"foo": "bar"}))
+ exp_header = {"X-LI-Session-Id": self._client._session_id}
+ exp_params = {"limit": 20000, "timeout": self._client._query_timeout}
+ send_request.assert_called_once_with(
+ "get", "https", "api/v1/events/foo/CONTAINS+bar/timestamp/GT+0",
+ headers=exp_header, params=exp_params)
+
+ @mock.patch.object(loginsight.LogInsightClient, "_send_request")
+ @mock.patch.object(loginsight.LogInsightClient, "login")
+ def test_query_events_with_session_expiry(self, login, send_request):
+ resp = mock.sentinel.response
+ send_request.side_effect = [exc.LogInsightLoginTimeout, resp]
+
+ self.assertEqual(resp, self._client.query_events({"foo": "bar"}))
+ login.assert_called_once_with()
+ exp_header = {"X-LI-Session-Id": self._client._session_id}
+ exp_params = {"limit": 20000, "timeout": self._client._query_timeout}
+ exp_send_request_call = mock.call(
+ "get", "https", "api/v1/events/foo/CONTAINS+bar/timestamp/GT+0",
+ headers=exp_header, params=exp_params)
+ send_request.assert_has_calls([exp_send_request_call] * 2)
diff --git a/osprofiler/tests/unit/drivers/test_messaging.py b/osprofiler/tests/unit/drivers/test_messaging.py
new file mode 100644
index 0000000000000000000000000000000000000000..baa911bc17e1897a0846b80e4f95b586e70cd8be
--- /dev/null
+++ b/osprofiler/tests/unit/drivers/test_messaging.py
@@ -0,0 +1,72 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from osprofiler.drivers import base
+from osprofiler.tests import test
+
+
+class MessagingTestCase(test.TestCase):
+
+ @mock.patch("oslo_utils.importutils.try_import")
+ def test_init_no_oslo_messaging(self, try_import_mock):
+ try_import_mock.return_value = None
+
+ self.assertRaises(
+ ValueError, base.get_driver,
+ "messaging://", project="project", service="service",
+ host="host", context={})
+
+ @mock.patch("oslo_utils.importutils.try_import")
+ def test_init_and_notify(self, try_import_mock):
+ context = "context"
+ transport = "transport"
+ project = "project"
+ service = "service"
+ host = "host"
+
+ # emulate dynamic load of oslo.messaging library
+ oslo_messaging_mock = mock.Mock()
+ try_import_mock.return_value = oslo_messaging_mock
+
+ # mock oslo.messaging APIs
+ notifier_mock = mock.Mock()
+ oslo_messaging_mock.Notifier.return_value = notifier_mock
+ oslo_messaging_mock.get_notification_transport.return_value = transport
+
+ notify_func = base.get_driver(
+ "messaging://", project=project, service=service,
+ context=context, host=host).notify
+
+ oslo_messaging_mock.Notifier.assert_called_once_with(
+ transport, publisher_id=host, driver="messaging",
+ topics=["profiler"], retry=0)
+
+ info = {
+ "a": 10,
+ "project": project,
+ "service": service,
+ "host": host
+ }
+ notify_func(info)
+
+ notifier_mock.info.assert_called_once_with(
+ context, "profiler.service", info)
+
+ notifier_mock.reset_mock()
+ notify_func(info, context="my_context")
+ notifier_mock.info.assert_called_once_with(
+ "my_context", "profiler.service", info)
diff --git a/osprofiler/tests/unit/drivers/test_mongodb.py b/osprofiler/tests/unit/drivers/test_mongodb.py
new file mode 100644
index 0000000000000000000000000000000000000000..603765386b2f4fcdd23e1b2ca3da4f404c7f51b6
--- /dev/null
+++ b/osprofiler/tests/unit/drivers/test_mongodb.py
@@ -0,0 +1,321 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from osprofiler.drivers.mongodb import MongoDB
+from osprofiler.tests import test
+
+
+class MongoDBParserTestCase(test.TestCase):
+ def setUp(self):
+ super(MongoDBParserTestCase, self).setUp()
+ self.mongodb = MongoDB("mongodb://localhost")
+
+ def test_build_empty_tree(self):
+ self.assertEqual([], self.mongodb._build_tree({}))
+
+ def test_build_complex_tree(self):
+ test_input = {
+ "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}},
+ "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}},
+ "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}},
+ "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}},
+ "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}},
+ "113": {"parent_id": "11", "trace_id": "113",
+ "info": {"started": 3}},
+ "112": {"parent_id": "11", "trace_id": "112",
+ "info": {"started": 2}},
+ "114": {"parent_id": "11", "trace_id": "114",
+ "info": {"started": 5}}
+ }
+
+ expected_output = [
+ {
+ "parent_id": "0",
+ "trace_id": "1",
+ "info": {"started": 0},
+ "children": [
+ {
+ "parent_id": "1",
+ "trace_id": "11",
+ "info": {"started": 1},
+ "children": [
+ {"parent_id": "11", "trace_id": "112",
+ "info": {"started": 2}, "children": []},
+ {"parent_id": "11", "trace_id": "113",
+ "info": {"started": 3}, "children": []},
+ {"parent_id": "11", "trace_id": "114",
+ "info": {"started": 5}, "children": []}
+ ]
+ }
+ ]
+ },
+ {
+ "parent_id": "0",
+ "trace_id": "2",
+ "info": {"started": 1},
+ "children": [
+ {"parent_id": "2", "trace_id": "21",
+ "info": {"started": 6}, "children": []},
+ {"parent_id": "2", "trace_id": "22",
+ "info": {"started": 7}, "children": []}
+ ]
+ }
+ ]
+
+ result = self.mongodb._build_tree(test_input)
+ self.assertEqual(expected_output, result)
+
+ def test_get_report_empty(self):
+ self.mongodb.db = mock.MagicMock()
+ self.mongodb.db.profiler.find.return_value = []
+
+ expected = {
+ "info": {
+ "name": "total",
+ "started": 0,
+ "finished": None,
+ "last_trace_started": None
+ },
+ "children": [],
+ "stats": {},
+ }
+
+ base_id = "10"
+ self.assertEqual(expected, self.mongodb.get_report(base_id))
+
+ def test_get_report(self):
+ self.mongodb.db = mock.MagicMock()
+ results = [
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "request": {
+ "path": "/v2/a322b5049d224a90bf8786c644409400/volumes",
+ "scheme": "http",
+ "method": "POST",
+ "query": ""
+ },
+ "service": None
+ },
+ "name": "wsgi-start",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.338776",
+ "trace_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "project": "keystone",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "service": None
+ },
+ "name": "wsgi-stop",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.380405",
+ "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf",
+ "project": "keystone",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "db": {
+ "params": {
+
+ },
+ "statement": "SELECT 1"
+ },
+ "service": None
+ },
+ "name": "db-start",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.395365",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a",
+ "project": "keystone",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "service": None
+ },
+ "name": "db-stop",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.415486",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a",
+ "project": "keystone",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "request": {
+ "path": "/v2/a322b5049d224a90bf8786c644409400/volumes",
+ "scheme": "http",
+ "method": "GET",
+ "query": ""
+ },
+ "service": None
+ },
+ "name": "wsgi-start",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.427444",
+ "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b",
+ "project": "keystone",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ }]
+
+ expected = {"children": [{"children": [{
+ "children": [],
+ "info": {"finished": 76,
+ "host": "ubuntu",
+ "meta.raw_payload.db-start": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"db": {"params": {},
+ "statement": "SELECT 1"},
+ "host": "ubuntu",
+ "project": None,
+ "service": None},
+ "name": "db-start",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.395365",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"},
+ "meta.raw_payload.db-stop": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "service": None},
+ "name": "db-stop",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.415486",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"},
+ "name": "db",
+ "project": "keystone",
+ "service": "main",
+ "started": 56,
+ "exception": "None"},
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}],
+
+ "info": {"finished": 0,
+ "host": "ubuntu",
+ "meta.raw_payload.wsgi-start": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "request": {"method": "POST",
+ "path": "/v2/a322b5049d224a90bf8"
+ "786c644409400/volumes",
+ "query": "",
+ "scheme": "http"},
+ "service": None},
+ "name": "wsgi-start",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.338776",
+ "trace_id": "06320327-2c2c-45ae-923a-515de890276a"},
+ "name": "wsgi",
+ "project": "keystone",
+ "service": "main",
+ "started": 0},
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "trace_id": "06320327-2c2c-45ae-923a-515de890276a"},
+
+ {"children": [],
+ "info": {"finished": 41,
+ "host": "ubuntu",
+ "meta.raw_payload.wsgi-stop": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "service": None},
+ "name": "wsgi-stop",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.380405",
+ "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"},
+ "name": "wsgi",
+ "project": "keystone",
+ "service": "main",
+ "started": 41,
+ "exception": "None"},
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"},
+
+ {"children": [],
+ "info": {"finished": 88,
+ "host": "ubuntu",
+ "meta.raw_payload.wsgi-start": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "request": {"method": "GET",
+ "path": "/v2/a322b5049d224a90bf"
+ "8786c644409400/volumes",
+ "query": "",
+ "scheme": "http"},
+ "service": None},
+ "name": "wsgi-start",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.427444",
+ "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"},
+ "name": "wsgi",
+ "project": "keystone",
+ "service": "main",
+ "started": 88},
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}],
+ "info": {
+ "finished": 88,
+ "name": "total",
+ "started": 0,
+ "last_trace_started": 88},
+ "stats": {"db": {"count": 1, "duration": 20},
+ "wsgi": {"count": 3, "duration": 0}}}
+
+ self.mongodb.db.profiler.find.return_value = results
+
+ base_id = "10"
+
+ result = self.mongodb.get_report(base_id)
+
+ expected_filter = [{"base_id": base_id}, {"_id": 0}]
+ self.mongodb.db.profiler.find.assert_called_once_with(
+ *expected_filter)
+ self.assertEqual(expected, result)
diff --git a/osprofiler/tests/unit/drivers/test_redis_driver.py b/osprofiler/tests/unit/drivers/test_redis_driver.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8d3e5e67ebd88cf9223111a4e26206722ae30f5
--- /dev/null
+++ b/osprofiler/tests/unit/drivers/test_redis_driver.py
@@ -0,0 +1,332 @@
+# Copyright 2016 Mirantis Inc.
+# Copyright 2016 IBM Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from oslo_serialization import jsonutils
+
+from osprofiler.drivers.redis_driver import Redis
+from osprofiler.tests import test
+
+
+class RedisParserTestCase(test.TestCase):
+ def setUp(self):
+ super(RedisParserTestCase, self).setUp()
+ self.redisdb = Redis("redis://localhost:6379")
+
+ def test_build_empty_tree(self):
+ self.assertEqual([], self.redisdb._build_tree({}))
+
+ def test_build_complex_tree(self):
+ test_input = {
+ "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}},
+ "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}},
+ "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}},
+ "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}},
+ "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}},
+ "113": {"parent_id": "11", "trace_id": "113",
+ "info": {"started": 3}},
+ "112": {"parent_id": "11", "trace_id": "112",
+ "info": {"started": 2}},
+ "114": {"parent_id": "11", "trace_id": "114",
+ "info": {"started": 5}}
+ }
+
+ expected_output = [
+ {
+ "parent_id": "0",
+ "trace_id": "1",
+ "info": {"started": 0},
+ "children": [
+ {
+ "parent_id": "1",
+ "trace_id": "11",
+ "info": {"started": 1},
+ "children": [
+ {"parent_id": "11", "trace_id": "112",
+ "info": {"started": 2}, "children": []},
+ {"parent_id": "11", "trace_id": "113",
+ "info": {"started": 3}, "children": []},
+ {"parent_id": "11", "trace_id": "114",
+ "info": {"started": 5}, "children": []}
+ ]
+ }
+ ]
+ },
+ {
+ "parent_id": "0",
+ "trace_id": "2",
+ "info": {"started": 1},
+ "children": [
+ {"parent_id": "2", "trace_id": "21",
+ "info": {"started": 6}, "children": []},
+ {"parent_id": "2", "trace_id": "22",
+ "info": {"started": 7}, "children": []}
+ ]
+ }
+ ]
+
+ result = self.redisdb._build_tree(test_input)
+ self.assertEqual(expected_output, result)
+
+ def test_get_report_empty(self):
+ self.redisdb.db = mock.MagicMock()
+ self.redisdb.db.scan_iter.return_value = []
+
+ expected = {
+ "info": {
+ "name": "total",
+ "started": 0,
+ "finished": None,
+ "last_trace_started": None
+ },
+ "children": [],
+ "stats": {},
+ }
+
+ base_id = "10"
+ self.assertEqual(expected, self.redisdb.get_report(base_id))
+
+ def test_get_report(self):
+ self.redisdb.db = mock.MagicMock()
+ result_elements = [
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "request": {
+ "path": "/v2/a322b5049d224a90bf8786c644409400/volumes",
+ "scheme": "http",
+ "method": "POST",
+ "query": ""
+ },
+ "service": None
+ },
+ "name": "wsgi-start",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.338776",
+ "trace_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "project": "keystone",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "service": None
+ },
+ "name": "wsgi-stop",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.380405",
+ "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf",
+ "project": "keystone",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "db": {
+ "params": {
+
+ },
+ "statement": "SELECT 1"
+ },
+ "service": None
+ },
+ "name": "db-start",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.395365",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a",
+ "project": "keystone",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "service": None
+ },
+ "name": "db-stop",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.415486",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a",
+ "project": "keystone",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ },
+
+ {
+ "info": {
+ "project": None,
+ "host": "ubuntu",
+ "request": {
+ "path": "/v2/a322b5049d224a90bf8786c644409400/volumes",
+ "scheme": "http",
+ "method": "GET",
+ "query": ""
+ },
+ "service": None
+ },
+ "name": "wsgi-start",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.427444",
+ "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b",
+ "project": "keystone",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4"
+ }]
+ results = {result["base_id"] + "_" + result["trace_id"]
+ + "_" + result["timestamp"]: result
+ for result in result_elements}
+
+ expected = {"children": [{"children": [{
+ "children": [],
+ "info": {"finished": 76,
+ "host": "ubuntu",
+ "meta.raw_payload.db-start": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"db": {"params": {},
+ "statement": "SELECT 1"},
+ "host": "ubuntu",
+ "project": None,
+ "service": None},
+ "name": "db-start",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.395365",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"},
+ "meta.raw_payload.db-stop": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "service": None},
+ "name": "db-stop",
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.415486",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"},
+ "name": "db",
+ "project": "keystone",
+ "service": "main",
+ "started": 56,
+ "exception": "None"},
+ "parent_id": "06320327-2c2c-45ae-923a-515de890276a",
+ "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}],
+
+ "info": {"finished": 0,
+ "host": "ubuntu",
+ "meta.raw_payload.wsgi-start": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "request": {"method": "POST",
+ "path": "/v2/a322b5049d224a90bf8"
+ "786c644409400/volumes",
+ "query": "",
+ "scheme": "http"},
+ "service": None},
+ "name": "wsgi-start",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.338776",
+ "trace_id": "06320327-2c2c-45ae-923a-515de890276a"},
+ "name": "wsgi",
+ "project": "keystone",
+ "service": "main",
+ "started": 0},
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "trace_id": "06320327-2c2c-45ae-923a-515de890276a"},
+
+ {"children": [],
+ "info": {"finished": 41,
+ "host": "ubuntu",
+ "meta.raw_payload.wsgi-stop": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "service": None},
+ "name": "wsgi-stop",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.380405",
+ "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"},
+ "name": "wsgi",
+ "project": "keystone",
+ "service": "main",
+ "started": 41,
+ "exception": "None"},
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"},
+
+ {"children": [],
+ "info": {"finished": 88,
+ "host": "ubuntu",
+ "meta.raw_payload.wsgi-start": {
+ "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "info": {"host": "ubuntu",
+ "project": None,
+ "request": {"method": "GET",
+ "path": "/v2/a322b5049d224a90bf"
+ "8786c644409400/volumes",
+ "query": "",
+ "scheme": "http"},
+ "service": None},
+ "name": "wsgi-start",
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "project": "keystone",
+ "service": "main",
+ "timestamp": "2015-12-23T14:02:22.427444",
+ "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"},
+ "name": "wsgi",
+ "project": "keystone",
+ "service": "main",
+ "started": 88},
+ "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4",
+ "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}],
+ "info": {
+ "finished": 88,
+ "name": "total",
+ "started": 0,
+ "last_trace_started": 88},
+ "stats": {"db": {"count": 1, "duration": 20},
+ "wsgi": {"count": 3, "duration": 0}}}
+
+ self.redisdb.db.scan_iter.return_value = list(results.keys())
+
+ def side_effect(*args, **kwargs):
+ return jsonutils.dumps(results[args[0]])
+
+ self.redisdb.db.get.side_effect = side_effect
+
+ base_id = "10"
+
+ result = self.redisdb.get_report(base_id)
+
+ expected_filter = self.redisdb.namespace + "10*"
+ self.redisdb.db.scan_iter.assert_called_once_with(
+ match=expected_filter)
+ self.assertEqual(expected, result)
diff --git a/osprofiler/tests/unit/test_initializer.py b/osprofiler/tests/unit/test_initializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..21d20c36c4f380ae387d40d7416e66365d6b726d
--- /dev/null
+++ b/osprofiler/tests/unit/test_initializer.py
@@ -0,0 +1,44 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+import testtools
+
+from osprofiler import initializer
+
+
+class InitializerTestCase(testtools.TestCase):
+
+ @mock.patch("osprofiler.notifier.set")
+ @mock.patch("osprofiler.notifier.create")
+ @mock.patch("osprofiler.web.enable")
+ def test_initializer(self, web_enable_mock, notifier_create_mock,
+ notifier_set_mock):
+ conf = mock.Mock()
+ conf.profiler.connection_string = "driver://"
+ conf.profiler.hmac_keys = "hmac_keys"
+ context = {}
+ project = "my-project"
+ service = "my-service"
+ host = "my-host"
+
+ notifier_mock = mock.Mock()
+ notifier_create_mock.return_value = notifier_mock
+
+ initializer.init_from_conf(conf, context, project, service, host)
+
+ notifier_create_mock.assert_called_once_with(
+ "driver://", context=context, project=project, service=service,
+ host=host, conf=conf)
+ notifier_set_mock.assert_called_once_with(notifier_mock)
+ web_enable_mock.assert_called_once_with("hmac_keys")
diff --git a/tests/test_notifier.py b/osprofiler/tests/unit/test_notifier.py
similarity index 67%
rename from tests/test_notifier.py
rename to osprofiler/tests/unit/test_notifier.py
index 27bb9abc65dcf834ebab49b4fc1c515ad3b8843a..47229c8ec1f2a83973b26d4c27f7a1ed610c184f 100644
--- a/tests/test_notifier.py
+++ b/osprofiler/tests/unit/test_notifier.py
@@ -13,16 +13,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from osprofiler import notifier
-from tests import test
+from osprofiler.tests import test
class NotifierTestCase(test.TestCase):
def tearDown(self):
- notifier.__notifier = notifier._noop_notifier
+ notifier.set(notifier._noop_notifier) # restore defaults
+ notifier.clear_notifier_cache()
super(NotifierTestCase, self).tearDown()
def test_set(self):
@@ -43,9 +44,17 @@ class NotifierTestCase(test.TestCase):
m.assert_called_once_with(10)
- @mock.patch("osprofiler.notifier.base.Notifier.factory")
+ @mock.patch("osprofiler.notifier.base.get_driver")
def test_create(self, mock_factory):
result = notifier.create("test", 10, b=20)
mock_factory.assert_called_once_with("test", 10, b=20)
- self.assertEqual(mock_factory.return_value, result)
+ self.assertEqual(mock_factory.return_value.notify, result)
+
+ @mock.patch("osprofiler.notifier.base.get_driver")
+ def test_create_driver_init_failure(self, mock_get_driver):
+ mock_get_driver.side_effect = Exception()
+
+ result = notifier.create("test", 10, b=20)
+ mock_get_driver.assert_called_once_with("test", 10, b=20)
+ self.assertEqual(notifier._noop_notifier, result)
diff --git a/osprofiler/tests/unit/test_opts.py b/osprofiler/tests/unit/test_opts.py
new file mode 100644
index 0000000000000000000000000000000000000000..31af2abf9d2ec48ab3cde1700c70aca7544a3e78
--- /dev/null
+++ b/osprofiler/tests/unit/test_opts.py
@@ -0,0 +1,66 @@
+# Copyright 2016 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from oslo_config import fixture
+
+from osprofiler import opts
+from osprofiler.tests import test
+
+
+class ConfigTestCase(test.TestCase):
+ def setUp(self):
+ super(ConfigTestCase, self).setUp()
+ self.conf_fixture = self.useFixture(fixture.Config())
+
+ def test_options_defaults(self):
+ opts.set_defaults(self.conf_fixture.conf)
+ self.assertFalse(self.conf_fixture.conf.profiler.enabled)
+ self.assertFalse(self.conf_fixture.conf.profiler.trace_sqlalchemy)
+ self.assertEqual("SECRET_KEY",
+ self.conf_fixture.conf.profiler.hmac_keys)
+ self.assertFalse(opts.is_trace_enabled(self.conf_fixture.conf))
+ self.assertFalse(opts.is_db_trace_enabled(self.conf_fixture.conf))
+
+ def test_options_defaults_override(self):
+ opts.set_defaults(self.conf_fixture.conf, enabled=True,
+ trace_sqlalchemy=True,
+ hmac_keys="MY_KEY")
+ self.assertTrue(self.conf_fixture.conf.profiler.enabled)
+ self.assertTrue(self.conf_fixture.conf.profiler.trace_sqlalchemy)
+ self.assertEqual("MY_KEY",
+ self.conf_fixture.conf.profiler.hmac_keys)
+ self.assertTrue(opts.is_trace_enabled(self.conf_fixture.conf))
+ self.assertTrue(opts.is_db_trace_enabled(self.conf_fixture.conf))
+
+ @mock.patch("osprofiler.web.enable")
+ @mock.patch("osprofiler.web.disable")
+ def test_web_trace_disabled(self, mock_disable, mock_enable):
+ opts.set_defaults(self.conf_fixture.conf, hmac_keys="MY_KEY")
+ opts.enable_web_trace(self.conf_fixture.conf)
+ opts.disable_web_trace(self.conf_fixture.conf)
+ self.assertEqual(0, mock_enable.call_count)
+ self.assertEqual(0, mock_disable.call_count)
+
+ @mock.patch("osprofiler.web.enable")
+ @mock.patch("osprofiler.web.disable")
+ def test_web_trace_enabled(self, mock_disable, mock_enable):
+ opts.set_defaults(self.conf_fixture.conf, enabled=True,
+ hmac_keys="MY_KEY")
+ opts.enable_web_trace(self.conf_fixture.conf)
+ opts.disable_web_trace(self.conf_fixture.conf)
+ mock_enable.assert_called_once_with("MY_KEY")
+ mock_disable.assert_called_once_with()
diff --git a/osprofiler/tests/unit/test_profiler.py b/osprofiler/tests/unit/test_profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f0fcb5a8b47318e07f74fe8b4c694b3074105f5
--- /dev/null
+++ b/osprofiler/tests/unit/test_profiler.py
@@ -0,0 +1,624 @@
+# Copyright 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import copy
+import datetime
+import re
+from unittest import mock
+
+import six
+
+from osprofiler import profiler
+from osprofiler.tests import test
+
+
+class ProfilerGlobMethodsTestCase(test.TestCase):
+
+ def test_get_profiler_not_inited(self):
+ profiler.clean()
+ self.assertIsNone(profiler.get())
+
+ def test_get_profiler_and_init(self):
+ p = profiler.init("secret", base_id="1", parent_id="2")
+ self.assertEqual(profiler.get(), p)
+
+ self.assertEqual(p.get_base_id(), "1")
+ # NOTE(boris-42): until we make first start we don't have
+ self.assertEqual(p.get_id(), "2")
+
+ def test_start_not_inited(self):
+ profiler.clean()
+ profiler.start("name")
+
+ def test_start(self):
+ p = profiler.init("secret", base_id="1", parent_id="2")
+ p.start = mock.MagicMock()
+ profiler.start("name", info="info")
+ p.start.assert_called_once_with("name", info="info")
+
+ def test_stop_not_inited(self):
+ profiler.clean()
+ profiler.stop()
+
+ def test_stop(self):
+ p = profiler.init("secret", base_id="1", parent_id="2")
+ p.stop = mock.MagicMock()
+ profiler.stop(info="info")
+ p.stop.assert_called_once_with(info="info")
+
+
+class ProfilerTestCase(test.TestCase):
+
+ def test_profiler_get_shorten_id(self):
+ uuid_id = "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee"
+ prof = profiler._Profiler("secret", base_id="1", parent_id="2")
+ result = prof.get_shorten_id(uuid_id)
+ expected = "850409eb1d4b0dee"
+ self.assertEqual(expected, result)
+
+ def test_profiler_get_shorten_id_int(self):
+ short_id_int = 42
+ prof = profiler._Profiler("secret", base_id="1", parent_id="2")
+ result = prof.get_shorten_id(short_id_int)
+ expected = "2a"
+ self.assertEqual(expected, result)
+
+ def test_profiler_get_base_id(self):
+ prof = profiler._Profiler("secret", base_id="1", parent_id="2")
+ self.assertEqual(prof.get_base_id(), "1")
+
+ @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
+ def test_profiler_get_parent_id(self, mock_generate_uuid):
+ mock_generate_uuid.return_value = "42"
+ prof = profiler._Profiler("secret", base_id="1", parent_id="2")
+ prof.start("test")
+ self.assertEqual(prof.get_parent_id(), "2")
+
+ @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
+ def test_profiler_get_base_id_unset_case(self, mock_generate_uuid):
+ mock_generate_uuid.return_value = "42"
+ prof = profiler._Profiler("secret")
+ self.assertEqual(prof.get_base_id(), "42")
+ self.assertEqual(prof.get_parent_id(), "42")
+
+ @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
+ def test_profiler_get_id(self, mock_generate_uuid):
+ mock_generate_uuid.return_value = "43"
+ prof = profiler._Profiler("secret")
+ prof.start("test")
+ self.assertEqual(prof.get_id(), "43")
+
+ @mock.patch("osprofiler.profiler.datetime")
+ @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
+ @mock.patch("osprofiler.profiler.notifier.notify")
+ def test_profiler_start(self, mock_notify, mock_generate_uuid,
+ mock_datetime):
+ mock_generate_uuid.return_value = "44"
+ now = datetime.datetime.utcnow()
+ mock_datetime.datetime.utcnow.return_value = now
+
+ info = {"some": "info"}
+ payload = {
+ "name": "test-start",
+ "base_id": "1",
+ "parent_id": "2",
+ "trace_id": "44",
+ "info": info,
+ "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
+ }
+
+ prof = profiler._Profiler("secret", base_id="1", parent_id="2")
+ prof.start("test", info=info)
+
+ mock_notify.assert_called_once_with(payload)
+
+ @mock.patch("osprofiler.profiler.datetime")
+ @mock.patch("osprofiler.profiler.notifier.notify")
+ def test_profiler_stop(self, mock_notify, mock_datetime):
+ now = datetime.datetime.utcnow()
+ mock_datetime.datetime.utcnow.return_value = now
+ prof = profiler._Profiler("secret", base_id="1", parent_id="2")
+ prof._trace_stack.append("44")
+ prof._name.append("abc")
+
+ info = {"some": "info"}
+ prof.stop(info=info)
+
+ payload = {
+ "name": "abc-stop",
+ "base_id": "1",
+ "parent_id": "2",
+ "trace_id": "44",
+ "info": info,
+ "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
+ }
+
+ mock_notify.assert_called_once_with(payload)
+ self.assertEqual(len(prof._name), 0)
+ self.assertEqual(prof._trace_stack, collections.deque(["1", "2"]))
+
+ def test_profiler_hmac(self):
+ hmac = "secret"
+ prof = profiler._Profiler(hmac, base_id="1", parent_id="2")
+ self.assertEqual(hmac, prof.hmac_key)
+
+
+class WithTraceTestCase(test.TestCase):
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_with_trace(self, mock_start, mock_stop):
+
+ with profiler.Trace("a", info="a1"):
+ mock_start.assert_called_once_with("a", info="a1")
+ mock_start.reset_mock()
+ with profiler.Trace("b", info="b1"):
+ mock_start.assert_called_once_with("b", info="b1")
+ mock_stop.assert_called_once_with()
+ mock_stop.reset_mock()
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_with_trace_etype(self, mock_start, mock_stop):
+
+ def foo():
+ with profiler.Trace("foo"):
+ raise ValueError("bar")
+
+ self.assertRaises(ValueError, foo)
+ mock_start.assert_called_once_with("foo", info=None)
+ mock_stop.assert_called_once_with(info={
+ "etype": "ValueError",
+ "message": "bar"
+ })
+
+
+@profiler.trace("function", info={"info": "some_info"})
+def traced_func(i):
+ return i
+
+
+@profiler.trace("hide_args", hide_args=True)
+def trace_hide_args_func(a, i=10):
+ return (a, i)
+
+
+@profiler.trace("foo", hide_args=True)
+def test_fn_exc():
+ raise ValueError()
+
+
+@profiler.trace("hide_result", hide_result=False)
+def trace_with_result_func(a, i=10):
+ return (a, i)
+
+
+class TraceDecoratorTestCase(test.TestCase):
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_duplicate_trace_disallow(self, mock_start, mock_stop):
+
+ @profiler.trace("test")
+ def trace_me():
+ pass
+
+ self.assertRaises(
+ ValueError,
+ profiler.trace("test-again", allow_multiple_trace=False),
+ trace_me)
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_with_args(self, mock_start, mock_stop):
+ self.assertEqual(1, traced_func(1))
+ expected_info = {
+ "info": "some_info",
+ "function": {
+ "name": "osprofiler.tests.unit.test_profiler.traced_func",
+ "args": str((1,)),
+ "kwargs": str({})
+ }
+ }
+ mock_start.assert_called_once_with("function", info=expected_info)
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_without_args(self, mock_start, mock_stop):
+ self.assertEqual((1, 2), trace_hide_args_func(1, i=2))
+ expected_info = {
+ "function": {
+ "name": "osprofiler.tests.unit.test_profiler"
+ ".trace_hide_args_func"
+ }
+ }
+ mock_start.assert_called_once_with("hide_args", info=expected_info)
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_with_exception(self, mock_start, mock_stop):
+
+ self.assertRaises(ValueError, test_fn_exc)
+ expected_info = {
+ "function": {
+ "name": "osprofiler.tests.unit.test_profiler.test_fn_exc"
+ }
+ }
+ expected_stop_info = {"etype": "ValueError", "message": ""}
+ mock_start.assert_called_once_with("foo", info=expected_info)
+ mock_stop.assert_called_once_with(info=expected_stop_info)
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_with_result(self, mock_start, mock_stop):
+ self.assertEqual((1, 2), trace_with_result_func(1, i=2))
+ start_info = {
+ "function": {
+ "name": "osprofiler.tests.unit.test_profiler"
+ ".trace_with_result_func",
+ "args": str((1,)),
+ "kwargs": str({"i": 2})
+ }
+ }
+
+ stop_info = {
+ "function": {
+ "result": str((1, 2))
+ }
+ }
+ mock_start.assert_called_once_with("hide_result", info=start_info)
+ mock_stop.assert_called_once_with(info=stop_info)
+
+
+class FakeTracedCls(object):
+
+ def method1(self, a, b, c=10):
+ return a + b + c
+
+ def method2(self, d, e):
+ return d - e
+
+ def method3(self, g=10, h=20):
+ return g * h
+
+ def _method(self, i):
+ return i
+
+
+@profiler.trace_cls("rpc", info={"a": 10})
+class FakeTraceClassWithInfo(FakeTracedCls):
+ pass
+
+
+@profiler.trace_cls("a", info={"b": 20}, hide_args=True)
+class FakeTraceClassHideArgs(FakeTracedCls):
+ pass
+
+
+@profiler.trace_cls("rpc", trace_private=True)
+class FakeTracePrivate(FakeTracedCls):
+ pass
+
+
+class FakeTraceStaticMethodBase(FakeTracedCls):
+ @staticmethod
+ def static_method(arg):
+ return arg
+
+
+@profiler.trace_cls("rpc", trace_static_methods=True)
+class FakeTraceStaticMethod(FakeTraceStaticMethodBase):
+ pass
+
+
+@profiler.trace_cls("rpc")
+class FakeTraceStaticMethodSkip(FakeTraceStaticMethodBase):
+ pass
+
+
+class FakeTraceClassMethodBase(FakeTracedCls):
+ @classmethod
+ def class_method(cls, arg):
+ return arg
+
+
+@profiler.trace_cls("rpc")
+class FakeTraceClassMethodSkip(FakeTraceClassMethodBase):
+ pass
+
+
+def py3_info(info):
+ # NOTE(boris-42): py33 I hate you.
+ info_py3 = copy.deepcopy(info)
+ new_name = re.sub("FakeTrace[^.]*", "FakeTracedCls",
+ info_py3["function"]["name"])
+ info_py3["function"]["name"] = new_name
+ return info_py3
+
+
+def possible_mock_calls(name, info):
+ # NOTE(boris-42): py33 I hate you.
+ return [mock.call(name, info=info), mock.call(name, info=py3_info(info))]
+
+
+class TraceClsDecoratorTestCase(test.TestCase):
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_args(self, mock_start, mock_stop):
+ fake_cls = FakeTraceClassWithInfo()
+ self.assertEqual(30, fake_cls.method1(5, 15))
+ expected_info = {
+ "a": 10,
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTraceClassWithInfo.method1"),
+ "args": str((fake_cls, 5, 15)),
+ "kwargs": str({})
+ }
+ }
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("rpc", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_kwargs(self, mock_start, mock_stop):
+ fake_cls = FakeTraceClassWithInfo()
+ self.assertEqual(50, fake_cls.method3(g=5, h=10))
+ expected_info = {
+ "a": 10,
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTraceClassWithInfo.method3"),
+ "args": str((fake_cls,)),
+ "kwargs": str({"g": 5, "h": 10})
+ }
+ }
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("rpc", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_without_private(self, mock_start, mock_stop):
+ fake_cls = FakeTraceClassHideArgs()
+ self.assertEqual(10, fake_cls._method(10))
+ self.assertFalse(mock_start.called)
+ self.assertFalse(mock_stop.called)
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_without_args(self, mock_start, mock_stop):
+ fake_cls = FakeTraceClassHideArgs()
+ self.assertEqual(40, fake_cls.method1(5, 15, c=20))
+ expected_info = {
+ "b": 20,
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTraceClassHideArgs.method1"),
+ }
+ }
+
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("a", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_private_methods(self, mock_start, mock_stop):
+ fake_cls = FakeTracePrivate()
+ self.assertEqual(5, fake_cls._method(5))
+
+ expected_info = {
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTracePrivate._method"),
+ "args": str((fake_cls, 5)),
+ "kwargs": str({})
+ }
+ }
+
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("rpc", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ @test.testcase.skip(
+ "Static method tracing was disabled due the bug. This test should be "
+ "skipped until we find the way to address it.")
+ def test_static(self, mock_start, mock_stop):
+ fake_cls = FakeTraceStaticMethod()
+
+ self.assertEqual(25, fake_cls.static_method(25))
+
+ expected_info = {
+ "function": {
+ # fixme(boris-42): Static methods are treated differently in
+ # Python 2.x and Python 3.x. So in PY2 we
+ # expect to see method4 because method is
+ # static and doesn't have reference to class
+ # - and FakeTraceStatic.method4 in PY3
+ "name":
+ "osprofiler.tests.unit.test_profiler"
+ ".method4" if six.PY2 else
+ "osprofiler.tests.unit.test_profiler.FakeTraceStatic"
+ ".method4",
+ "args": str((25,)),
+ "kwargs": str({})
+ }
+ }
+
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("rpc", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_static_method_skip(self, mock_start, mock_stop):
+ self.assertEqual(25, FakeTraceStaticMethodSkip.static_method(25))
+ self.assertFalse(mock_start.called)
+ self.assertFalse(mock_stop.called)
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_class_method_skip(self, mock_start, mock_stop):
+ self.assertEqual("foo", FakeTraceClassMethodSkip.class_method("foo"))
+ self.assertFalse(mock_start.called)
+ self.assertFalse(mock_stop.called)
+
+
+@six.add_metaclass(profiler.TracedMeta)
+class FakeTraceWithMetaclassBase(object):
+ __trace_args__ = {"name": "rpc",
+ "info": {"a": 10}}
+
+ def method1(self, a, b, c=10):
+ return a + b + c
+
+ def method2(self, d, e):
+ return d - e
+
+ def method3(self, g=10, h=20):
+ return g * h
+
+ def _method(self, i):
+ return i
+
+
+class FakeTraceDummy(FakeTraceWithMetaclassBase):
+ def method4(self, j):
+ return j
+
+
+class FakeTraceWithMetaclassHideArgs(FakeTraceWithMetaclassBase):
+ __trace_args__ = {"name": "a",
+ "info": {"b": 20},
+ "hide_args": True}
+
+ def method5(self, k, l):
+ return k + l
+
+
+class FakeTraceWithMetaclassPrivate(FakeTraceWithMetaclassBase):
+ __trace_args__ = {"name": "rpc",
+ "trace_private": True}
+
+ def _new_private_method(self, m):
+ return 2 * m
+
+
+class TraceWithMetaclassTestCase(test.TestCase):
+
+ def test_no_name_exception(self):
+ def define_class_with_no_name():
+ @six.add_metaclass(profiler.TracedMeta)
+ class FakeTraceWithMetaclassNoName(FakeTracedCls):
+ pass
+ self.assertRaises(TypeError, define_class_with_no_name, 1)
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_args(self, mock_start, mock_stop):
+ fake_cls = FakeTraceWithMetaclassBase()
+ self.assertEqual(30, fake_cls.method1(5, 15))
+ expected_info = {
+ "a": 10,
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTraceWithMetaclassBase.method1"),
+ "args": str((fake_cls, 5, 15)),
+ "kwargs": str({})
+ }
+ }
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("rpc", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_kwargs(self, mock_start, mock_stop):
+ fake_cls = FakeTraceWithMetaclassBase()
+ self.assertEqual(50, fake_cls.method3(g=5, h=10))
+ expected_info = {
+ "a": 10,
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTraceWithMetaclassBase.method3"),
+ "args": str((fake_cls,)),
+ "kwargs": str({"g": 5, "h": 10})
+ }
+ }
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("rpc", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_without_private(self, mock_start, mock_stop):
+ fake_cls = FakeTraceWithMetaclassHideArgs()
+ self.assertEqual(10, fake_cls._method(10))
+ self.assertFalse(mock_start.called)
+ self.assertFalse(mock_stop.called)
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_without_args(self, mock_start, mock_stop):
+ fake_cls = FakeTraceWithMetaclassHideArgs()
+ self.assertEqual(20, fake_cls.method5(5, 15))
+ expected_info = {
+ "b": 20,
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTraceWithMetaclassHideArgs.method5")
+ }
+ }
+
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("a", expected_info))
+ mock_stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.profiler.stop")
+ @mock.patch("osprofiler.profiler.start")
+ def test_private_methods(self, mock_start, mock_stop):
+ fake_cls = FakeTraceWithMetaclassPrivate()
+ self.assertEqual(10, fake_cls._new_private_method(5))
+
+ expected_info = {
+ "function": {
+ "name": ("osprofiler.tests.unit.test_profiler"
+ ".FakeTraceWithMetaclassPrivate._new_private_method"),
+ "args": str((fake_cls, 5)),
+ "kwargs": str({})
+ }
+ }
+
+ self.assertEqual(1, len(mock_start.call_args_list))
+ self.assertIn(mock_start.call_args_list[0],
+ possible_mock_calls("rpc", expected_info))
+ mock_stop.assert_called_once_with()
diff --git a/osprofiler/tests/unit/test_sqlalchemy.py b/osprofiler/tests/unit/test_sqlalchemy.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fb71c4279fd8023171681752294d84d143ac140
--- /dev/null
+++ b/osprofiler/tests/unit/test_sqlalchemy.py
@@ -0,0 +1,168 @@
+# Copyright 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+from unittest import mock
+
+
+from osprofiler import sqlalchemy
+from osprofiler.tests import test
+
+
+class SqlalchemyTracingTestCase(test.TestCase):
+
+ @mock.patch("osprofiler.sqlalchemy.profiler")
+ def test_before_execute(self, mock_profiler):
+ handler = sqlalchemy._before_cursor_execute("sql")
+
+ handler(mock.MagicMock(), 1, 2, 3, 4, 5)
+ expected_info = {"db": {"statement": 2, "params": 3}}
+ mock_profiler.start.assert_called_once_with("sql", info=expected_info)
+
+ @mock.patch("osprofiler.sqlalchemy.profiler")
+ def test_after_execute(self, mock_profiler):
+ handler = sqlalchemy._after_cursor_execute()
+ handler(mock.MagicMock(), 1, 2, 3, 4, 5)
+ mock_profiler.stop.assert_called_once_with()
+
+ @mock.patch("osprofiler.sqlalchemy.profiler")
+ def test_after_execute_with_sql_result(self, mock_profiler):
+ handler = sqlalchemy._after_cursor_execute(hide_result=False)
+ cursor = mock.MagicMock()
+ cursor._rows = (1,)
+ handler(1, cursor, 2, 3, 4, 5)
+ info = {
+ "db": {
+ "result": str(cursor._rows)
+ }
+ }
+ mock_profiler.stop.assert_called_once_with(info=info)
+
+ @mock.patch("osprofiler.sqlalchemy.profiler")
+ def test_handle_error(self, mock_profiler):
+ original_exception = Exception("error")
+ chained_exception = Exception("error and the reason")
+
+ sqlalchemy_exception_ctx = mock.MagicMock()
+ sqlalchemy_exception_ctx.original_exception = original_exception
+ sqlalchemy_exception_ctx.chained_exception = chained_exception
+
+ sqlalchemy.handle_error(sqlalchemy_exception_ctx)
+ expected_info = {
+ "etype": "Exception",
+ "message": "error",
+ "db": {
+ "original_exception": str(original_exception),
+ "chained_exception": str(chained_exception),
+ }
+ }
+ mock_profiler.stop.assert_called_once_with(info=expected_info)
+
+ @mock.patch("osprofiler.sqlalchemy.handle_error")
+ @mock.patch("osprofiler.sqlalchemy._before_cursor_execute")
+ @mock.patch("osprofiler.sqlalchemy._after_cursor_execute")
+ def test_add_tracing(self, mock_after_exc, mock_before_exc,
+ mock_handle_error):
+ sa = mock.MagicMock()
+ engine = mock.MagicMock()
+
+ mock_before_exc.return_value = "before"
+ mock_after_exc.return_value = "after"
+
+ sqlalchemy.add_tracing(sa, engine, "sql")
+
+ mock_before_exc.assert_called_once_with("sql")
+ # Default set hide_result=True
+ mock_after_exc.assert_called_once_with(hide_result=True)
+ expected_calls = [
+ mock.call(engine, "before_cursor_execute", "before"),
+ mock.call(engine, "after_cursor_execute", "after"),
+ mock.call(engine, "handle_error", mock_handle_error),
+ ]
+ self.assertEqual(sa.event.listen.call_args_list, expected_calls)
+
+ @mock.patch("osprofiler.sqlalchemy.handle_error")
+ @mock.patch("osprofiler.sqlalchemy._before_cursor_execute")
+ @mock.patch("osprofiler.sqlalchemy._after_cursor_execute")
+ def test_wrap_session(self, mock_after_exc, mock_before_exc,
+ mock_handle_error):
+ sa = mock.MagicMock()
+
+ @contextlib.contextmanager
+ def _session():
+ session = mock.MagicMock()
+ # current engine object stored within the session
+ session.bind = mock.MagicMock()
+ session.bind.traced = None
+ yield session
+
+ mock_before_exc.return_value = "before"
+ mock_after_exc.return_value = "after"
+
+ session = sqlalchemy.wrap_session(sa, _session())
+
+ with session as sess:
+ pass
+
+ mock_before_exc.assert_called_once_with("db")
+ # Default set hide_result=True
+ mock_after_exc.assert_called_once_with(hide_result=True)
+ expected_calls = [
+ mock.call(sess.bind, "before_cursor_execute", "before"),
+ mock.call(sess.bind, "after_cursor_execute", "after"),
+ mock.call(sess.bind, "handle_error", mock_handle_error),
+ ]
+
+ self.assertEqual(sa.event.listen.call_args_list, expected_calls)
+
+ @mock.patch("osprofiler.sqlalchemy.handle_error")
+ @mock.patch("osprofiler.sqlalchemy._before_cursor_execute")
+ @mock.patch("osprofiler.sqlalchemy._after_cursor_execute")
+ @mock.patch("osprofiler.profiler")
+ def test_with_sql_result(self, mock_profiler, mock_after_exc,
+ mock_before_exc, mock_handle_error):
+ sa = mock.MagicMock()
+ engine = mock.MagicMock()
+
+ mock_before_exc.return_value = "before"
+ mock_after_exc.return_value = "after"
+
+ sqlalchemy.add_tracing(sa, engine, "sql", hide_result=False)
+
+ mock_before_exc.assert_called_once_with("sql")
+ # Default set hide_result=True
+ mock_after_exc.assert_called_once_with(hide_result=False)
+ expected_calls = [
+ mock.call(engine, "before_cursor_execute", "before"),
+ mock.call(engine, "after_cursor_execute", "after"),
+ mock.call(engine, "handle_error", mock_handle_error),
+ ]
+ self.assertEqual(sa.event.listen.call_args_list, expected_calls)
+
+ @mock.patch("osprofiler.sqlalchemy._before_cursor_execute")
+ @mock.patch("osprofiler.sqlalchemy._after_cursor_execute")
+ def test_disable_and_enable(self, mock_after_exc, mock_before_exc):
+ sqlalchemy.disable()
+
+ sa = mock.MagicMock()
+ engine = mock.MagicMock()
+ sqlalchemy.add_tracing(sa, engine, "sql")
+ self.assertFalse(mock_after_exc.called)
+ self.assertFalse(mock_before_exc.called)
+
+ sqlalchemy.enable()
+ sqlalchemy.add_tracing(sa, engine, "sql")
+ self.assertTrue(mock_after_exc.called)
+ self.assertTrue(mock_before_exc.called)
diff --git a/tests/test_utils.py b/osprofiler/tests/unit/test_utils.py
similarity index 79%
rename from tests/test_utils.py
rename to osprofiler/tests/unit/test_utils.py
index 6130cf5faf4a72160771b7f4675af9860f688340..2239aa15bd0c311a6931332d27d9b76ede84ae6a 100644
--- a/tests/test_utils.py
+++ b/osprofiler/tests/unit/test_utils.py
@@ -16,11 +16,12 @@
import base64
import hashlib
import hmac
+from unittest import mock
+import uuid
-import mock
from osprofiler import _utils as utils
-from tests import test
+from osprofiler.tests import test
class UtilsTestCase(test.TestCase):
@@ -67,21 +68,21 @@ class UtilsTestCase(test.TestCase):
process_data = utils.signed_unpack(packed_data, hmac_data, [hmac])
self.assertIn("hmac_key", process_data)
- process_data.pop('hmac_key')
+ process_data.pop("hmac_key")
self.assertEqual(data, process_data)
def test_signed_pack_unpack_many_keys(self):
- keys = ['secret', 'secret2', 'secret3']
+ keys = ["secret", "secret2", "secret3"]
data = {"some": "data"}
packed_data, hmac_data = utils.signed_pack(data, keys[-1])
process_data = utils.signed_unpack(packed_data, hmac_data, keys)
- self.assertEqual(keys[-1], process_data['hmac_key'])
+ self.assertEqual(keys[-1], process_data["hmac_key"])
def test_signed_pack_unpack_many_wrong_keys(self):
- keys = ['secret', 'secret2', 'secret3']
+ keys = ["secret", "secret2", "secret3"]
data = {"some": "data"}
- packed_data, hmac_data = utils.signed_pack(data, 'password')
+ packed_data, hmac_data = utils.signed_pack(data, "password")
process_data = utils.signed_unpack(packed_data, hmac_data, keys)
self.assertIsNone(process_data)
@@ -111,6 +112,29 @@ class UtilsTestCase(test.TestCase):
self.assertIsNone(utils.signed_unpack(data, hmac_data, hmac))
+ def test_shorten_id_with_valid_uuid(self):
+ valid_id = "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee"
+
+ uuid_obj = uuid.UUID(valid_id)
+
+ with mock.patch("uuid.UUID") as mock_uuid:
+ mock_uuid.return_value = uuid_obj
+
+ result = utils.shorten_id(valid_id)
+ expected = 9584796812364680686
+
+ self.assertEqual(expected, result)
+
+ @mock.patch("oslo_utils.uuidutils.generate_uuid")
+ def test_shorten_id_with_invalid_uuid(self, mock_gen_uuid):
+ invalid_id = "invalid"
+ mock_gen_uuid.return_value = "1c089ea8-28fe-4f3d-8c00-f6daa2bc32f1"
+
+ result = utils.shorten_id(invalid_id)
+ expected = 10088334584203457265
+
+ self.assertEqual(expected, result)
+
def test_itersubclasses(self):
class A(object):
diff --git a/tests/test_web.py b/osprofiler/tests/unit/test_web.py
similarity index 86%
rename from tests/test_web.py
rename to osprofiler/tests/unit/test_web.py
index 578559c93c969b7ab94d10fe2b371f444dbe95ae..7ceade023b6f73e9404a2836eca061630b46c951 100644
--- a/tests/test_web.py
+++ b/osprofiler/tests/unit/test_web.py
@@ -13,15 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import response as webob_response
from osprofiler import _utils as utils
from osprofiler import profiler
+from osprofiler.tests import test
from osprofiler import web
-from tests import test
-
def dummy_app(environ, response):
res = webob_response.Response()
@@ -32,8 +32,8 @@ class WebTestCase(test.TestCase):
def setUp(self):
super(WebTestCase, self).setUp()
- profiler._clean()
- self.addCleanup(profiler._clean)
+ profiler.clean()
+ self.addCleanup(profiler.clean)
def test_get_trace_id_headers_no_hmac(self):
profiler.init(None, base_id="y", parent_id="z")
@@ -48,9 +48,9 @@ class WebTestCase(test.TestCase):
trace_info = utils.signed_unpack(headers["X-Trace-Info"],
headers["X-Trace-HMAC"], ["key"])
- self.assertIn('hmac_key', trace_info)
- self.assertEqual('key', trace_info.pop('hmac_key'))
- self.assertEqual({"parent_id": 'z', 'base_id': 'y'}, trace_info)
+ self.assertIn("hmac_key", trace_info)
+ self.assertEqual("key", trace_info.pop("hmac_key"))
+ self.assertEqual({"parent_id": "z", "base_id": "y"}, trace_info)
@mock.patch("osprofiler.profiler.get")
def test_get_trace_id_headers_no_profiler(self, mock_get_profiler):
@@ -62,8 +62,10 @@ class WebTestCase(test.TestCase):
class WebMiddlewareTestCase(test.TestCase):
def setUp(self):
super(WebMiddlewareTestCase, self).setUp()
- profiler._clean()
- self.addCleanup(profiler._clean)
+ profiler.clean()
+ # it's default state of _ENABLED param, so let's set it here
+ web._ENABLED = None
+ self.addCleanup(profiler.clean)
def tearDown(self):
web.enable()
@@ -252,7 +254,6 @@ class WebMiddlewareTestCase(test.TestCase):
parent_id="2")
expected_info = {
"request": {
- "host_url": request.host_url,
"path": request.path,
"query": request.query_string,
"method": request.method,
@@ -270,11 +271,38 @@ class WebMiddlewareTestCase(test.TestCase):
self.assertEqual("yeah!", middleware(request))
self.assertEqual(mock_profiler_init.call_count, 0)
+ @mock.patch("osprofiler.web.profiler.init")
+ def test_wsgi_middleware_enable_via_python(self, mock_profiler_init):
+ request = mock.MagicMock()
+ request.get_response.return_value = "yeah!"
+ request.url = "someurl"
+ request.host_url = "someurl"
+ request.path = "path"
+ request.query_string = "query"
+ request.method = "method"
+ request.scheme = "scheme"
+ hmac_key = "super_secret_key2"
+
+ pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key)
+ request.headers = {
+ "a": "1",
+ "b": "2",
+ "X-Trace-Info": pack[0],
+ "X-Trace-HMAC": pack[1]
+ }
+
+ web.enable("super_secret_key1,super_secret_key2")
+ middleware = web.WsgiMiddleware("app", enabled=True)
+ self.assertEqual("yeah!", middleware(request))
+ mock_profiler_init.assert_called_once_with(hmac_key=hmac_key,
+ base_id="1",
+ parent_id="2")
+
def test_disable(self):
web.disable()
- self.assertTrue(web._DISABLED)
+ self.assertFalse(web._ENABLED)
def test_enabled(self):
web.disable()
web.enable()
- self.assertFalse(web._DISABLED)
+ self.assertTrue(web._ENABLED)
diff --git a/osprofiler/web.py b/osprofiler/web.py
index fe1bb0efc498b0945c18b0741e27dc05e02a201e..20875b1da1a40265ae658ee6c833dca6792ee170 100644
--- a/osprofiler/web.py
+++ b/osprofiler/web.py
@@ -22,8 +22,14 @@ from osprofiler import profiler
# Trace keys that are required or optional, any other
# keys that are present will cause the trace to be rejected...
-_REQUIRED_KEYS = ('base_id', 'hmac_key')
-_OPTIONAL_KEYS = ('parent_id',)
+_REQUIRED_KEYS = ("base_id", "hmac_key")
+_OPTIONAL_KEYS = ("parent_id",)
+
+#: Http header that will contain the needed traces data.
+X_TRACE_INFO = "X-Trace-Info"
+
+#: Http header that will contain the traces data hmac (that will be validated).
+X_TRACE_HMAC = "X-Trace-HMAC"
def get_trace_id_headers():
@@ -33,13 +39,14 @@ def get_trace_id_headers():
data = {"base_id": p.get_base_id(), "parent_id": p.get_id()}
pack = utils.signed_pack(data, p.hmac_key)
return {
- "X-Trace-Info": pack[0],
- "X-Trace-HMAC": pack[1]
+ X_TRACE_INFO: pack[0],
+ X_TRACE_HMAC: pack[1]
}
return {}
-_DISABLED = False
+_ENABLED = None
+_HMAC_KEYS = None
def disable():
@@ -48,20 +55,21 @@ def disable():
This is the alternative way to disable middleware. It will be used to be
able to disable middleware via oslo.config.
"""
- global _DISABLED
- _DISABLED = True
+ global _ENABLED
+ _ENABLED = False
-def enable():
+def enable(hmac_keys=None):
"""Enable middleware."""
- global _DISABLED
- _DISABLED = False
+ global _ENABLED, _HMAC_KEYS
+ _ENABLED = True
+ _HMAC_KEYS = utils.split(hmac_keys or "")
class WsgiMiddleware(object):
"""WSGI Middleware that enables tracing for an application."""
- def __init__(self, application, hmac_keys, enabled=False):
+ def __init__(self, application, hmac_keys=None, enabled=False, **kwargs):
"""Initialize middleware with api-paste.ini arguments.
:application: wsgi app
@@ -71,6 +79,10 @@ class WsgiMiddleware(object):
by only those who knows this key which helps
avoid DDOS.
:enabled: This middleware can be turned off fully if enabled is False.
+ :kwargs: Other keyword arguments.
+ NOTE(tovin07): Currently, this `kwargs` is not used at all.
+ It's here to avoid some extra keyword arguments in local_conf
+ that cause `__init__() got an unexpected keyword argument`.
"""
self.application = application
self.name = "wsgi"
@@ -95,12 +107,13 @@ class WsgiMiddleware(object):
@webob.dec.wsgify
def __call__(self, request):
- if _DISABLED or not self.enabled:
+ if (_ENABLED is not None and not _ENABLED
+ or _ENABLED is None and not self.enabled):
return request.get_response(self.application)
- trace_info = utils.signed_unpack(request.headers.get("X-Trace-Info"),
- request.headers.get("X-Trace-HMAC"),
- self.hmac_keys)
+ trace_info = utils.signed_unpack(request.headers.get(X_TRACE_INFO),
+ request.headers.get(X_TRACE_HMAC),
+ _HMAC_KEYS or self.hmac_keys)
if not self._trace_is_valid(trace_info):
return request.get_response(self.application)
@@ -108,12 +121,14 @@ class WsgiMiddleware(object):
profiler.init(**trace_info)
info = {
"request": {
- "host_url": request.host_url,
"path": request.path,
"query": request.query_string,
"method": request.method,
"scheme": request.scheme
}
}
- with profiler.Trace(self.name, info=info):
- return request.get_response(self.application)
+ try:
+ with profiler.Trace(self.name, info=info):
+ return request.get_response(self.application)
+ finally:
+ profiler.clean()
diff --git a/playbooks/osprofiler-post.yaml b/playbooks/osprofiler-post.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7328c74329ae883a86d8e1411ba4328b74d8d2f9
--- /dev/null
+++ b/playbooks/osprofiler-post.yaml
@@ -0,0 +1,58 @@
+- hosts: controller
+ vars:
+ osprofiler_traces_dir: '/opt/stack/osprofiler-traces'
+ tasks:
+ - name: Create directory for traces
+ become: True
+ become_user: stack
+ file:
+ path: '{{ osprofiler_traces_dir }}'
+ state: directory
+ owner: stack
+ group: stack
+
+ - name: Read connection string from a file
+ command: "cat /opt/stack/.osprofiler_connection_string"
+ register: osprofiler_connection_string
+
+ - debug:
+ msg: "OSProfiler connection string is: {{ osprofiler_connection_string.stdout }}"
+
+ - name: Get list of traces
+ command: "osprofiler trace list --connection-string {{ osprofiler_connection_string.stdout }}"
+ become: True
+ become_user: stack
+ register: osprofiler_trace_list
+
+ - debug:
+ msg: "{{ osprofiler_trace_list }}"
+
+ - name: Save traces to files
+ shell: |
+ osprofiler trace list --connection-string {{ osprofiler_connection_string.stdout }} > {{ osprofiler_traces_dir }}/trace_list.txt
+ cat {{ osprofiler_traces_dir }}/trace_list.txt | tail -n +4 | head -n -1 | awk '{print $2}' > {{ osprofiler_traces_dir }}/trace_ids.txt
+
+ while read p; do
+ osprofiler trace show --connection-string {{ osprofiler_connection_string.stdout }} --html $p > {{ osprofiler_traces_dir }}/trace-$p.html
+ done < {{ osprofiler_traces_dir }}/trace_ids.txt
+ become: True
+ become_user: stack
+
+ - name: Gzip trace files
+ become: yes
+ become_user: stack
+ shell: "gzip * -9 -q | true"
+ args:
+ chdir: '{{ osprofiler_traces_dir }}'
+
+ - name: Sync trace files to Zuul
+ become: yes
+ synchronize:
+ src: "{{ osprofiler_traces_dir }}"
+ dest: "{{ zuul.executor.log_root }}"
+ mode: pull
+ copy_links: true
+ verify_host: true
+ rsync_opts:
+ - "--include=/**"
+ - "--include=*/"
diff --git a/releasenotes/notes/add-reno-996dd44974d53238.yaml b/releasenotes/notes/add-reno-996dd44974d53238.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2234c38f57f219f32801c4515ec75cdb7cf13511
--- /dev/null
+++ b/releasenotes/notes/add-reno-996dd44974d53238.yaml
@@ -0,0 +1,3 @@
+---
+other:
+ - Introduce reno for deployer release notes.
diff --git a/releasenotes/notes/drop-python-2-7-73d3113c69d724d6.yaml b/releasenotes/notes/drop-python-2-7-73d3113c69d724d6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0554c61bd6e07f89145c1fd28cfc51a73e47c69a
--- /dev/null
+++ b/releasenotes/notes/drop-python-2-7-73d3113c69d724d6.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Python 2.7 support has been dropped. The minimum version of Python now
+ supported by osprofiler is Python 3.6.
diff --git a/releasenotes/notes/redis-improvement-d4c91683fc89f570.yaml b/releasenotes/notes/redis-improvement-d4c91683fc89f570.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2ee2b94c0936c4c65ad355e1ff91fe34cfcfdba
--- /dev/null
+++ b/releasenotes/notes/redis-improvement-d4c91683fc89f570.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - |
+ Redis storage schema is optimized for higher performance.
+ Previously Redis driver stored each tracing event under its own key,
+ as result both list and get operations required full scan of the database.
+ With the optimized schema traces are stored as Redis lists under a key
+ equal to trace id. So list operation iterates only over unique
+ trace ids and get operation retrieves content of a specified list.
+ Note that list operation still needs to retrieve at least 1 event
+ from the trace to get a timestamp.
+upgrade:
+ - |
+ The optimized Redis driver is backward compatible: while new events are stored
+ using new schema the driver can retrieve existing events using both old and new
+ schemas.
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ea08ddc9280587dc82e504da2ba301d9a96d60a
--- /dev/null
+++ b/releasenotes/source/conf.py
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+# sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'openstackdocstheme',
+ 'reno.sphinxext',
+]
+
+# openstackdocstheme options
+openstackdocs_repo_name = 'openstack/osprofiler'
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'osprofiler'
+openstackdocs_bug_tag = ''
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'osprofiler Release Notes'
+copyright = u'2016, osprofiler Developers'
+
+# Release notes do not need a version in the title, they span
+# multiple versions.
+# The full version, including alpha/beta/rc tags.
+release = ''
+# The short X.Y version.
+version = ''
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+# language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'native'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'openstackdocs'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'osprofilerReleaseNotesDoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ # 'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ('index', 'osprofilerReleaseNotes.tex',
+ u'osprofiler Release Notes Documentation',
+ u'osprofiler Developers', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'osprofilerReleaseNotes',
+ u'osprofiler Release Notes Documentation',
+ [u'osprofiler Developers'], 1)
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'osprofilerReleaseNotes',
+ u'osprofiler Release Notes Documentation',
+ u'osprofiler Developers', 'osprofilerReleaseNotes',
+ 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4dd4d95ba7f3276e5941e647062faacc15c699d7
--- /dev/null
+++ b/releasenotes/source/index.rst
@@ -0,0 +1,15 @@
+==========================
+ osprofiler Release Notes
+==========================
+
+ .. toctree::
+ :maxdepth: 1
+
+ unreleased
+ ussuri
+ train
+ stein
+ rocky
+ queens
+ pike
+ ocata
diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ebe62f42e1793ff713b1eca98e4453306b911cdd
--- /dev/null
+++ b/releasenotes/source/ocata.rst
@@ -0,0 +1,6 @@
+===================================
+ Ocata Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: origin/stable/ocata
diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e43bfc0ce198dc0834a2835faa9338009de1542a
--- /dev/null
+++ b/releasenotes/source/pike.rst
@@ -0,0 +1,6 @@
+===================================
+ Pike Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: stable/pike
diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst
new file mode 100644
index 0000000000000000000000000000000000000000..36ac6160ca81bedfe9915584172a5596b147a5e6
--- /dev/null
+++ b/releasenotes/source/queens.rst
@@ -0,0 +1,6 @@
+===================================
+ Queens Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: stable/queens
diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst
new file mode 100644
index 0000000000000000000000000000000000000000..40dd517b756831137a4e891032188997c495d9b2
--- /dev/null
+++ b/releasenotes/source/rocky.rst
@@ -0,0 +1,6 @@
+===================================
+ Rocky Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: stable/rocky
diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst
new file mode 100644
index 0000000000000000000000000000000000000000..efaceb667be6ab4353004858217b8904ef83af36
--- /dev/null
+++ b/releasenotes/source/stein.rst
@@ -0,0 +1,6 @@
+===================================
+ Stein Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: stable/stein
diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst
new file mode 100644
index 0000000000000000000000000000000000000000..583900393c4a944f8399de94f5f6cbcaa51124d8
--- /dev/null
+++ b/releasenotes/source/train.rst
@@ -0,0 +1,6 @@
+==========================
+Train Series Release Notes
+==========================
+
+.. release-notes::
+ :branch: stable/train
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cd22aabcccdafb09197c7a37faef4b740ac385b0
--- /dev/null
+++ b/releasenotes/source/unreleased.rst
@@ -0,0 +1,5 @@
+==============================
+ Current Series Release Notes
+==============================
+
+.. release-notes::
diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e21e50e0c616efa6319759a8533eb042b20e7bca
--- /dev/null
+++ b/releasenotes/source/ussuri.rst
@@ -0,0 +1,6 @@
+===========================
+Ussuri Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/ussuri
diff --git a/requirements.txt b/requirements.txt
index 16b03c114dc8f696dc0dfab9bf32a330f771c4cf..7719bc4c1932b201fe8ca82241ffcba094d0c6e5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,9 @@
-argparse
-six>=1.7.0
-WebOb>=1.2.3
+netaddr>=0.7.18 # BSD
+oslo.concurrency>=3.26.0 # Apache-2.0
+oslo.serialization>=2.18.0 # Apache-2.0
+oslo.utils>=3.33.0 # Apache-2.0
+PrettyTable<0.8,>=0.7.2 # BSD
+requests>=2.14.2 # Apache-2.0
+six>=1.10.0 # MIT
+WebOb>=1.7.1 # MIT
+importlib_metadata>=1.7.0;python_version<'3.8' # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 91f497c0765ab67b130ec995ed4c9be7f4ed5273..bc64e0a396cad3f91188dd8e7d88327943c316d5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,12 +1,12 @@
[metadata]
name = osprofiler
-version = 0.3.0
summary = OpenStack Profiler Library
description-file =
README.rst
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
-home-page = http://www.openstack.org/
+author-email = openstack-discuss@lists.openstack.org
+home-page = https://docs.openstack.org/osprofiler/latest/
+python-requires = >=3.6
classifier =
Environment :: OpenStack
Intended Audience :: Developers
@@ -14,24 +14,25 @@ classifier =
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.6
- Programming Language :: Python :: 2.7
- Programming Language :: Python :: 3.3
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.6
+ Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3 :: Only
+ Programming Language :: Python :: Implementation :: CPython
[files]
packages =
osprofiler
-[global]
-setup-hooks =
- pbr.hooks.setup_hook
-
-[build_sphinx]
-all_files = 1
-build-dir = doc/build
-source-dir = doc/source
+[extras]
+oslo_config =
+ oslo.config>=5.2.0 # Apache-2.0
[entry_points]
+oslo.config.opts =
+ osprofiler = osprofiler.opts:list_opts
console_scripts =
osprofiler = osprofiler.cmd.shell:main
+paste.filter_factory =
+ osprofiler = osprofiler.web:WsgiMiddleware.factory
diff --git a/setup.py b/setup.py
index b96e399085e582a4c04253bcd44b9fcb4a711822..a7c1710c098126504b34264dfda3c6970f4a390c 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,19 @@
-#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
setuptools.setup(
- setup_requires=['pbr'],
+ setup_requires=['pbr>=2.0'],
pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index c378c04d501e8c048318c59f2f60af0fceca66b0..a227fe43f49b33f6135389fe0be2429f9aaed63d 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,27 @@
-hacking>=0.8.0,<0.9
+hacking>=3.1.0,<3.2.0 # Apache-2.0
+flake8-import-order==0.18.1 # LGPLv3
-coverage>=3.6
-discover
-mock>=1.0
-python-subunit>=0.0.18
-testrepository>=0.0.18
-testtools>=0.9.34
+coverage>=4.0 # Apache-2.0
+ddt>=1.0.1 # MIT
+stestr>=2.0.0 # Apache-2.0
+testtools>=2.2.0 # MIT
-oslosphinx
-sphinx>=1.1.2,!=1.2.0,<1.3
+openstackdocstheme>=2.2.1 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+
+# Bandit security code scanner
+bandit>=1.6.0,<1.7.0 # Apache-2.0
+
+pymongo!=3.1,>=3.0.2 # Apache-2.0
+
+# Elasticsearch python client
+elasticsearch>=2.0.0,<3.0.0 # Apache-2.0
+
+# Redis python client
+redis>=2.10.0 # MIT
+
+# Build release notes
+reno>=3.1.0 # Apache-2.0
+
+# For Jaeger Tracing
+jaeger-client>=3.8.0 # Apache-2.0
diff --git a/tests/cmd/test_shell.py b/tests/cmd/test_shell.py
deleted file mode 100644
index ec2951fdd8306b0d7708281dc172f4df4175a1a5..0000000000000000000000000000000000000000
--- a/tests/cmd/test_shell.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import os
-import sys
-
-import mock
-import six
-
-from osprofiler.cmd import exc
-from osprofiler.cmd import shell
-from tests import test
-
-
-class ShellTestCase(test.TestCase):
- def setUp(self):
- super(ShellTestCase, self).setUp()
- self.old_environment = os.environ.copy()
- os.environ = {
- 'OS_USERNAME': 'username',
- 'OS_USER_ID': 'user_id',
- 'OS_PASSWORD': 'password',
- 'OS_USER_DOMAIN_ID': 'user_domain_id',
- 'OS_USER_DOMAIN_NAME': 'user_domain_name',
- 'OS_PROJECT_DOMAIN_ID': 'project_domain_id',
- 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
- 'OS_PROJECT_ID': 'project_id',
- 'OS_PROJECT_NAME': 'project_name',
- 'OS_TENANT_ID': 'tenant_id',
- 'OS_TENANT_NAME': 'tenant_name',
- 'OS_AUTH_URL': 'http://127.0.0.1:5000/v3/',
- 'OS_AUTH_TOKEN': 'pass',
- 'OS_CACERT': '/path/to/cacert',
- 'OS_SERVICE_TYPE': 'service_type',
- 'OS_ENDPOINT_TYPE': 'public',
- 'OS_REGION_NAME': 'test'
- }
-
- self.ceiloclient = mock.MagicMock()
- sys.modules['ceilometerclient'] = self.ceiloclient
- self.addCleanup(sys.modules.pop, 'ceilometerclient', None)
- ceilo_modules = ['client', 'exc', 'shell']
- for module in ceilo_modules:
- sys.modules['ceilometerclient.%s' % module] = getattr(
- self.ceiloclient, module)
- self.addCleanup(
- sys.modules.pop, 'ceilometerclient.%s' % module, None)
-
- def tearDown(self):
- super(ShellTestCase, self).tearDown()
- os.environ = self.old_environment
-
- @mock.patch("sys.stdout", six.StringIO())
- @mock.patch("osprofiler.cmd.shell.OSProfilerShell")
- def test_shell_main(self, mock_shell):
- mock_shell.side_effect = exc.CommandError('some_message')
- shell.main()
- self.assertEqual('some_message\n', sys.stdout.getvalue())
-
- def run_command(self, cmd):
- shell.OSProfilerShell(cmd.split())
-
- def _test_with_command_error(self, cmd, expected_message):
- try:
- self.run_command(cmd)
- except exc.CommandError as actual_error:
- self.assertEqual(str(actual_error), expected_message)
- else:
- raise ValueError(
- 'Expected: `osprofiler.cmd.exc.CommandError` is raised with '
- 'message: "%s".' % expected_message)
-
- def test_username_is_not_presented(self):
- os.environ.pop("OS_USERNAME")
- msg = ("You must provide a username via either --os-username or "
- "via env[OS_USERNAME]")
- self._test_with_command_error("trace show fake-uuid", msg)
-
- def test_password_is_not_presented(self):
- os.environ.pop("OS_PASSWORD")
- msg = ("You must provide a password via either --os-password or "
- "via env[OS_PASSWORD]")
- self._test_with_command_error("trace show fake-uuid", msg)
-
- def test_auth_url(self):
- os.environ.pop("OS_AUTH_URL")
- msg = ("You must provide an auth url via either --os-auth-url or "
- "via env[OS_AUTH_URL]")
- self._test_with_command_error("trace show fake-uuid", msg)
-
- def test_no_project_and_domain_set(self):
- os.environ.pop("OS_PROJECT_ID")
- os.environ.pop("OS_PROJECT_NAME")
- os.environ.pop("OS_TENANT_ID")
- os.environ.pop("OS_TENANT_NAME")
- os.environ.pop("OS_USER_DOMAIN_ID")
- os.environ.pop("OS_USER_DOMAIN_NAME")
-
- msg = ("You must provide a project_id via either --os-project-id or "
- "via env[OS_PROJECT_ID] and a domain_name via either "
- "--os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or a "
- "domain_id via either --os-user-domain-id or via "
- "env[OS_USER_DOMAIN_ID]")
- self._test_with_command_error("trace show fake-uuid", msg)
-
- def test_trace_show_ceilometrclient_is_missed(self):
- sys.modules['ceilometerclient'] = None
- sys.modules['ceilometerclient.client'] = None
- sys.modules['ceilometerclient.exc'] = None
- sys.modules['ceilometerclient.shell'] = None
-
- self.assertRaises(ImportError, shell.main,
- 'trace show fake_uuid'.split())
-
- def test_trace_show_unauthorized(self):
- class FakeHTTPUnauthorized(Exception):
- http_status = 401
-
- self.ceiloclient.client.get_client.side_effect = FakeHTTPUnauthorized
-
- msg = "Invalid OpenStack Identity credentials."
- self._test_with_command_error("trace show fake_id", msg)
-
- def test_trace_show_unknown_error(self):
- class FakeException(Exception):
- pass
-
- self.ceiloclient.client.get_client.side_effect = FakeException
- msg = "Something has gone wrong. See logs for more details."
- self._test_with_command_error("trace show fake_id", msg)
-
- @mock.patch("osprofiler.parsers.ceilometer.get_notifications")
- @mock.patch("osprofiler.parsers.ceilometer.parse_notifications")
- def test_trace_show_no_selected_format(self, mock_notifications, mock_get):
- mock_get.return_value = "some_notificatios"
- msg = ("You should choose one of the following output-formats: "
- "--json or --html.")
- self._test_with_command_error("trace show fake_id", msg)
-
- @mock.patch("osprofiler.parsers.ceilometer.get_notifications")
- def test_trace_show_trace_id_not_found(self, mock_get):
- mock_get.return_value = None
-
- fake_trace_id = "fake_id"
- msg = ("Trace with UUID %s not found. There are 2 possible reasons: \n"
- " 1) You are using not admin credentials\n"
- " 2) You specified wrong trace id" % fake_trace_id)
-
- self._test_with_command_error("trace show %s" % fake_trace_id, msg)
-
- @mock.patch("sys.stdout", six.StringIO())
- @mock.patch("osprofiler.parsers.ceilometer.get_notifications")
- @mock.patch("osprofiler.parsers.ceilometer.parse_notifications")
- def test_trace_show_in_json(self, mock_notifications, mock_get):
- mock_get.return_value = "some notification"
- notifications = {
- 'info': {
- 'started': 0, 'finished': 0, 'name': 'total'}, 'children': []}
- mock_notifications.return_value = notifications
-
- self.run_command("trace show fake_id --json")
- self.assertEqual("%s\n" % json.dumps(notifications),
- sys.stdout.getvalue())
-
- @mock.patch("sys.stdout", six.StringIO())
- @mock.patch("osprofiler.parsers.ceilometer.get_notifications")
- @mock.patch("osprofiler.parsers.ceilometer.parse_notifications")
- def test_trace_show_in_html(self, mock_notifications, mock_get):
- mock_get.return_value = "some notification"
-
- notifications = {
- 'info': {
- 'started': 0, 'finished': 0, 'name': 'total'}, 'children': []}
- mock_notifications.return_value = notifications
-
- #NOTE(akurilin): to simplify assert statement, html-template should be
- # replaced.
- html_template = (
- "A long time ago in a galaxy far, far away..."
- " some_data = $DATA"
- "It is a period of civil war. Rebel"
- "spaceships, striking from a hidden"
- "base, have won their first victory"
- "against the evil Galactic Empire.")
-
- with mock.patch("osprofiler.cmd.commands.open",
- mock.mock_open(read_data=html_template), create=True):
- self.run_command("trace show fake_id --html")
- self.assertEqual("A long time ago in a galaxy far, far away..."
- " some_data = %s"
- "It is a period of civil war. Rebel"
- "spaceships, striking from a hidden"
- "base, have won their first victory"
- "against the evil Galactic Empire."
- "\n" % json.dumps(notifications),
- sys.stdout.getvalue())
-
- @mock.patch("sys.stdout", six.StringIO())
- @mock.patch("osprofiler.parsers.ceilometer.get_notifications")
- @mock.patch("osprofiler.parsers.ceilometer.parse_notifications")
- def test_trace_show_write_to_file(self, mock_notifications, mock_get):
- mock_get.return_value = "some notification"
- notifications = {
- 'info': {
- 'started': 0, 'finished': 0, 'name': 'total'}, 'children': []}
- mock_notifications.return_value = notifications
-
- with mock.patch("osprofiler.cmd.commands.open",
- mock.mock_open(), create=True) as mock_open:
- self.run_command("trace show fake_id --json --out='/file'")
-
- output = mock_open.return_value.__enter__.return_value
- output.write.assert_called_once_with(json.dumps(notifications))
diff --git a/tests/notifiers/test_base.py b/tests/notifiers/test_base.py
deleted file mode 100644
index 8234457dd6b0d9e3606fba634c5e6db8e246eadb..0000000000000000000000000000000000000000
--- a/tests/notifiers/test_base.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from osprofiler._notifiers import base
-from tests import test
-
-
-class NotifierBaseTestCase(test.TestCase):
-
- def test_factory(self):
-
- class A(base.Notifier):
-
- def notify(self, a):
- return a
-
- self.assertEqual(base.Notifier.factory("A")(10), 10)
-
- def test_factory_with_args(self):
-
- class B(base.Notifier):
-
- def __init__(self, a, b=10):
- self.a = a
- self.b = b
-
- def notify(self, c):
- return self.a + self.b + c
-
- self.assertEqual(base.Notifier.factory("B", 5, b=7)(10), 22)
-
- def test_factory_not_found(self):
- self.assertRaises(TypeError, base.Notifier.factory, "non existing")
-
- def test_notify(self):
- base.Notifier().notify("")
-
- def test_plugins_are_imported(self):
- base.Notifier.factory("Messaging", mock.MagicMock(), "context",
- "transport", "project", "service", "host")
diff --git a/tests/notifiers/test_messaging.py b/tests/notifiers/test_messaging.py
deleted file mode 100644
index e9828924ab7caf879be313b43a62ffaae971c800..0000000000000000000000000000000000000000
--- a/tests/notifiers/test_messaging.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from osprofiler._notifiers import base
-from tests import test
-
-
-class MessagingTestCase(test.TestCase):
-
- def test_init_and_notify(self):
-
- messaging = mock.MagicMock()
- context = "context"
- transport = "transport"
- project = "project"
- service = "service"
- host = "host"
-
- notify_func = base.Notifier.factory("Messaging", messaging, context,
- transport, project, service, host)
-
- messaging.Notifier.assert_called_once_with(
- transport, publisher_id=host, driver="messaging",
- topic="profiler", retry=0)
-
- info = {
- "a": 10
- }
- notify_func(info)
-
- expected_data = {"project": project, "service": service}
- expected_data.update(info)
- messaging.Notifier().info.assert_called_once_with(
- context, "profiler.%s" % service, expected_data)
-
- messaging.reset_mock()
- notify_func(info, context="my_context")
- messaging.Notifier().info.assert_called_once_with(
- "my_context", "profiler.%s" % service, expected_data)
diff --git a/tests/parsers/test_ceilometer.py b/tests/parsers/test_ceilometer.py
deleted file mode 100644
index 6442a17f4386221d7c31ad6660ee64401789f6ca..0000000000000000000000000000000000000000
--- a/tests/parsers/test_ceilometer.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from osprofiler.parsers import ceilometer
-
-from tests import test
-
-
-class CeilometerParserTestCase(test.TestCase):
-
- def test_build_empty_tree(self):
- self.assertEqual(ceilometer._build_tree({}), [])
-
- def test_build_complex_tree(self):
- test_input = {
- "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}},
- "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}},
- "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}},
- "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}},
- "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}},
- "113": {"parent_id": "11", "trace_id": "113",
- "info": {"started": 3}},
- "112": {"parent_id": "11", "trace_id": "112",
- "info": {"started": 2}},
- "114": {"parent_id": "11", "trace_id": "114",
- "info": {"started": 5}}
- }
-
- expected_output = [
- {
- "parent_id": "0",
- "trace_id": "1",
- "info": {"started": 0},
- "children": [
- {
- "parent_id": "1",
- "trace_id": "11",
- "info": {"started": 1},
- "children": [
- {"parent_id": "11", "trace_id": "112",
- "info": {"started": 2}, "children": []},
- {"parent_id": "11", "trace_id": "113",
- "info": {"started": 3}, "children": []},
- {"parent_id": "11", "trace_id": "114",
- "info": {"started": 5}, "children": []}
- ]
- }
- ]
- },
- {
- "parent_id": "0",
- "trace_id": "2",
- "info": {"started": 1},
- "children": [
- {"parent_id": "2", "trace_id": "21",
- "info": {"started": 6}, "children": []},
- {"parent_id": "2", "trace_id": "22",
- "info": {"started": 7}, "children": []}
- ]
- }
- ]
-
- self.assertEqual(ceilometer._build_tree(test_input), expected_output)
-
- def test_parse_notifications_empty(self):
- expected = {
- "info": {
- "name": "total",
- "started": 0,
- "finished": 0
- },
- "children": []
- }
- self.assertEqual(ceilometer.parse_notifications([]), expected)
-
- def test_parse_notifications(self):
- samples = [
- {
- "id": "896f5e52-d4c9-11e3-a117-46c0b36ac153",
- "metadata": {
- "base_id": "f5587500-07d1-41a0-b434-525d3c28ac49",
- "event_type": "profiler.nova",
- "host": "0.0.0.0",
- "service": "osapi_compute",
- "project": "nova",
- "name": "WSGI-stop",
- "parent_id": "82281b35-63aa-45fc-8578-5a32a66370ab",
- "trace_id": "837eb0bd-323a-4e3f-b223-3be78ad86aab"
- },
- "meter": "WSGI-stop",
- "project_id": None,
- "recorded_at": "2014-05-06T02:53:03.110724",
- "resource_id": "profiler-f5587500-07d1-41a0-b434-525d3c28ac49",
- "source": "openstack",
- "timestamp": "2014-05-06T02:52:59.357020",
- "type": "gauge",
- "unit": "sample",
- "user_id": None,
- "volume": 1.0
- },
- {
- "id": "895043a0-d4c9-11e3-a117-46c0b36ac153",
- "metadata": {
- "base_id": "f5587500-07d1-41a0-b434-525d3c28ac49",
- "event_type": "profiler.nova",
- "host": "0.0.0.0",
- "service": "osapi_compute",
- "project": "nova",
- "name": "WSGI-start",
- "parent_id": "82281b35-63aa-45fc-8578-5a32a66370ab",
- "trace_id": "837eb0bd-323a-4e3f-b223-3be78ad86aab"
- },
- "meter": "WSGI-start",
- "project_id": None,
- "recorded_at": "2014-05-06T02:53:03.020620",
- "resource_id": "profiler-f5587500-07d1-41a0-b434-525d3c28ac49",
- "source": "openstack",
- "timestamp": "2014-05-06T02:52:59.225552",
- "type": "gauge",
- "unit": "sample",
- "user_id": None,
- "volume": 1.0
- },
-
- {
- "id": "89558414-d4c9-11e3-a117-46c0b36ac153",
- "metadata": {
- "base_id": "f5587500-07d1-41a0-b434-525d3c28ac49",
- "event_type": "profiler.nova",
- "host": "0.0.0.0",
- "service": "osapi_compute",
- "project": "nova",
- "info.db:multiparams": "(immutabledict({}),)",
- "info.db:params": "{}",
- "name": "db-start",
- "parent_id": "837eb0bd-323a-4e3f-b223-3be78ad86aab",
- "trace_id": "f8ab042e-1085-4df2-9f3a-cfb6390b8090"
- },
- "meter": "db-start",
- "project_id": None,
- "recorded_at": "2014-05-06T02:53:03.038692",
- "resource_id": "profiler-f5587500-07d1-41a0-b434-525d3c28ac49",
- "source": "openstack",
- "timestamp": "2014-05-06T02:52:59.273422",
- "type": "gauge",
- "unit": "sample",
- "user_id": None,
- "volume": 1.0
- },
- {
- "id": "892d3018-d4c9-11e3-a117-46c0b36ac153",
- "metadata": {
- "base_id": "f5587500-07d1-41a0-b434-525d3c28ac49",
- "event_type": "profiler.generic",
- "host": "ubuntu",
- "service": "nova-conductor",
- "project": "nova",
- "name": "db-stop",
- "parent_id": "aad4748f-99d5-45c8-be0a-4025894bb3db",
- "trace_id": "8afee05d-0ad2-4515-bd03-db0f2d30eed0"
- },
- "meter": "db-stop",
- "project_id": None,
- "recorded_at": "2014-05-06T02:53:02.894015",
- "resource_id": "profiler-f5587500-07d1-41a0-b434-525d3c28ac49",
- "source": "openstack",
- "timestamp": "2014-05-06T02:53:00.473201",
- "type": "gauge",
- "unit": "sample",
- "user_id": None,
- "volume": 1.0
- }
- ]
-
- excepted = {
- "info": {
- "finished": 1247,
- "name": "total",
- "started": 0
- },
- "children": [
- {
- "info": {
- "finished": 131,
- "host": "0.0.0.0",
- "service": "osapi_compute",
- "name": "WSGI",
- "project": "nova",
- "started": 0
- },
- "parent_id": "82281b35-63aa-45fc-8578-5a32a66370ab",
- "trace_id": "837eb0bd-323a-4e3f-b223-3be78ad86aab",
- "children": [{
- "children": [],
- "info": {
- "finished": 47,
- "host": "0.0.0.0",
- "service": "osapi_compute",
- "project": "nova",
- "info.db:multiparams": "(immutabledict({}),)",
- "info.db:params": "{}",
- "name": "db",
- "started": 47
- },
-
- "parent_id": "837eb0bd-323a-4e3f-b223-3be78ad86aab",
- "trace_id": "f8ab042e-1085-4df2-9f3a-cfb6390b8090"
- }]
- },
- {
- "children": [],
- "info": {
- "finished": 1247,
- "host": "ubuntu",
- "name": "db",
- "service": "nova-conductor",
- "project": "nova",
- "started": 1247
- },
- "parent_id": "aad4748f-99d5-45c8-be0a-4025894bb3db",
- "trace_id": "8afee05d-0ad2-4515-bd03-db0f2d30eed0"
- }
- ]
- }
-
- self.assertEqual(ceilometer.parse_notifications(samples), excepted)
-
- def test_get_notifications(self):
- mock_ceil_client = mock.MagicMock()
- results = [mock.MagicMock(), mock.MagicMock()]
- mock_ceil_client.query_samples.query.return_value = results
- base_id = "10"
-
- result = ceilometer.get_notifications(mock_ceil_client, base_id)
-
- expected_filter = '{"=": {"resource_id": "profiler-%s"}}' % base_id
- mock_ceil_client.query_samples.query.assert_called_once_with(
- expected_filter, None, None)
- self.assertEqual(result, [results[0].to_dict(), results[1].to_dict()])
diff --git a/tests/test_profiler.py b/tests/test_profiler.py
deleted file mode 100644
index 726bc62f135e0bd61879cc0b321d9cd994706e6a..0000000000000000000000000000000000000000
--- a/tests/test_profiler.py
+++ /dev/null
@@ -1,314 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import copy
-import mock
-import re
-
-from osprofiler import profiler
-
-from tests import test
-
-
-class ProfilerGlobMethodsTestCase(test.TestCase):
-
- def test_get_profiler_not_inited(self):
- profiler._clean()
- self.assertIsNone(profiler.get())
-
- def test_get_profiler_and_init(self):
- p = profiler.init("secret", base_id="1", parent_id="2")
- self.assertEqual(profiler.get(), p)
-
- self.assertEqual(p.get_base_id(), "1")
- # NOTE(boris-42): until we make first start we don't have
- self.assertEqual(p.get_id(), "2")
-
- def test_start_not_inited(self):
- profiler._clean()
- profiler.start("name")
-
- def test_start(self):
- p = profiler.init("secret", base_id="1", parent_id="2")
- p.start = mock.MagicMock()
- profiler.start("name", info="info")
- p.start.assert_called_once_with("name", info="info")
-
- def test_stop_not_inited(self):
- profiler._clean()
- profiler.stop()
-
- def test_stop(self):
- p = profiler.init("secret", base_id="1", parent_id="2")
- p.stop = mock.MagicMock()
- profiler.stop(info="info")
- p.stop.assert_called_once_with(info="info")
-
-
-class ProfilerTestCase(test.TestCase):
-
- def test_profiler_get_base_id(self):
- prof = profiler._Profiler("secret", base_id="1", parent_id="2")
- self.assertEqual(prof.get_base_id(), "1")
-
- @mock.patch("osprofiler.profiler.uuid.uuid4")
- def test_profiler_get_parent_id(self, mock_uuid4):
- mock_uuid4.return_value = "42"
- prof = profiler._Profiler("secret", base_id="1", parent_id="2")
- prof.start("test")
- self.assertEqual(prof.get_parent_id(), "2")
-
- @mock.patch("osprofiler.profiler.uuid.uuid4")
- def test_profiler_get_base_id_unset_case(self, mock_uuid4):
- mock_uuid4.return_value = "42"
- prof = profiler._Profiler("secret")
- self.assertEqual(prof.get_base_id(), "42")
- self.assertEqual(prof.get_parent_id(), "42")
-
- @mock.patch("osprofiler.profiler.uuid.uuid4")
- def test_profiler_get_id(self, mock_uuid4):
- mock_uuid4.return_value = "43"
- prof = profiler._Profiler("secret")
- prof.start("test")
- self.assertEqual(prof.get_id(), "43")
-
- @mock.patch("osprofiler.profiler.uuid.uuid4")
- @mock.patch("osprofiler.profiler.notifier.notify")
- def test_profiler_start(self, mock_notify, mock_uuid4):
- mock_uuid4.return_value = "44"
-
- info = {"some": "info"}
- payload = {
- "name": "test-start",
- "base_id": "1",
- "parent_id": "2",
- "trace_id": "44",
- "info": info
- }
-
- prof = profiler._Profiler("secret", base_id="1", parent_id="2")
- prof.start("test", info=info)
-
- mock_notify.assert_called_once_with(payload)
-
- @mock.patch("osprofiler.profiler.notifier.notify")
- def test_profiler_stop(self, mock_notify):
- prof = profiler._Profiler("secret", base_id="1", parent_id="2")
- prof._trace_stack.append("44")
- prof._name.append("abc")
-
- info = {"some": "info"}
- prof.stop(info=info)
-
- payload = {
- "name": "abc-stop",
- "base_id": "1",
- "parent_id": "2",
- "trace_id": "44",
- "info": info
- }
-
- mock_notify.assert_called_once_with(payload)
- self.assertEqual(len(prof._name), 0)
- self.assertEqual(prof._trace_stack, collections.deque(["1", "2"]))
-
- def test_profiler_hmac(self):
- hmac = "secret"
- prof = profiler._Profiler(hmac, base_id="1", parent_id="2")
- self.assertEqual(hmac, prof.hmac_key)
-
-
-class WithTraceTestCase(test.TestCase):
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_with_trace(self, mock_start, mock_stop):
-
- with profiler.Trace("a", info="a1"):
- mock_start.assert_called_once_with("a", info="a1")
- mock_start.reset_mock()
- with profiler.Trace("b", info="b1"):
- mock_start.assert_called_once_with("b", info="b1")
- mock_stop.assert_called_once_with()
- mock_stop.reset_mock()
- mock_stop.assert_called_once_with()
-
-
-@profiler.trace("function", info={"info": "some_info"})
-def tracede_func(i):
- return i
-
-
-@profiler.trace("hide_args", hide_args=True)
-def trace_hide_args_func(a, i=10):
- return (a, i)
-
-
-class TraceDecoratorTestCase(test.TestCase):
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_with_args(self, mock_start, mock_stop):
- self.assertEqual(1, tracede_func(1))
- expected_info = {
- "info": "some_info",
- "function": {
- "name": "tests.test_profiler.tracede_func",
- "args": str((1,)),
- "kwargs": str({})
- }
- }
- mock_start.assert_called_once_with("function", info=expected_info)
- mock_stop.assert_called_once_with()
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_without_args(self, mock_start, mock_stop):
- self.assertEqual((1, 2), trace_hide_args_func(1, i=2))
- expected_info = {
- "function": {
- "name": "tests.test_profiler.trace_hide_args_func"
- }
- }
- mock_start.assert_called_once_with("hide_args", info=expected_info)
- mock_stop.assert_called_once_with()
-
-
-class FakeTracedCls(object):
-
- def method1(self, a, b, c=10):
- return a + b + c
-
- def method2(self, d, e):
- return d - e
-
- def method3(self, g=10, h=20):
- return g * h
-
- def _method(self, i):
- return i
-
-
-@profiler.trace_cls("rpc", info={"a": 10})
-class FakeTraceClassWithInfo(FakeTracedCls):
- pass
-
-
-@profiler.trace_cls("a", info={"b": 20}, hide_args=True)
-class FakeTraceClassHideArgs(FakeTracedCls):
- pass
-
-
-@profiler.trace_cls("rpc", trace_private=True)
-class FakeTracePrivate(FakeTracedCls):
- pass
-
-
-def py3_info(info):
- # NOTE(boris-42): py33 I hate you.
- info_py3 = copy.deepcopy(info)
- new_name = re.sub("FakeTrace[^.]*", "FakeTracedCls",
- info_py3["function"]["name"])
- info_py3["function"]["name"] = new_name
- return info_py3
-
-
-def possible_mock_calls(name, info):
- # NOTE(boris-42): py33 I hate you.
- return [mock.call(name, info=info), mock.call(name, info=py3_info(info))]
-
-
-class TraceClsDecoratorTestCase(test.TestCase):
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_args(self, mock_start, mock_stop):
- fake_cls = FakeTraceClassWithInfo()
- self.assertEqual(30, fake_cls.method1(5, 15))
- expected_info = {
- "a": 10,
- "function": {
- "name": "tests.test_profiler.FakeTraceClassWithInfo.method1",
- "args": str((fake_cls, 5, 15)),
- "kwargs": str({})
- }
- }
- self.assertEqual(1, len(mock_start.call_args_list))
- self.assertIn(mock_start.call_args_list[0],
- possible_mock_calls("rpc", expected_info))
- mock_stop.assert_called_once_with()
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_kwargs(self, mock_start, mock_stop):
- fake_cls = FakeTraceClassWithInfo()
- self.assertEqual(50, fake_cls.method3(g=5, h=10))
- expected_info = {
- "a": 10,
- "function": {
- "name": "tests.test_profiler.FakeTraceClassWithInfo.method3",
- "args": str((fake_cls,)),
- "kwargs": str({"g": 5, "h": 10})
- }
- }
- self.assertEqual(1, len(mock_start.call_args_list))
- self.assertIn(mock_start.call_args_list[0],
- possible_mock_calls("rpc", expected_info))
- mock_stop.assert_called_once_with()
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_without_private(self, mock_start, mock_stop):
- fake_cls = FakeTraceClassHideArgs()
- self.assertEqual(10, fake_cls._method(10))
- self.assertFalse(mock_start.called)
- self.assertFalse(mock_stop.called)
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_without_args(self, mock_start, mock_stop):
- fake_cls = FakeTraceClassHideArgs()
- self.assertEqual(40, fake_cls.method1(5, 15, c=20))
- expected_info = {
- "b": 20,
- "function": {
- "name": "tests.test_profiler.FakeTraceClassHideArgs.method1"
- }
- }
-
- self.assertEqual(1, len(mock_start.call_args_list))
- self.assertIn(mock_start.call_args_list[0],
- possible_mock_calls("a", expected_info))
- mock_stop.assert_called_once_with()
-
- @mock.patch("osprofiler.profiler.stop")
- @mock.patch("osprofiler.profiler.start")
- def test_private_methods(self, mock_start, mock_stop):
- fake_cls = FakeTracePrivate()
- self.assertEqual(5, fake_cls._method(5))
-
- expected_info = {
- "function": {
- "name": "tests.test_profiler.FakeTracePrivate._method",
- "args": str((fake_cls, 5)),
- "kwargs": str({})
- }
- }
-
- self.assertEqual(1, len(mock_start.call_args_list))
- self.assertIn(mock_start.call_args_list[0],
- possible_mock_calls("rpc", expected_info))
- mock_stop.assert_called_once_with()
diff --git a/tests/test_sqlalchemy.py b/tests/test_sqlalchemy.py
deleted file mode 100644
index 7c56049962af07d469ca5fb9e645936e8b6ebe29..0000000000000000000000000000000000000000
--- a/tests/test_sqlalchemy.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2014 Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from osprofiler import sqlalchemy
-
-from tests import test
-
-
-class SqlalchemyTracingTestCase(test.TestCase):
-
- @mock.patch("osprofiler.sqlalchemy.profiler")
- def test_before_execute(self, mock_profiler):
- handler = sqlalchemy._before_cursor_execute("sql")
-
- handler(mock.MagicMock(), 1, 2, 3, 4, 5)
- expected_info = {
- "db.statement": 2,
- "db.params": 3
- }
- mock_profiler.start.assert_called_once_with("sql", info=expected_info)
-
- @mock.patch("osprofiler.sqlalchemy.profiler")
- def test_after_execute(self, mock_profiler):
- handler = sqlalchemy._after_cursor_execute()
- handler(mock.MagicMock(), 1, 2, 3, 4, 5)
- mock_profiler.stop.assert_called_once_with()
-
- @mock.patch("osprofiler.sqlalchemy._before_cursor_execute")
- @mock.patch("osprofiler.sqlalchemy._after_cursor_execute")
- def test_add_tracing(self, mock_after_exc, mock_before_exc):
- sa = mock.MagicMock()
- engine = mock.MagicMock()
-
- mock_before_exc.return_value = "before"
- mock_after_exc.return_value = "after"
-
- sqlalchemy.add_tracing(sa, engine, "sql")
-
- mock_before_exc.assert_called_once_with("sql")
- mock_after_exc.assert_called_once_with()
- expected_calls = [
- mock.call(engine, "before_cursor_execute", "before"),
- mock.call(engine, "after_cursor_execute", "after")
- ]
- self.assertEqual(sa.event.listen.call_args_list, expected_calls)
-
- @mock.patch("osprofiler.sqlalchemy._before_cursor_execute")
- @mock.patch("osprofiler.sqlalchemy._after_cursor_execute")
- def test_disable_and_enable(self, mock_after_exc, mock_before_exc):
- sqlalchemy.disable()
-
- sa = mock.MagicMock()
- engine = mock.MagicMock()
- sqlalchemy.add_tracing(sa, engine, "sql")
- self.assertFalse(mock_after_exc.called)
- self.assertFalse(mock_before_exc.called)
-
- sqlalchemy.enable()
- sqlalchemy.add_tracing(sa, engine, "sql")
- self.assertTrue(mock_after_exc.called)
- self.assertTrue(mock_before_exc.called)
diff --git a/tools/lint.py b/tools/lint.py
index 69b88ca365ff6ce9319c4670cf62473cf5c59d77..d2a545b3f9f8892ad87b5367d7eb6e56aec8fb59 100644
--- a/tools/lint.py
+++ b/tools/lint.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright (c) 2013 Intel Corporation.
# All Rights Reserved.
#
diff --git a/tools/patch_tox_venv.py b/tools/patch_tox_venv.py
index dc9ce836bf7d06ee07f1b68eecdc9353c9cd5e7c..ee8f53c1a36d51d794222aba2a1130ddae09bf72 100644
--- a/tools/patch_tox_venv.py
+++ b/tools/patch_tox_venv.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/tox.ini b/tox.ini
index ce48b9e5144bc647a7d2f3ac6f9d9528beceea9a..112fd78e41160543f56a9fd5163e2eff93ad3529 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,39 +1,93 @@
[tox]
-minversion = 1.6
-skipsdist = True
-envlist = py26,py27,pep8
+minversion = 3.1.0
+# Needed to create ChangeLog for docs building
+skipsdist = False
+envlist = py38,pep8
+ignore_basepython_conflict = True
[testenv]
+basepython = python3
setenv = VIRTUAL_ENV={envdir}
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-install_command = pip install -U {opts} {packages}
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
+ -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
usedevelop = True
-commands = python setup.py testr --slowest --testr-args='{posargs}'
+commands = stestr run --slowest {posargs}
distribute = false
+[testenv:functional]
+setenv = {[testenv]setenv}
+ OS_TEST_PATH=./osprofiler/tests/functional
+deps =
+ {[testenv]deps}
+ oslo.messaging
+
+[testenv:functional-py36]
+basepython = python3.6
+setenv = {[testenv:functional]setenv}
+deps =
+ {[testenv:functional]deps}
+
[testenv:pep8]
-commands = flake8
+commands =
+ flake8
+ # Run security linter
+ bandit -r osprofiler -n5
distribute = false
[testenv:venv]
commands = {posargs}
[testenv:cover]
-commands = python setup.py testr --coverage --testr-args='{posargs}'
+setenv =
+ PYTHON=coverage run --source osprofiler --parallel-mode
+commands =
+ stestr run {posargs}
+ coverage combine
+ coverage html -d cover
+ coverage xml -o cover/coverage.xml
-[testenv:doc]
-changedir = doc/source
-commands = make html
+[testenv:docs]
+commands =
+ sphinx-build -W --keep-going -b html -d doc/build/doctrees doc/source doc/build/html
-[tox:jenkins]
-downloadcache = ~/cache/pip
+[testenv:bandit]
+commands = bandit -r osprofiler -n5
[flake8]
-ignore = E126,H703
show-source = true
builtins = _
-exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools
+# E741 ambiguous variable name 'l'
+# W503 line break before binary operator
+ignore = E741,W503
+exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,setup.py,build,releasenotes
+import-order-style = pep8
+application-import-names = osprofiler
+
+[flake8:local-plugins]
+extension =
+ N301 = checks:check_assert_methods_from_mock
+ N320 = checks:assert_true_instance
+ N321 = checks:assert_equal_type
+ N322 = checks:assert_equal_none
+ N323 = checks:assert_true_or_false_with_in
+ N324 = checks:assert_equal_in
+ N350 = checks:check_quotes
+ N351 = checks:check_no_constructor_data_struct
+ N352 = checks:check_dict_formatting_in_string
+ N353 = checks:check_using_unicode
+ N354 = checks:check_raises
+paths = ./osprofiler/hacking
+
+[testenv:releasenotes]
+commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+
+[testenv:lower-constraints]
+deps =
+ -c{toxinidir}/lower-constraints.txt
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/requirements.txt