Commit 8997def2 authored by Michael Johnson, committed by Carlos Goncalves

Updates Octavia to support octavia-lib

This is the base patch that updates Octavia to use the new octavia-lib.
It stays backwards compatible by preserving the old import locations via
debtcollector moves.
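
For reference, that shim follows the usual debtcollector pattern (a
minimal sketch based on the driver_lib.py change below; the exact
moved_class arguments used by the patch may differ):

    import warnings

    from debtcollector import moves

    from octavia_lib.api.drivers import driver_lib as lib_driver_lib

    warnings.simplefilter('default', DeprecationWarning)

    # Importing DriverLibrary from octavia.api.drivers.driver_lib still
    # works, but now emits a DeprecationWarning pointing at octavia-lib.
    DriverLibrary = moves.moved_class(
        lib_driver_lib.DriverLibrary, 'DriverLibrary', __name__)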

It adds a new controller process called the "driver-agent".

This patch also adds unit test coverage for a few additional modules.

Depends-On: https://review.openstack.org/#/c/641180/

Change-Id: I438e1548ec0fb6111d1ab85b05015007d9d0a006
parent c00faa7e
......@@ -23,7 +23,7 @@ For example
For example
ENABLED_SERVICES+=octavia,o-api,o-cw,o-hk,o-hm
ENABLED_SERVICES+=octavia,o-api,o-cw,o-hk,o-hm,o-da
For more information, see the "Externally Hosted Plugins" section of
https://docs.openstack.org/devstack/latest/plugins.html
......@@ -45,7 +45,7 @@ ENABLED_SERVICES+=,neutron-metadata-agent,neutron-qos
# Tempest (optional)
#ENABLED_SERVICES+=,tempest
# Octavia
ENABLED_SERVICES+=,octavia,o-api,o-cw,o-hm,o-hk
ENABLED_SERVICES+=,octavia,o-api,o-cw,o-hm,o-hk,o-da
EOF
# Create the stack user
......
......@@ -29,6 +29,15 @@ function octaviaclient_install {
fi
}
function octavia_lib_install {
if use_library_from_git "octavia-lib"; then
git_clone_by_name "octavia-lib"
setup_dev_lib "octavia-lib"
else
pip_install_gr octavia-lib
fi
}
function install_diskimage_builder {
if use_library_from_git "diskimage-builder"; then
GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL
......@@ -209,6 +218,9 @@ function octavia_configure {
sudo mkdir -m 755 -p $OCTAVIA_CONF_DIR
safe_chown $STACK_USER $OCTAVIA_CONF_DIR
sudo mkdir -m 700 -p $OCTAVIA_RUN_DIR
safe_chown $STACK_USER $OCTAVIA_RUN_DIR
if ! [ -e $OCTAVIA_CONF ] ; then
cp $OCTAVIA_DIR/etc/octavia.conf $OCTAVIA_CONF
fi
......@@ -477,6 +489,7 @@ function octavia_start {
run_process $OCTAVIA_API "$OCTAVIA_API_BINARY $OCTAVIA_API_ARGS"
fi
run_process $OCTAVIA_DRIVER_AGENT "$OCTAVIA_DRIVER_AGENT_BINARY $OCTAVIA_DRIVER_AGENT_ARGS"
run_process $OCTAVIA_CONSUMER "$OCTAVIA_CONSUMER_BINARY $OCTAVIA_CONSUMER_ARGS"
run_process $OCTAVIA_HOUSEKEEPER "$OCTAVIA_HOUSEKEEPER_BINARY $OCTAVIA_HOUSEKEEPER_ARGS"
run_process $OCTAVIA_HEALTHMANAGER "$OCTAVIA_HEALTHMANAGER_BINARY $OCTAVIA_HEALTHMANAGER_ARGS"
......@@ -489,6 +502,7 @@ function octavia_stop {
else
stop_process $OCTAVIA_API
fi
stop_process $OCTAVIA_DRIVER_AGENT
stop_process $OCTAVIA_CONSUMER
stop_process $OCTAVIA_HOUSEKEEPER
stop_process $OCTAVIA_HEALTHMANAGER
......@@ -523,6 +537,9 @@ function octavia_cleanup {
if [ ${OCTAVIA_CONF_DIR}x != x ] ; then
sudo rm -rf ${OCTAVIA_CONF_DIR}
fi
if [ ${OCTAVIA_RUN_DIR}x != x ] ; then
sudo rm -rf ${OCTAVIA_RUN_DIR}
fi
if [ ${OCTAVIA_AMP_SSH_KEY_PATH}x != x ] ; then
rm -f ${OCTAVIA_AMP_SSH_KEY_PATH} ${OCTAVIA_AMP_SSH_KEY_PATH}.pub
fi
......@@ -635,6 +652,7 @@ if is_service_enabled $OCTAVIA; then
if [[ "$1" == "stack" && "$2" == "install" ]]; then
# Perform installation of service source
echo_summary "Installing octavia"
octavia_lib_install
octavia_install
octaviaclient_install
......
......@@ -61,6 +61,7 @@ enable_service o-hm
enable_service o-hk
enable_service o-api
enable_service o-api-ha
enable_service o-da
OCTAVIA_USE_PREGENERATED_CERTS=True
OCTAVIA_USE_PREGENERATED_SSH_KEY=True
......
......@@ -73,6 +73,7 @@ enable_service o-cw
enable_service o-hm
enable_service o-hk
enable_service o-api
enable_service o-da
# enable DVR
......
......@@ -16,6 +16,7 @@ OCTAVIA_DHCLIENT_CONF=${OCTAVIA_DHCLIENT_CONF:-${OCTAVIA_DHCLIENT_DIR}/dhclient.
OCTAVIA_CONF=${OCTAVIA_CONF:-${OCTAVIA_CONF_DIR}/octavia.conf}
OCTAVIA_AUDIT_MAP=${OCTAVIA_AUDIT_MAP:-${OCTAVIA_CONF_DIR}/octavia_api_audit_map.conf}
OCTAVIA_TEMPEST_DIR=${OCTAVIA_TEMPEST_DIR:-${OCTAVIA_DIR}/octavia/tests/tempest}
OCTAVIA_RUN_DIR=${OCTAVIA_RUN_DIR:-"/var/run/octavia"}
OCTAVIA_AMPHORA_DRIVER=${OCTAVIA_AMPHORA_DRIVER:-"amphora_haproxy_rest_driver"}
OCTAVIA_NETWORK_DRIVER=${OCTAVIA_NETWORK_DRIVER:-"allowed_address_pairs_driver"}
......@@ -61,11 +62,13 @@ OCTAVIA_API_BINARY=${OCTAVIA_API_BINARY:-${OCTAVIA_BIN_DIR}/octavia-api}
OCTAVIA_CONSUMER_BINARY=${OCTAVIA_CONSUMER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-worker}
OCTAVIA_HOUSEKEEPER_BINARY=${OCTAVIA_HOUSEKEEPER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-housekeeping}
OCTAVIA_HEALTHMANAGER_BINARY=${OCTAVIA_HEALTHMANAGER_BINARY:-${OCTAVIA_BIN_DIR}/octavia-health-manager}
OCTAVIA_DRIVER_AGENT_BINARY=${OCTAVIA_DRIVER_AGENT_BINARY:-${OCTAVIA_BIN_DIR}/octavia-driver-agent}
OCTAVIA_API_ARGS=${OCTAVIA_API_ARGS:-" --config-file $OCTAVIA_CONF"}
OCTAVIA_CONSUMER_ARGS=${OCTAVIA_CONSUMER_ARGS:-" --config-file $OCTAVIA_CONF"}
OCTAVIA_HOUSEKEEPER_ARGS=${OCTAVIA_HOUSEKEEPER_ARGS:-" --config-file $OCTAVIA_CONF"}
OCTAVIA_HEALTHMANAGER_ARGS=${OCTAVIA_HEALTHMANAGER_ARGS:-" --config-file $OCTAVIA_CONF"}
OCTAVIA_DRIVER_AGENT_ARGS=${OCTAVIA_DRIVER_AGENT_ARGS:-" --config-file $OCTAVIA_CONF"}
OCTAVIA_TEMPEST=${OCTAVIA_TEMPEST:-"disabled"}
......@@ -75,12 +78,18 @@ OCTAVIA_HOUSEKEEPER="o-hk"
OCTAVIA_HEALTHMANAGER="o-hm"
OCTAVIA_SERVICE="octavia"
OCTAVIA_API_HAPROXY="o-api-ha"
OCTAVIA_DRIVER_AGENT="o-da"
# Client settings
GITREPO["python-octaviaclient"]=${OCTAVIACLIENT_REPO:-${GIT_BASE}/openstack/python-octaviaclient.git}
GITBRANCH["python-octaviaclient"]=${OCTAVIACLIENT_BRANCH:-master}
GITDIR["python-octaviaclient"]=$DEST/python-octaviaclient
# Library settings
GITREPO["octavia-lib"]=${OCTAVIA_LIB_REPO:-${GIT_BASE}/openstack/octavia-lib.git}
GITBRANCH["octavia-lib"]=${OCTAVIA_LIB_BRANCH:-master}
GITDIR["octavia-lib"]=$DEST/octavia-lib
NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
NEUTRON_LBAAS_CONF=$NEUTRON_CONF_DIR/neutron_lbaas.conf
OCTAVIA_SERVICE_PROVIDER=${OCTAVIA_SERVICE_PROVIDER:-"LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default"}
......
......@@ -55,10 +55,11 @@ Provider drivers should only access the following Octavia APIs. All other
Octavia APIs are not considered stable or safe for provider driver use and
may change at any time.
* octavia.api.drivers.data_models
* octavia.api.drivers.driver_lib
* octavia.api.drivers.exceptions
* octavia.api.drivers.provider_base
* octavia_lib.api.drivers.data_models
* octavia_lib.api.drivers.driver_lib
* octavia_lib.api.drivers.exceptions
* octavia_lib.api.drivers.provider_base
* octavia_lib.common.constants
Octavia Provider Driver API
===========================
......@@ -1695,7 +1696,7 @@ Driver Support Library
Provider drivers need support for updating provisioning status, operating
status, and statistics. Drivers will not directly use database operations,
and instead will call back to Octavia using a new API.
and instead will call back to octavia-lib using a new API.
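
As an illustration, a provider driver uses the support library shipped in
octavia-lib rather than importing from the Octavia tree (a minimal sketch;
the status payload and ID are illustrative placeholders):

.. code-block:: python

    from octavia_lib.api.drivers import driver_lib

    # The library connects to the driver-agent's Unix domain sockets
    # (see the [driver_agent] configuration options).
    status_lib = driver_lib.DriverLibrary()

    status_lib.update_loadbalancer_status(
        {'loadbalancers': [{'id': '<loadbalancer-uuid>',
                            'provisioning_status': 'ACTIVE',
                            'operating_status': 'ONLINE'}]})
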
.. warning::
......@@ -1708,7 +1709,7 @@ and instead will callback to Octavia using a new API.
This library is interim and will be removed when the driver support endpoint
is made available, at which point drivers will not import any code from
Octavia.
octavia-lib.
Update Provisioning and Operating Status API
--------------------------------------------
......@@ -1723,6 +1724,13 @@ and operating status parameters are as defined by Octavia status codes. If an
existing object is not included in the input parameter, the status remains
unchanged.
.. note::
  If the driver-agent exceeds its configured `status_max_processes`, this call
may block while it waits for a status process slot to become available.
The operator will be notified if the driver-agent approaches or reaches
the configured limit.
provisioning_status: status associated with the lifecycle of the
resource. See `Octavia Provisioning Status Codes <https://developer.openstack.org/api-ref/load-balancer/v2/index.html#provisioning-status-codes>`_.
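
For example, a status update marking a load balancer and one of its
listeners as active could look like this (a minimal sketch matching the
record format the DriverUpdater in this patch consumes; IDs are
placeholders):

.. code-block:: python

    status = {
        'loadbalancers': [{'id': '<loadbalancer-uuid>',
                           'provisioning_status': 'ACTIVE',
                           'operating_status': 'ONLINE'}],
        'listeners': [{'id': '<listener-uuid>',
                       'provisioning_status': 'ACTIVE',
                       'operating_status': 'ONLINE'}]
    }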
......@@ -1765,6 +1773,13 @@ with multiple listener statistics is used to update statistics in a single
call. If an existing listener is not included, the statistics for that object
remain unchanged.
.. note::
  If the driver-agent exceeds its configured `stats_max_processes`, this call
may block while it waits for a stats process slot to become available.
The operator will be notified if the driver-agent approaches or reaches
the configured limit.
The general form of the input dictionary is a list of listener statistics:
.. code-block:: python
......
......@@ -83,10 +83,9 @@ It is also possible to use Octavia as a Neutron LBaaS plugin, in the same way
as any other vendor. You can think of Octavia as an "open source vendor" for
Neutron LBaaS.
Soon, Octavia will support third-party vendor drivers just like Neutron LBaaS,
and will then fully replace Neutron LBaaS as the load balancing solution for
OpenStack. At that time, third-party vendor drivers that presently "plug in" to
Neutron LBaaS will plug in to Octavia instead.
Octavia supports third-party vendor drivers just like Neutron LBaaS,
and fully replaces Neutron LBaaS as the load balancing solution for
OpenStack.
For further information on OpenStack Neutron LBaaS deprecation, please refer to
https://wiki.openstack.org/wiki/Neutron/LBaaS/Deprecation.
......@@ -119,7 +118,7 @@ A 10,000-foot overview of Octavia components
:width: 660px
:alt: Octavia Component Overview
Octavia version 0.9 consists of the following major components:
Octavia version 4.0 consists of the following major components:
* **amphorae** - Amphorae are the individual virtual machines, containers, or
bare metal servers that accomplish the delivery of load balancing services to
......@@ -128,7 +127,7 @@ Octavia version 0.9 consists of the following major components:
HAProxy.
* **controller** - The Controller is the "brains" of Octavia. It consists of
four sub-components, which are individual daemons. They can be run on
five sub-components, which are individual daemons. They can be run on
separate back-end infrastructure if desired:
* **API Controller** - As the name implies, this subcomponent runs Octavia's
......@@ -147,6 +146,9 @@ Octavia version 0.9 consists of the following major components:
database records, manages the spares pool, and manages amphora certificate
rotation.
* **Driver Agent** - The driver agent receives status and statistics updates
from provider drivers.
* **network** - Octavia cannot accomplish what it does without manipulating
the network environment. Amphorae are spun up with a network interface on the
"load balancer network," and they may also plug directly into tenant networks
......
......@@ -460,3 +460,23 @@
# A URL representing the messaging driver to use for notifications. If not
# specified, we fall back to the same configuration used for RPC.
# transport_url =
[driver_agent]
# status_socket_path = /var/run/octavia/status.sock
# stats_socket_path = /var/run/octavia/stats.sock
# Maximum time to wait for a status message before checking for shutdown
# status_request_timeout = 5
# Maximum number of status processes per driver-agent
# status_max_processes = 50
# Maximum time to wait for a stats message before checking for shutdown
# stats_request_timeout = 5
# Maximum number of stats processes per driver-agent
# stats_max_processes = 50
# Percentage of max_processes (status and stats) in use at which to start
# logging warning messages about an overloaded driver-agent.
# max_process_warning_percent = .75
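# For reference, these options are consumed by the driver-agent listener
# processes added in this patch, for example (excerpted from
# driver_listener.py below):
#
#     server = ForkingUDSServer(CONF.driver_agent.status_socket_path,
#                               StatusRequestHandler)
#     server.timeout = CONF.driver_agent.status_request_timeout
#     server.max_children = CONF.driver_agent.status_max_processes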
......@@ -72,6 +72,7 @@ munch==2.2.0
netaddr==0.7.19
netifaces==0.10.4
networkx==1.11
octavia-lib==1.1.1
openstacksdk==0.12.0
os-client-config==1.29.0
os-service-types==1.2.0
......
......@@ -14,15 +14,17 @@
from jsonschema import exceptions as js_exceptions
from jsonschema import validate
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from stevedore import driver as stevedore_driver
from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions
from octavia_lib.api.drivers import provider_base as driver_base
from octavia.api.drivers.amphora_driver import flavor_schema
from octavia.api.drivers import data_models as driver_dm
from octavia.api.drivers import exceptions
from octavia.api.drivers import provider_base as driver_base
from octavia.api.drivers import utils as driver_utils
from octavia.common import constants as consts
from octavia.common import data_models
......@@ -93,7 +95,7 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
# expects
vip_qos_policy_id = lb_dict.pop('vip_qos_policy_id', None)
if vip_qos_policy_id:
vip_dict = {"qos_policy_id": vip_qos_policy_id}
vip_dict = {"vip_qos_policy_id": vip_qos_policy_id}
lb_dict["vip"] = vip_dict
payload = {consts.LOAD_BALANCER_ID: lb_id,
......
This diff is collapsed.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import signal
import threading
import six.moves.socketserver as socketserver
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from octavia.api.drivers.driver_agent import driver_updater
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _recv(recv_socket):
size_str = b''
char = recv_socket.recv(1)
while char != b'\n':
size_str += char
char = recv_socket.recv(1)
payload_size = int(size_str)
mv_buffer = memoryview(bytearray(payload_size))
next_offset = 0
while payload_size - next_offset > 0:
recv_size = recv_socket.recv_into(mv_buffer[next_offset:],
payload_size - next_offset)
next_offset += recv_size
return jsonutils.loads(mv_buffer.tobytes())
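# For reference, the peer side of this framing (implemented in the
# octavia-lib driver support library) writes the payload length as ASCII
# digits, a newline, then the JSON body. An illustrative sender, not part
# of this patch, would be:
#
#     def _send(send_socket, obj):
#         json_data = jsonutils.dump_as_bytes(obj)
#         send_socket.send('{}\n'.format(len(json_data)).encode('utf-8'))
#         send_socket.sendall(json_data)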
class StatusRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Get the update data
status = _recv(self.request)
# Process the update
updater = driver_updater.DriverUpdater()
response = updater.update_loadbalancer_status(status)
# Send the response
json_data = jsonutils.dump_as_bytes(response)
len_str = '{}\n'.format(len(json_data)).encode('utf-8')
        self.request.sendall(len_str)
self.request.sendall(json_data)
class StatsRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Get the update data
stats = _recv(self.request)
# Process the update
updater = driver_updater.DriverUpdater()
response = updater.update_listener_statistics(stats)
# Send the response
json_data = jsonutils.dump_as_bytes(response)
len_str = '{}\n'.format(len(json_data)).encode('utf-8')
        self.request.sendall(len_str)
self.request.sendall(json_data)
class ForkingUDSServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
pass
def _mutate_config(*args, **kwargs):
CONF.mutate_config_files()
def _cleanup_socket_file(filename):
# Remove the socket file if it already exists
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def status_listener(exit_event):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGHUP, _mutate_config)
_cleanup_socket_file(CONF.driver_agent.status_socket_path)
server = ForkingUDSServer(CONF.driver_agent.status_socket_path,
StatusRequestHandler)
server.timeout = CONF.driver_agent.status_request_timeout
server.max_children = CONF.driver_agent.status_max_processes
while not exit_event.is_set():
server.handle_request()
    LOG.info('Waiting for driver status listener to shut down...')
    # Can't shut ourselves down as we would deadlock; spawn a thread
threading.Thread(target=server.shutdown).start()
LOG.info('Driver status listener shutdown finished.')
server.server_close()
_cleanup_socket_file(CONF.driver_agent.status_socket_path)
def stats_listener(exit_event):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGHUP, _mutate_config)
_cleanup_socket_file(CONF.driver_agent.stats_socket_path)
server = ForkingUDSServer(CONF.driver_agent.stats_socket_path,
StatsRequestHandler)
server.timeout = CONF.driver_agent.stats_request_timeout
server.max_children = CONF.driver_agent.stats_max_processes
while not exit_event.is_set():
server.handle_request()
    LOG.info('Waiting for driver statistics listener to shut down...')
    # Can't shut ourselves down as we would deadlock; spawn a thread
threading.Thread(target=server.shutdown).start()
LOG.info('Driver statistics listener shutdown finished.')
server.server_close()
_cleanup_socket_file(CONF.driver_agent.stats_socket_path)
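
These listeners are run by the new octavia-driver-agent command (the
OCTAVIA_DRIVER_AGENT_BINARY configured in the devstack settings above; its
source is not expanded in this view). A minimal sketch of that wiring,
assuming a multiprocessing-based launcher:

    import multiprocessing

    from octavia.api.drivers.driver_agent import driver_listener

    exit_event = multiprocessing.Event()

    status_proc = multiprocessing.Process(
        target=driver_listener.status_listener, args=(exit_event,),
        name='status_listener')
    status_proc.start()

    stats_proc = multiprocessing.Process(
        target=driver_listener.stats_listener, args=(exit_event,),
        name='stats_listener')
    stats_proc.start()

    # On shutdown the command sets exit_event so both handle_request()
    # loops fall through and the servers close their sockets.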
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia_lib.api.drivers import exceptions as driver_exceptions
from octavia_lib.common import constants as lib_consts
from octavia.common import constants as consts
from octavia.common import utils
from octavia.db import api as db_apis
from octavia.db import repositories as repo
class DriverUpdater(object):
def __init__(self, **kwargs):
self.loadbalancer_repo = repo.LoadBalancerRepository()
self.listener_repo = repo.ListenerRepository()
self.pool_repo = repo.PoolRepository()
self.health_mon_repo = repo.HealthMonitorRepository()
self.member_repo = repo.MemberRepository()
self.l7policy_repo = repo.L7PolicyRepository()
self.l7rule_repo = repo.L7RuleRepository()
self.listener_stats_repo = repo.ListenerStatisticsRepository()
self.db_session = db_apis.get_session()
super(DriverUpdater, self).__init__(**kwargs)
def _check_for_lb_vip_deallocate(self, repo, lb_id):
lb = repo.get(self.db_session, id=lb_id)
if lb.vip.octavia_owned:
vip = lb.vip
# We need a backreference
vip.load_balancer = lb
# Only lookup the network driver if we have a VIP to deallocate
network_driver = utils.get_network_driver()
network_driver.deallocate_vip(vip)
def _process_status_update(self, repo, object_name, record,
delete_record=False):
# Zero it out so that if the ID is missing from a record we do not
# report the last LB as the failed record in the exception
record_id = None
try:
record_id = record['id']
record_kwargs = {}
prov_status = record.get(consts.PROVISIONING_STATUS, None)
if prov_status:
if (prov_status == consts.DELETED and
object_name == consts.LOADBALANCERS):
self._check_for_lb_vip_deallocate(repo, record_id)
elif prov_status == consts.DELETED and delete_record:
repo.delete(self.db_session, id=record_id)
return
record_kwargs[consts.PROVISIONING_STATUS] = prov_status
op_status = record.get(consts.OPERATING_STATUS, None)
if op_status:
record_kwargs[consts.OPERATING_STATUS] = op_status
if prov_status or op_status:
repo.update(self.db_session, record_id, **record_kwargs)
except Exception as e:
# We need to raise a failure here to notify the driver it is
# sending bad status data.
raise driver_exceptions.UpdateStatusError(
fault_string=str(e), status_object_id=record_id,
status_object=object_name)
def update_loadbalancer_status(self, status):
"""Update load balancer status.
:param status: dictionary defining the provisioning status and
operating status for load balancer objects, including pools,
members, listeners, L7 policies, and L7 rules.
                id (string): ID for the object.
provisioning_status (string): Provisioning status for the object.
operating_status (string): Operating status for the object.
:type status: dict
:raises: UpdateStatusError
:returns: None
"""
try:
members = status.pop(consts.MEMBERS, [])
for member in members:
self._process_status_update(self.member_repo, consts.MEMBERS,
member, delete_record=True)
health_mons = status.pop(consts.HEALTHMONITORS, [])
for health_mon in health_mons:
self._process_status_update(
self.health_mon_repo, consts.HEALTHMONITORS, health_mon,
delete_record=True)
pools = status.pop(consts.POOLS, [])
for pool in pools:
self._process_status_update(self.pool_repo, consts.POOLS,
pool, delete_record=True)
l7rules = status.pop(consts.L7RULES, [])
for l7rule in l7rules:
self._process_status_update(self.l7rule_repo, consts.L7RULES,
l7rule, delete_record=True)
l7policies = status.pop(consts.L7POLICIES, [])
for l7policy in l7policies:
self._process_status_update(
self.l7policy_repo, consts.L7POLICIES, l7policy,
delete_record=True)
listeners = status.pop(lib_consts.LISTENERS, [])
for listener in listeners:
self._process_status_update(
self.listener_repo, lib_consts.LISTENERS, listener,
delete_record=True)
lbs = status.pop(consts.LOADBALANCERS, [])
for lb in lbs:
self._process_status_update(self.loadbalancer_repo,
consts.LOADBALANCERS, lb)
except driver_exceptions.UpdateStatusError as e:
return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
lib_consts.FAULT_STRING: e.fault_string,
lib_consts.STATUS_OBJECT: e.status_object,
lib_consts.STATUS_OBJECT_ID: e.status_object_id}
except Exception as e:
return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
lib_consts.FAULT_STRING: str(e)}
return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_OK}
def update_listener_statistics(self, statistics):
"""Update listener statistics.
:param statistics: Statistics for listeners:
id (string): ID for listener.
active_connections (int): Number of currently active connections.
bytes_in (int): Total bytes received.
bytes_out (int): Total bytes sent.
request_errors (int): Total requests not fulfilled.
total_connections (int): The total connections handled.
:type statistics: dict
:raises: UpdateStatisticsError
:returns: None
"""
listener_stats = statistics.get(lib_consts.LISTENERS, [])
for stat in listener_stats:
try:
listener_id = stat.pop('id')
except Exception as e:
return {
lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
lib_consts.FAULT_STRING: str(e),
lib_consts.STATS_OBJECT: lib_consts.LISTENERS}
# Provider drivers other than the amphora driver do not have
# an amphora ID, use the listener ID again here to meet the
# constraint requirement.
try:
self.listener_stats_repo.replace(self.db_session, listener_id,
listener_id, **stat)
except Exception as e:
return {
lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_FAILED,
lib_consts.FAULT_STRING: str(e),
lib_consts.STATS_OBJECT: lib_consts.LISTENERS,
lib_consts.STATS_OBJECT_ID: listener_id}
return {lib_consts.STATUS_CODE: lib_consts.DRVR_STATUS_CODE_OK}
......@@ -12,144 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
from octavia.api.drivers import exceptions as driver_exceptions
from octavia.common import constants as consts
from octavia.common import utils
from octavia.db import api as db_apis
from octavia.db import repositories as repo
import warnings
from debtcollector import moves
class DriverLibrary(object):
from octavia_lib.api.drivers import driver_lib as lib_driver_lib
def __init__(self, **kwargs):
self.loadbalancer_repo = repo.LoadBalancerRepository()
self.listener_repo = repo.ListenerRepository()
self.pool_repo = repo.PoolRepository()
self.health_mon_repo = repo.HealthMonitorRepository()
self.member_repo = repo.MemberRepository()
self.l7policy_repo = repo.L7PolicyRepository()
self.l7rule_repo = repo.L7RuleRepository()
self.listener_stats_repo = repo.ListenerStatisticsRepository()
self.db_session = db_apis.get_session()
super(DriverLibrary, self).__init__(**kwargs)
warnings.simplefilter('default', DeprecationWarning)
def _check_for_lb_vip_deallocate(self, repo, lb_id):
lb = repo.get(self.db_session, id=lb_id)
if lb.vip.octavia_owned:
vip = lb.vip
# We need a backreference
vip.load_balancer = lb
# Only lookup the network driver if we have a VIP to deallocate
network_driver = utils.get_network_driver()
network_driver.deallocate_vip(vip)
def _process_status_update(self, repo, object_name, record,
delete_record=False):
# Zero it out so that if the ID is missing from a record we do not
# report the last LB as the failed record in the exception
record_id = None
try:
record_id = record['id']
record_kwargs = {}
prov_status = record.get(consts.PROVISIONING_STATUS, None)
if prov_status:
if (prov_status == consts.DELETED and
object_name == consts.LOADBALANCERS):
self._check_for_lb_vip_deallocate(repo, record_id)
elif prov_status == consts.DELETED and delete_record:
repo.delete(self.db_session, id=record_id)
return
record_kwargs[consts.PROVISIONING_STATUS] = prov_status
op_status = record.get(consts.OPERATING_STATUS, None)
if op_status:
record_kwargs[consts.OPERATING_STATUS] = op_status
if prov_status or op_status:
repo.update(self.db_session, record_id, **record_kwargs)
except Exception as e:
# We need to raise a failure here to notify the driver it is
# sending bad status data.
raise driver_exceptions.UpdateStatusError(
fault_string=str(e), status_object_id=record_id,
status_object=object_name)
def update_loadbalancer_status(self, status):
"""Update load balancer status.
:param status: dictionary defining the provisioning status and
operating status for load balancer objects, including pools,
members, listeners, L7 policies, and L7 rules.