Commit 6b97c4e4 authored by Joshua Harlow's avatar Joshua Harlow

Remerge against head/master

parents ea4bc2c6 880d9fc2
......@@ -33,7 +33,8 @@ API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
import json
import platform
import serial
from cloudinit import serial
# these high timeouts are necessary as read may read a lot of data.
READ_TIMEOUT = 60
......
......@@ -26,6 +26,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import net
from cloudinit.net import eni
from cloudinit import util
from cloudinit.distros.parsers.hostname import HostnameConf
......@@ -56,6 +57,7 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'debian'
self._net_renderer = eni.Renderer()
def apply_locale(self, locale, out_fn=None):
if not out_fn:
......@@ -80,10 +82,10 @@ class Distro(distros.Distro):
def _write_network_config(self, netconfig):
ns = net.parse_net_config_data(netconfig)
net.render_network_state(target="/", network_state=ns,
eni=self.network_conf_fn,
links_prefix=self.links_prefix,
netrules=None)
self._net_renderer.render_network_state(
target="/", network_state=ns,
eni=self.network_conf_fn, links_prefix=self.links_prefix,
netrules=None)
_maybe_remove_legacy_eth0()
return []
......
This diff is collapsed.
# Copyright (C) 2013-2014 Canonical Ltd.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Blake Rouse <blake.rouse@canonical.com>
#
# Curtin is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
import base64
import glob
import gzip
import io
import shlex
import sys
import six
from cloudinit.net import get_devicelist
from cloudinit.net import sys_netdev_info
from cloudinit import util
PY26 = sys.version_info[0:2] == (2, 6)
def _shlex_split(blob):
if PY26 and isinstance(blob, six.text_type):
# Older versions don't support unicode input
blob = blob.encode("utf8")
return shlex.split(blob)
def _load_shell_content(content, add_empty=False, empty_val=None):
"""Given shell like syntax (key=value\nkey2=value2\n) in content
return the data in dictionary form. If 'add_empty' is True
then add entries in to the returned dictionary for 'VAR='
variables. Set their value to empty_val."""
data = {}
for line in _shlex_split(content):
try:
key, value = line.split("=", 1)
except ValueError:
# Unsplittable line, skip it...
pass
else:
if not value:
value = empty_val
if add_empty or value:
data[key] = value
return data
def _klibc_to_config_entry(content, mac_addrs=None):
    """Convert a klibc-written shell content file to a 'config' entry.

    When ip= is seen on the kernel command line in debian initramfs
    and networking is brought up, ipconfig will populate
    /run/net-<name>.cfg.

    The files use shell style syntax, and examples are in the tests
    provided here.  There is no good documentation on this unfortunately.

    DEVICE=<name> is expected/required and PROTO should indicate if
    this is 'static' or 'dhcp'.

    Returns a (name, iface) tuple where iface is a network-config v1
    'physical' entry.  Raises ValueError on missing DEVICE or an
    unrecognized PROTO value.
    """
    if mac_addrs is None:
        mac_addrs = {}

    data = _load_shell_content(content)
    if 'DEVICE' not in data:
        raise ValueError("no 'DEVICE' entry in data")
    name = data['DEVICE']

    # ipconfig on precise does not write PROTO; infer it.  A 'filename'
    # entry means a dhcp server answered, so treat that as dhcp.
    proto = data.get('PROTO')
    if not proto:
        proto = 'dhcp' if data.get('filename') else 'static'
    if proto not in ('static', 'dhcp'):
        raise ValueError("Unexpected value for PROTO: %s" % proto)

    iface = {
        'type': 'physical',
        'name': name,
        'subnets': [],
    }
    if name in mac_addrs:
        iface['mac_address'] = mac_addrs[name]

    # originally believed there might be IPV6* values as well
    for pre in ('IPV4',):
        # without an IPV4ADDR (or IPV6ADDR) there is nothing configured
        if (pre + 'ADDR') not in data:
            continue
        subnet = {'type': proto, 'control': 'manual'}

        # these fields go right on the subnet
        for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
            value = data.get(pre + key)
            if value is not None:
                subnet[key.lower()] = value

        # handle IPV4DNS0 / IPV4DNS1 (or the IPV6 equivalents), skipping
        # unset servers and all-zero placeholders like 0.0.0.0 or '::'
        nameservers = []
        for nskey in ('DNS0', 'DNS1'):
            ns = data.get(pre + nskey)
            if ns and ns.strip(":.0"):
                nameservers.append(ns)
        if nameservers:
            subnet['dns_nameservers'] = nameservers

        # add search to both ipv4 and ipv6, as it has no namespace
        search = data.get('DOMAINSEARCH')
        if search:
            sep = ',' if ',' in search else None
            subnet['dns_search'] = search.split(sep)

        iface['subnets'].append(subnet)

    return name, iface
def config_from_klibc_net_cfg(files=None, mac_addrs=None):
    """Build a version-1 network config dict from klibc net cfg files.

    *files* defaults to the /run/net*.conf files written by ipconfig in
    the initramfs.  Raises ValueError when two files configure the same
    device name.
    """
    if files is None:
        files = glob.glob('/run/net*.conf')

    entries = []
    seen = {}  # device name -> the cfg file that first defined it
    for cfg_file in files:
        name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
                                             mac_addrs=mac_addrs)
        if name in seen:
            raise ValueError(
                "device '%s' defined multiple times: %s and %s" % (
                    name, seen[name], cfg_file))
        seen[name] = cfg_file
        entries.append(entry)
    return {'config': entries, 'version': 1}
def _decomp_gzip(blob, strict=True):
# decompress blob. raise exception if not compressed unless strict=False.
with io.BytesIO(blob) as iobuf:
gzfp = None
try:
gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
return gzfp.read()
except IOError:
if strict:
raise
return blob
finally:
if gzfp:
gzfp.close()
def _b64dgz(b64str, gzipped="try"):
# decode a base64 string. If gzipped is true, transparently uncompresss
# if gzipped is 'try', then try gunzip, returning the original on fail.
try:
blob = base64.b64decode(b64str)
except TypeError:
raise ValueError("Invalid base64 text: %s" % b64str)
if not gzipped:
return blob
return _decomp_gzip(blob, strict=gzipped != "try")
def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
    """Return network config parsed from the kernel command line, or None.

    An explicit base64 'network-config=' blob takes precedence; otherwise
    the klibc-written net cfg files are used when 'ip=' was given.
    Returns None when the command line carries no network configuration.
    """
    if cmdline is None:
        cmdline = util.get_cmdline()

    if 'network-config=' in cmdline:
        # take the last network-config= token, mirroring kernel semantics
        # where later arguments win
        found = [tok.partition("=")[2] for tok in cmdline.split()
                 if tok.startswith("network-config=")]
        if found and found[-1]:
            return util.load_yaml(_b64dgz(found[-1]))

    if 'ip=' not in cmdline:
        return None

    if mac_addrs is None:
        mac_addrs = dict((name, sys_netdev_info(name, 'address'))
                         for name in get_devicelist())

    return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
This diff is collapsed.
This diff is collapsed.
# vi: ts=4 expandtab
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
try:
    from serial import Serial
except ImportError:
    # For older versions of python (ie 2.6) pyserial may not exist and/or
    # work and/or be installed, so provide a stand-in Serial class that
    # fails loudly as soon as anyone tries to actually use it...
    class Serial(object):
        """Placeholder for pyserial's Serial when pyserial is absent."""

        # Template for the error raised by every unusable operation.
        _ERR_TMPL = ("Unable to perform serial `%s` operation,"
                     " pyserial not installed.")

        def __init__(self, *args, **kwargs):
            # Accept (and ignore) any constructor arguments so callers
            # can still instantiate this placeholder.
            pass

        @staticmethod
        def isOpen():
            # Without pyserial no port can ever be open.
            return False

        @staticmethod
        def write(data):
            raise IOError(Serial._ERR_TMPL % "write")

        @staticmethod
        def readline():
            raise IOError(Serial._ERR_TMPL % "readline")

        @staticmethod
        def flush():
            raise IOError(Serial._ERR_TMPL % "flush")

        @staticmethod
        def read(size=1):
            raise IOError(Serial._ERR_TMPL % "read")
......@@ -423,7 +423,7 @@ def write_files(datadir, files, dirmode=None):
elem.text = DEF_PASSWD_REDACTION
return ET.tostring(root)
except Exception:
LOG.critical("failed to redact userpassword in {}".format(fname))
LOG.critical("failed to redact userpassword in %s", fname)
return cnt
if not datadir:
......
......@@ -61,7 +61,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
mstr += "[source=%s]" % (self.source)
return mstr
def get_data(self):
def get_data(self, skip_first_boot=False):
found = None
md = {}
results = {}
......@@ -119,7 +119,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
# instance-id
prev_iid = get_previous_iid(self.paths)
cur_iid = md['instance-id']
if prev_iid != cur_iid and self.dsmode == "local":
if prev_iid != cur_iid and \
self.dsmode == "local" and not skip_first_boot:
on_first_boot(results, distro=self.distro)
# dsmode != self.dsmode here if:
......@@ -163,7 +164,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def network_config(self):
if self._network_config is None:
if self.network_json is not None:
self._network_config = convert_network_data(self.network_json)
self._network_config = openstack.convert_net_json(
self.network_json)
return self._network_config
......@@ -303,122 +305,3 @@ datasources = [
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
def convert_network_data(network_json=None):
    """Return a network_config dictionary parsed from the OpenStack
    ConfigDrive NetworkData json format, or None if no json was given.

    OpenStack network_data.json provides a 3 element dictionary
      - "links" (links are network devices, physical or virtual)
      - "networks" (networks are ip network configurations for one or more
        links)
      - "services" (non-ip services, like dns)

    networks and links are combined via network items referencing specific
    links via a 'link_id' which maps to a links 'id' field.

    To convert this format to network_config yaml, we first iterate over the
    links and then walk the network list to determine if any of the networks
    utilize the current link; if so we generate a subnet entry for the device

    We also need to map network_data.json fields to network_config fields. For
    example, the network_data links 'id' field is equivalent to network_config
    'name' field for devices. We apply more of this mapping to the various
    link types that we encounter.

    There are additional fields that are populated in the network_data.json
    from OpenStack that are not relevant to network_config yaml, so we
    enumerate a dictionary of valid keys for network_yaml and apply filtering
    to drop these superfluous keys from the network_config yaml.
    """
    if network_json is None:
        return None
    # dict of network_config keys for filtering network_json; anything
    # not listed here is dropped from the resulting config entries.
    valid_keys = {
        'physical': [
            'name',
            'type',
            'mac_address',
            'subnets',
            'params',
        ],
        'subnet': [
            'type',
            'address',
            'netmask',
            'broadcast',
            'metric',
            'gateway',
            'pointopoint',
            'mtu',
            'scope',
            'dns_nameservers',
            'dns_search',
            'routes',
        ],
    }
    links = network_json.get('links', [])
    networks = network_json.get('networks', [])
    services = network_json.get('services', [])
    config = []
    for link in links:
        subnets = []
        # start from the link's own fields, keeping only whitelisted keys
        cfg = {k: v for k, v in link.items()
               if k in valid_keys['physical']}
        cfg.update({'name': link['id']})
        # every network referencing this link becomes a subnet entry
        for network in [net for net in networks
                        if net['link'] == link['id']]:
            subnet = {k: v for k, v in network.items()
                      if k in valid_keys['subnet']}
            if 'dhcp' in network['type']:
                # e.g. 'ipv6_dhcp...' -> dhcp6, anything else -> dhcp4
                t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
                subnet.update({
                    'type': t,
                })
            else:
                subnet.update({
                    'type': 'static',
                    'address': network.get('ip_address'),
                })
            subnets.append(subnet)
        cfg.update({'subnets': subnets})
        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
            cfg.update({
                'type': 'physical',
                'mac_address': link['ethernet_mac_address']})
        elif link['type'] in ['bond']:
            # collect bond_* tuning parameters; bond_links is handled
            # separately as the member interface list
            params = {}
            for k, v in link.items():
                if k == 'bond_links':
                    continue
                elif k.startswith('bond'):
                    params.update({k: v})
            cfg.update({
                'bond_interfaces': copy.deepcopy(link['bond_links']),
                'params': params,
            })
        elif link['type'] in ['vlan']:
            cfg.update({
                'name': "%s.%s" % (link['vlan_link'],
                                   link['vlan_id']),
                'vlan_link': link['vlan_link'],
                'vlan_id': link['vlan_id'],
                'mac_address': link['vlan_mac_address'],
            })
        else:
            raise ValueError(
                'Unknown network_data link type: %s' % link['type'])
        config.append(cfg)
    for service in services:
        # NOTE: this aliases (and mutates) the caller's service dict
        # rather than copying it; the 'type' key is added in place.
        cfg = service
        cfg.update({'type': 'nameserver'})
        config.append(cfg)
    return {'version': 1, 'config': config}
......@@ -40,13 +40,11 @@ import re
import socket
import stat
import serial
from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
from cloudinit import util
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
......
......@@ -474,6 +474,131 @@ class MetadataReader(BaseReader):
retries=self.retries)
def convert_net_json(network_json):
    """Return a dictionary of network_config by parsing provided
    OpenStack ConfigDrive NetworkData json format

    OpenStack network_data.json provides a 3 element dictionary
      - "links" (links are network devices, physical or virtual)
      - "networks" (networks are ip network configurations for one or more
        links)
      - "services" (non-ip services, like dns)

    networks and links are combined via network items referencing specific
    links via a 'link_id' which maps to a links 'id' field.

    To convert this format to network_config yaml, we first iterate over the
    links and then walk the network list to determine if any of the networks
    utilize the current link; if so we generate a subnet entry for the device

    We also need to map network_data.json fields to network_config fields. For
    example, the network_data links 'id' field is equivalent to network_config
    'name' field for devices. We apply more of this mapping to the various
    link types that we encounter.

    There are additional fields that are populated in the network_data.json
    from OpenStack that are not relevant to network_config yaml, so we
    enumerate a dictionary of valid keys for network_yaml and apply filtering
    to drop these superfluous keys from the network_config yaml.
    """
    # Dict of network_config keys for filtering network_json; anything
    # not listed here is dropped from the resulting config entries.
    valid_keys = {
        'physical': [
            'name',
            'type',
            'mac_address',
            'subnets',
            'params',
        ],
        'subnet': [
            'type',
            'address',
            'netmask',
            'broadcast',
            'metric',
            'gateway',
            'pointopoint',
            'mtu',
            'scope',
            'dns_nameservers',
            'dns_search',
            'routes',
        ],
    }

    links = network_json.get('links', [])
    networks = network_json.get('networks', [])
    services = network_json.get('services', [])

    config = []
    for link in links:
        subnets = []
        # dict() + generator (not a dict comprehension) keeps this
        # compatible with python 2.6
        cfg = dict((k, v) for k, v in link.items()
                   if k in valid_keys['physical'])
        cfg.update({'name': link['id']})
        # every network referencing this link becomes a subnet entry
        for network in [net for net in networks
                        if net['link'] == link['id']]:
            subnet = dict((k, v) for k, v in network.items()
                          if k in valid_keys['subnet'])
            if 'dhcp' in network['type']:
                # e.g. 'ipv6_dhcp...' -> dhcp6, anything else -> dhcp4
                t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
                subnet.update({
                    'type': t,
                })
            else:
                subnet.update({
                    'type': 'static',
                    'address': network.get('ip_address'),
                })
                # flag the address family so renderers can tell static
                # ipv4 and ipv6 subnets apart
                if network['type'] == 'ipv6':
                    subnet['ipv6'] = True
                else:
                    subnet['ipv4'] = True
            subnets.append(subnet)
        cfg.update({'subnets': subnets})
        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
            cfg.update({
                'type': 'physical',
                'mac_address': link['ethernet_mac_address']})
        elif link['type'] in ['bond']:
            # collect bond_* tuning parameters; bond_links is handled
            # separately as the member interface list
            params = {}
            for k, v in link.items():
                if k == 'bond_links':
                    continue
                elif k.startswith('bond'):
                    params.update({k: v})
            cfg.update({
                'bond_interfaces': copy.deepcopy(link['bond_links']),
                'params': params,
            })
        elif link['type'] in ['vlan']:
            cfg.update({
                'name': "%s.%s" % (link['vlan_link'],
                                   link['vlan_id']),
                'vlan_link': link['vlan_link'],
                'vlan_id': link['vlan_id'],
                'mac_address': link['vlan_mac_address'],
            })
        elif link['type'] in ['bridge']:
            cfg.update({
                'type': 'bridge',
                'mac_address': link['ethernet_mac_address'],
                # use .get(): network_data.json does not guarantee an
                # 'mtu' entry for bridge links, and a direct index would
                # raise KeyError when it is absent.
                'mtu': link.get('mtu')})
        else:
            raise ValueError(
                'Unknown network_data link type: %s' % link['type'])
        config.append(cfg)

    # services (e.g. dns nameservers) pass through nearly verbatim;
    # deepcopy so the caller's input json is not mutated.
    for service in services:
        cfg = copy.deepcopy(service)
        cfg.update({'type': 'nameserver'})
        config.append(cfg)

    return {'version': 1, 'config': config}
def convert_vendordata_json(data, recurse=True):
"""data: a loaded json *object* (strings, arrays, dicts).
return something suitable for cloudinit vendordata_raw.
......
......@@ -45,6 +45,7 @@ from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
from cloudinit.reporting import events
from cloudinit.net import cmdline
from cloudinit import sources
from cloudinit import type_utils
from cloudinit import util
......@@ -579,7 +580,7 @@ class Init(object):
if os.path.exists(disable_file):
return (None, disable_file)
cmdline_cfg = ('cmdline', net.read_kernel_cmdline_config())
cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config())
dscfg = ('ds', None)
if self.datasource and hasattr(self.datasource, 'network_config'):
dscfg = ('ds', self.datasource.network_config)
......
......@@ -171,7 +171,8 @@ class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None,
exit_code=None, cmd=None,
description=None, reason=None):
description=None, reason=None,
errno=None):
if not cmd:
self.cmd = '-'
else:
......@@ -202,6 +203,7 @@ class ProcessExecutionError(IOError):
else:
self.reason = '-'
self.errno = errno
message = self.MESSAGE_TMPL % {
'description': self.description,
'cmd': self.cmd,
......@@ -1147,7 +1149,14 @@ def find_devs_with(criteria=None, oformat='device',
options.append(path)
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
(out, _err) = subp(cmd, rcs=[0, 2])
try:
(out, _err) = subp(cmd, rcs=[0, 2])
except ProcessExecutionError as e:
if e.errno == errno.ENOENT:
# blkid not found...
out = ""
else:
raise
entries = []
for line in out.splitlines():
line = line.strip()
......@@ -1696,7 +1705,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
sp = subprocess.Popen(args, **kws)
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
raise ProcessExecutionError(cmd=args, reason=e,
errno=e.errno)
rc = sp.returncode
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
......
......@@ -11,8 +11,12 @@ PrettyTable
oauthlib
# This one is currently used only by the CloudSigma and SmartOS datasources.
# If these datasources are removed, this is no longer needed
pyserial
# If these datasources are removed, this is no longer needed.
#
# This will not work in py2.6 so it is only optionally installed on
# python 2.7 and later.
#
# pyserial
# This is only needed for places where we need to support configs in a manner
# that the built-in config parser is not sufficent (ie
......
......@@ -197,7 +197,6 @@ requirements = read_requires()
if sys.version_info < (3,):
requirements.append('cheetah')
setuptools.setup(
name='cloud-init',
version=get_version(),
......
......@@ -2,6 +2,7 @@
httpretty>=0.7.1
mock
nose
unittest2
# Only really needed on older versions of python
contextlib2
......
......@@ -7,12 +7,10 @@ import sys
import tempfile
import unittest
import mock
import six
import unittest2
try:
from unittest import mock
except ImportError:
import mock
try:
from contextlib import ExitStack