Merge branch 'master' of git://github.com/harlowja/Openstack-DevstackPy into harlowja-master

Conflicts: devstack/components/rabbit.py

Commit: a03ffd73cf
@ -24,6 +24,16 @@ commands:
      - service
      - httpd
      - stop
  git:
    checkout:
      - git
      - checkout
    clone:
      - git
      - clone
    pull:
      - git
      - pull
  libvirt:
    restart:
      - service
@ -23,6 +23,16 @@ commands:
      - service
      - apache2
      - stop
  git:
    checkout:
      - git
      - checkout
    clone:
      - git
      - clone
    pull:
      - git
      - pull
  iscsi:
    restart:
      - service
@ -13,7 +13,13 @@ components:
  - nova-client
  - horizon
description: Devstack.sh matching component installation.
options: null
options:
  no-vnc:
    # This is the nova component name (we need this to hook into the nova conf...)
    nova: nova
  nova:
    # We are enabling no-vnc (or trying to)
    - no-vnc
subsystems:
  glance:
    - api
@ -37,6 +37,11 @@ syslog = 0
# Which run type to use [fork (the default), upstart, screen]
run_type = fork

# How many seconds to wait until a service comes online before using it.
# For example, before uploading to glance we need keystone and glance to be online.
# Sometimes this takes 5 to 10 seconds to start these up....
service_wait_seconds = 5

[upstart]

# These flags are used for starting components under upstart (if default/run_type is upstart)
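The new service_wait_seconds option is what the runtime classes later in this diff read in place of the old hard-coded settings.WAIT_ALIVE_SECS. A minimal sketch of that read pattern, assuming a ConfigParser-backed cfg helper with a getint method (the function name and fallback below are illustrative, not taken from this commit):

import ConfigParser

def read_wait_time(cfg, default_secs=5):
    # Mirrors the pattern used in the runtimes below: clamp to at least 1 second.
    try:
        secs = cfg.getint('default', 'service_wait_seconds')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        secs = default_secs
    return max(secs, 1)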
@ -52,6 +52,10 @@ RUNNER_CLS_MAPPING = {
# Where symlinks will go
BASE_LINK_DIR = "/etc"

# Progress bar titles
UNINSTALL_TITLE = 'Uninstalling'
INSTALL_TITLE = 'Installing'


class ComponentBase(object):
    def __init__(self,
@ -60,6 +64,7 @@ class ComponentBase(object):
                 runner,
                 component_dir,
                 all_instances,
                 options,
                 name,
                 *args,
                 **kargs):
@ -68,6 +73,7 @@ class ComponentBase(object):
        self.instances = all_instances
        self.component_name = name
        self.subsystem_info = subsystem_info
        self.options = options

        # The runner has a reference to us, so use a weakref here to
        # avoid breaking garbage collection.
@ -93,20 +99,29 @@ class ComponentBase(object):
        knowns = self.known_subsystems()
        for s in self.desired_subsystems:
            if s not in knowns:
                raise ValueError("Unknown subsystem %r requested" % (s))
                raise ValueError("Unknown subsystem %r requested for (%s)" % (s, self))
        for s in self.subsystem_info.keys():
            if s not in knowns:
                raise ValueError("Unknown subsystem %r provided" % (s))
                raise ValueError("Unknown subsystem %r provided for (%s)" % (s, self))
        known_options = self.known_options()
        for s in self.options:
            if s not in known_options:
                LOG.warning("Unknown option %r provided for (%s)" % (s, self))

    def __str__(self):
        return "%r: %s" % (self.__class__.__name__, self.component_name)

    def known_subsystems(self):
        return list()
        return set()

    def known_options(self):
        return set()

    def warm_configs(self):
        pass

    def is_started(self):
        reader = tr.TraceReader(tr.trace_fn(self.trace_dir, tr.START_TRACE))
        return reader.exists()
        return tr.TraceReader(tr.trace_fn(self.trace_dir, tr.START_TRACE)).exists()

    def is_installed(self):
        return tr.TraceReader(tr.trace_fn(self.trace_dir, tr.IN_TRACE)).exists()
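The new validation pass rejects unknown subsystems outright but only warns about unknown options; subclasses opt in by overriding known_subsystems() and known_options(). A self-contained toy showing the same checks (the class and option names here are invented for illustration and are not the project's API):

class ToyComponent(object):
    # Toy stand-in for ComponentBase: only the validation logic is shown.
    def __init__(self, options, desired_subsystems):
        self.options = options
        self.desired_subsystems = desired_subsystems

    def known_subsystems(self):
        return set(['api', 'registry'])

    def known_options(self):
        return set(['no-vnc'])

    def validate(self):
        knowns = self.known_subsystems()
        for s in self.desired_subsystems:
            if s not in knowns:
                raise ValueError("Unknown subsystem %r requested for (%s)" % (s, self))
        for o in self.options:
            if o not in self.known_options():
                print("Unknown option %r provided for (%s)" % (o, self))


ToyComponent(set(['no-vnc']), set(['api'])).validate()  # passes quietly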
@ -146,17 +161,22 @@ class PkgInstallComponent(ComponentBase):
                msg = "No uri entry found at config location [%s]" % \
                    (cfg_helpers.make_id(cfg_section, cfg_key))
                raise excp.ConfigException(msg)
            # Activate da download!
            self.tracewriter.download_happened(target_loc, uri)
            dirs_made = down.download(target_loc, uri, branch)
            dirs_made = self._do_download(uri, target_loc, branch)
            # Here we ensure this is always added so that
            # if a keep old happens then this of course
            # won't be recreated, but if u uninstall without keeping old
            # then this won't be deleted this time around
            # adding it in is harmless and will make sure its removed.
            if target_loc not in dirs_made:
                dirs_made.append(target_loc)
            self.tracewriter.dirs_made(*dirs_made)
        return len(locations)

    def _do_download(self, uri, target_dir, branch):
        return down.GitDownloader(self.distro, uri, target_dir, branch).download()

    def _get_param_map(self, config_fn):
        return dict()
@ -177,9 +197,11 @@ class PkgInstallComponent(ComponentBase):
        if pkgs:
            pkg_names = set([p['name'] for p in pkgs])
            LOG.info("Setting up %s packages (%s)" % (len(pkg_names), ", ".join(pkg_names)))
            for p in pkgs:
            with utils.progress_bar(INSTALL_TITLE, len(pkgs)) as p_bar:
                for (i, p) in enumerate(pkgs):
                    self.tracewriter.package_installed(p)
                    self.packager.install(p)
                    p_bar.update(i + 1)
        else:
            LOG.info('No packages to install for %s',
                     self.component_name)
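Package installs are now wrapped in utils.progress_bar(...), which this diff only ever uses as a context manager yielding an object with an update() method. A hedged guess at such a helper, built on the third-party progressbar module that this same commit uses in downloader.py; the reverse flag is accepted only for parity with the uninstall call sites and is not interpreted here:

import contextlib
import progressbar

@contextlib.contextmanager
def progress_bar(title, max_val, reverse=False):
    # Sketch only: yield a bar whose update(n) is fed the count of items processed so far.
    widgets = [title + ': ', progressbar.Percentage(), ' ', progressbar.Bar()]
    bar = progressbar.ProgressBar(widgets=widgets, maxval=max(max_val, 1))
    bar.start()
    try:
        yield bar
    finally:
        bar.finish()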
@ -287,9 +309,11 @@ class PythonInstallComponent(PkgInstallComponent):
        if pips:
            pip_names = set([p['name'] for p in pips])
            LOG.info("Setting up %s pips (%s)", len(pip_names), ", ".join(pip_names))
            for p in pips:
            with utils.progress_bar(INSTALL_TITLE, len(pips)) as p_bar:
                for (i, p) in enumerate(pips):
                    self.tracewriter.pip_installed(p)
                    pip.install(p, self.distro)
                    p_bar.update(i + 1)

    def _install_python_setups(self):
        pydirs = self._get_python_directories()
@ -342,7 +366,6 @@ class PkgUninstallComponent(ComponentBase):

    def _unconfigure_runners(self):
        if RUNNER_CLS_MAPPING:
            LOG.info("Unconfiguring %s runners.", len(RUNNER_CLS_MAPPING))
            for (_, cls) in RUNNER_CLS_MAPPING.items():
                instance = cls(self.cfg, self.component_name, self.trace_dir)
                instance.unconfigure()
@ -375,12 +398,17 @@ class PkgUninstallComponent(ComponentBase):
        pass

    def _uninstall_pkgs(self):
        pkgsfull = self.tracereader.packages_installed()
        if pkgsfull:
            pkg_names = set([p['name'] for p in pkgsfull])
        pkgs = self.tracereader.packages_installed()
        if pkgs:
            pkg_names = set([p['name'] for p in pkgs])
            LOG.info("Potentially removing %s packages (%s)",
                     len(pkg_names), ", ".join(pkg_names))
            which_removed = self.packager.remove_batch(pkgsfull)
            which_removed = set()
            with utils.progress_bar(UNINSTALL_TITLE, len(pkgs), reverse=True) as p_bar:
                for (i, p) in enumerate(pkgs):
                    if self.packager.remove(p):
                        which_removed.add(p['name'])
                    p_bar.update(i + 1)
            LOG.info("Actually removed %s packages (%s)",
                     len(which_removed), ", ".join(which_removed))
@ -423,7 +451,10 @@ class PythonUninstallComponent(PkgUninstallComponent):
        if pips:
            names = set([p['name'] for p in pips])
            LOG.info("Uninstalling %s python packages (%s)" % (len(names), ", ".join(names)))
            pip.uninstall_batch(pips, self.distro)
            with utils.progress_bar(UNINSTALL_TITLE, len(pips), reverse=True) as p_bar:
                for (i, p) in enumerate(pips):
                    pip.uninstall(p, self.distro)
                    p_bar.update(i + 1)

    def _uninstall_python(self):
        pylisting = self.tracereader.py_listing()
@ -472,10 +503,10 @@ class ProgramRuntime(ComponentBase):
                self._get_param_map(app_name),
            )
            # Configure it with the given settings
            LOG.info("Configuring runner for program [%s]", app_name)
            LOG.debug("Configuring runner for program [%s]", app_name)
            cfg_am = instance.configure(app_name,
                                        (app_pth, app_dir, program_opts))
            LOG.info("Configured %s files for runner for program [%s]",
            LOG.debug("Configured %s files for runner for program [%s]",
                      cfg_am, app_name)
            tot_am += cfg_am
        return tot_am
@ -495,12 +526,12 @@ class ProgramRuntime(ComponentBase):
                self._get_param_map(app_name),
            )
            # Start it with the given settings
            LOG.info("Starting [%s] with options [%s]",
            LOG.debug("Starting [%s] with options [%s]",
                      app_name, ", ".join(program_opts))
            info_fn = instance.start(app_name,
                                     (app_pth, app_dir, program_opts),
                                     )
            LOG.info("Started [%s] details are in [%s]", app_name, info_fn)
            LOG.debug("Started [%s] details are in [%s]", app_name, info_fn)
            # This trace is used to locate details about what to stop
            self.tracewriter.started_info(app_name, info_fn)
            am_started += 1
@ -17,7 +17,6 @@
from devstack import component as comp
from devstack import exceptions as excp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils

@ -25,9 +24,6 @@ import abc

LOG = logging.getLogger("devstack.components.db")

# How long we wait before using the database after a restart
START_WAIT_TIME = settings.WAIT_ALIVE_SECS

# Need to reset pw to blank since this distributions don't seem to
# always reset it when u uninstall the db
RESET_BASE_PW = ''
@ -158,6 +154,7 @@ class DBInstaller(comp.PkgInstallComponent):
class DBRuntime(comp.EmptyRuntime):
    def __init__(self, *args, **kargs):
        comp.EmptyRuntime.__init__(self, *args, **kargs)
        self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)

    def _get_run_actions(self, act, exception_cls):
        dbtype = self.cfg.get("db", "type")
@ -173,8 +170,8 @@ class DBRuntime(comp.EmptyRuntime):
            sh.execute(*startcmd,
                       run_as_root=True,
                       check_exit_code=True)
            LOG.info("Please wait %s seconds while it starts up." % START_WAIT_TIME)
            sh.sleep(START_WAIT_TIME)
            LOG.info("Please wait %s seconds while it starts up." % self.wait_time)
            sh.sleep(self.wait_time)
            return 1
        else:
            return 0
@ -195,8 +192,8 @@ class DBRuntime(comp.EmptyRuntime):
        sh.execute(*restartcmd,
                   run_as_root=True,
                   check_exit_code=True)
        LOG.info("Please wait %s seconds while it restarts." % START_WAIT_TIME)
        sh.sleep(START_WAIT_TIME)
        LOG.info("Please wait %s seconds while it restarts." % self.wait_time)
        sh.sleep(self.wait_time)
        return 1

    def status(self):
@ -211,7 +208,8 @@ class DBRuntime(comp.EmptyRuntime):
        combined = combined.lower()
        if combined.find("running") != -1:
            return comp.STATUS_STARTED
        elif combined.find("stop") != -1:
        elif combined.find("stop") != -1 or \
                combined.find('unrecognized') != -1:
            return comp.STATUS_STOPPED
        else:
            return comp.STATUS_UNKNOWN
@ -19,13 +19,12 @@ import io
|
||||
from devstack import cfg
|
||||
from devstack import component as comp
|
||||
from devstack import log as logging
|
||||
from devstack import settings
|
||||
from devstack import shell as sh
|
||||
|
||||
from devstack.components import db
|
||||
from devstack.components import keystone
|
||||
|
||||
from devstack.image import creator
|
||||
from devstack.image import uploader
|
||||
|
||||
LOG = logging.getLogger("devstack.components.glance")
|
||||
|
||||
@ -53,9 +52,6 @@ GSCR = 'scrub'
|
||||
# This db will be dropped and created
|
||||
DB_NAME = "glance"
|
||||
|
||||
# How long to wait before attempting image upload
|
||||
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
|
||||
|
||||
# What applications to start
|
||||
APP_OPTIONS = {
|
||||
'glance-api': ['--config-file', sh.joinpths('%ROOT%', "etc", API_CONF)],
|
||||
@ -187,7 +183,7 @@ class GlanceRuntime(comp.PythonRuntime):
|
||||
comp.PythonRuntime.__init__(self, *args, **kargs)
|
||||
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
|
||||
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
|
||||
self.options = kargs.get('options', set())
|
||||
self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)
|
||||
|
||||
def known_subsystems(self):
|
||||
return SUB_TO_APP.keys()
|
||||
@ -204,6 +200,9 @@ class GlanceRuntime(comp.PythonRuntime):
|
||||
def _get_app_options(self, app):
|
||||
return APP_OPTIONS.get(app)
|
||||
|
||||
def known_options(self):
|
||||
return set(['no-load-images'])
|
||||
|
||||
def post_start(self):
|
||||
comp.PythonRuntime.post_start(self)
|
||||
if 'no-load-images' in self.options:
|
||||
@ -211,6 +210,6 @@ class GlanceRuntime(comp.PythonRuntime):
|
||||
else:
|
||||
# Install any images that need activating...
|
||||
# TODO: make this less cheesy - need to wait till glance goes online
|
||||
LOG.info("Waiting %s seconds so that glance can start up before image install." % (WAIT_ONLINE_TO))
|
||||
sh.sleep(WAIT_ONLINE_TO)
|
||||
creator.ImageCreationService(self.cfg, self.pw_gen).install()
|
||||
LOG.info("Waiting %s seconds so that glance can start up before image install." % (self.wait_time))
|
||||
sh.sleep(self.wait_time)
|
||||
uploader.Service(self.cfg, self.pw_gen).install()
|
||||
|
@ -17,7 +17,6 @@
|
||||
from devstack import component as comp
|
||||
from devstack import exceptions as excp
|
||||
from devstack import log as logging
|
||||
from devstack import settings
|
||||
from devstack import shell as sh
|
||||
from devstack import utils
|
||||
|
||||
@ -75,17 +74,25 @@ class HorizonInstaller(comp.PythonInstallComponent):
|
||||
})
|
||||
return places
|
||||
|
||||
def known_options(self):
|
||||
return set(['quantum-client'])
|
||||
|
||||
def verify(self):
|
||||
comp.PythonInstallComponent.verify(self)
|
||||
self._check_ug()
|
||||
|
||||
def _get_symlinks(self):
|
||||
links = comp.PythonInstallComponent._get_symlinks(self)
|
||||
link_tgt = self.distro.get_command('apache', 'settings',
|
||||
'conf-link-target', quiet=True)
|
||||
if link_tgt:
|
||||
src = self._get_target_config_name(HORIZON_APACHE_CONF)
|
||||
links[src] = self.distro.get_command('apache', 'settings', 'conf-link-target')
|
||||
if utils.service_enabled(settings.QUANTUM_CLIENT, self.instances, False):
|
||||
links[src] = link_tgt
|
||||
if 'quantum-client' in self.options:
|
||||
q_name = self.options['quantum-client']
|
||||
if q_name in self.instances:
|
||||
# TODO remove this junk, blah, puke that we have to do this
|
||||
qc = self.instances[settings.QUANTUM_CLIENT]
|
||||
qc = self.instances[q_name]
|
||||
src_pth = sh.joinpths(qc.app_dir, 'quantum')
|
||||
tgt_dir = sh.joinpths(self.dash_dir, 'quantum')
|
||||
links[src_pth] = tgt_dir
|
||||
@ -100,7 +107,9 @@ class HorizonInstaller(comp.PythonInstallComponent):
|
||||
msg = "No group named %s exists on this system!" % (group)
|
||||
raise excp.ConfigException(msg)
|
||||
if user in BAD_APACHE_USERS:
|
||||
msg = "You may want to adjust your configuration, (user=%s, group=%s) will not work with apache!" % (user, group)
|
||||
msg = ("You may want to adjust your configuration, "
|
||||
"(user=%s, group=%s) will not work with apache!"
|
||||
% (user, group))
|
||||
raise excp.ConfigException(msg)
|
||||
|
||||
def _get_target_config_name(self, config_name):
|
||||
@ -226,9 +235,11 @@ class HorizonRuntime(comp.EmptyRuntime):
|
||||
(sysout, stderr) = run_result[0]
|
||||
combined = str(sysout) + str(stderr)
|
||||
combined = combined.lower()
|
||||
if sysout.find("is running") != -1:
|
||||
if combined.find("is running") != -1:
|
||||
return comp.STATUS_STARTED
|
||||
elif sysout.find("not running") != -1 or sysout.find("stopped") != -1:
|
||||
elif combined.find("not running") != -1 or \
|
||||
combined.find("stopped") != -1 or \
|
||||
combined.find('unrecognized') != -1:
|
||||
return comp.STATUS_STOPPED
|
||||
else:
|
||||
return comp.STATUS_UNKNOWN
|
||||
|
@ -21,7 +21,6 @@ from urlparse import urlunparse
|
||||
from devstack import cfg
|
||||
from devstack import component as comp
|
||||
from devstack import log as logging
|
||||
from devstack import settings
|
||||
from devstack import shell as sh
|
||||
from devstack import utils
|
||||
|
||||
@ -63,9 +62,6 @@ APP_OPTIONS = {
|
||||
}
|
||||
|
||||
|
||||
# Used to wait until started before we can run the data setup script
|
||||
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
|
||||
|
||||
# Swift template additions
|
||||
# TODO: get rid of these
|
||||
SWIFT_TEMPL_ADDS = ['catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
|
||||
@ -109,6 +105,9 @@ class KeystoneInstaller(comp.PythonInstallComponent):
|
||||
self._sync_db()
|
||||
self._setup_initer()
|
||||
|
||||
def known_options(self):
|
||||
return set(['swift', 'quantum'])
|
||||
|
||||
def _sync_db(self):
|
||||
LOG.info("Syncing keystone to database named %s.", DB_NAME)
|
||||
params = dict()
|
||||
@ -154,14 +153,13 @@ class KeystoneInstaller(comp.PythonInstallComponent):
|
||||
self.tracewriter.file_touched(sh.touch_file(log_filename))
|
||||
elif name == CATALOG_CONF:
|
||||
nlines = list()
|
||||
if utils.service_enabled(settings.SWIFT, self.instances):
|
||||
if 'swift' in self.options:
|
||||
mp = dict()
|
||||
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
|
||||
nlines.append("# Swift additions")
|
||||
nlines.extend(utils.param_replace_list(SWIFT_TEMPL_ADDS, mp))
|
||||
nlines.append("")
|
||||
if utils.service_enabled(settings.QUANTUM, self.instances) or \
|
||||
utils.service_enabled(settings.QUANTUM_CLIENT, self.instances):
|
||||
if 'quantum' in self.options:
|
||||
mp = dict()
|
||||
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
|
||||
nlines.append("# Quantum additions")
|
||||
@ -204,6 +202,7 @@ class KeystoneRuntime(comp.PythonRuntime):
|
||||
comp.PythonRuntime.__init__(self, *args, **kargs)
|
||||
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
|
||||
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
|
||||
self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)
|
||||
|
||||
def post_start(self):
|
||||
tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
|
||||
@ -211,8 +210,8 @@ class KeystoneRuntime(comp.PythonRuntime):
|
||||
# If its still there, run it
|
||||
# these environment additions are important
|
||||
# in that they eventually affect how this script runs
|
||||
LOG.info("Waiting %s seconds so that keystone can start up before running first time init." % (WAIT_ONLINE_TO))
|
||||
sh.sleep(WAIT_ONLINE_TO)
|
||||
LOG.info("Waiting %s seconds so that keystone can start up before running first time init." % (self.wait_time))
|
||||
sh.sleep(self.wait_time)
|
||||
env = dict()
|
||||
env['ENABLED_SERVICES'] = ",".join(self.instances.keys())
|
||||
env['BIN_DIR'] = self.bin_dir
|
||||
|
@ -19,7 +19,6 @@ import io
|
||||
from devstack import cfg
|
||||
from devstack import component as comp
|
||||
from devstack import log as logging
|
||||
from devstack import settings
|
||||
from devstack import shell as sh
|
||||
from devstack import utils
|
||||
|
||||
@ -57,10 +56,6 @@ APP_OPTIONS = {
|
||||
'melange-server': ['--config-file', '%CFG_FILE%'],
|
||||
}
|
||||
|
||||
# Special option that specifies we should make the network cidr using melange
|
||||
CREATE_CIDR = "create-cidr"
|
||||
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
|
||||
|
||||
|
||||
class MelangeUninstaller(comp.PythonUninstallComponent):
|
||||
def __init__(self, *args, **kargs):
|
||||
@ -138,6 +133,7 @@ class MelangeRuntime(comp.PythonRuntime):
|
||||
comp.PythonRuntime.__init__(self, *args, **kargs)
|
||||
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
|
||||
self.cfg_dir = sh.joinpths(self.app_dir, *CFG_LOC)
|
||||
self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)
|
||||
|
||||
def _get_apps_to_start(self):
|
||||
apps = list()
|
||||
@ -156,13 +152,14 @@ class MelangeRuntime(comp.PythonRuntime):
|
||||
pmap['CFG_FILE'] = sh.joinpths(self.cfg_dir, ROOT_CONF_REAL_NAME)
|
||||
return pmap
|
||||
|
||||
def known_options(self):
|
||||
return set(["create-cidr"])
|
||||
|
||||
def post_start(self):
|
||||
comp.PythonRuntime.post_start(self)
|
||||
# FIXME: This is a bit of a hack. How do we document "flags" like this?
|
||||
flags = []
|
||||
if CREATE_CIDR in flags:
|
||||
LOG.info("Waiting %s seconds so that the melange server can start up before cidr range creation." % (WAIT_ONLINE_TO))
|
||||
sh.sleep(WAIT_ONLINE_TO)
|
||||
if "create-cidr" in self.options:
|
||||
LOG.info("Waiting %s seconds so that the melange server can start up before cidr range creation." % (self.wait_time))
|
||||
sh.sleep(self.wait_time)
|
||||
mp = dict()
|
||||
mp['CIDR_RANGE'] = self.cfg.getdefaulted('melange', 'm_mac_range', DEF_CIDR_RANGE)
|
||||
utils.execute_template(*CIDR_CREATE_CMD, params=mp)
|
||||
|
@ -21,7 +21,6 @@ from devstack import date
|
||||
from devstack import exceptions
|
||||
from devstack import libvirt as virsh
|
||||
from devstack import log as logging
|
||||
from devstack import settings
|
||||
from devstack import shell as sh
|
||||
from devstack import utils
|
||||
|
||||
@ -199,9 +198,6 @@ STD_COMPUTE_EXTS = 'nova.api.openstack.compute.contrib.standard_extensions'
|
||||
# Config keys we warm up so u won't be prompted later
|
||||
WARMUP_PWS = [('rabbit', rabbit.PW_USER_PROMPT)]
|
||||
|
||||
# Used to wait until started before we can run the data setup script
|
||||
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
|
||||
|
||||
# Nova conf default section
|
||||
NV_CONF_DEF_SECTION = "[DEFAULT]"
|
||||
|
||||
@ -272,6 +268,9 @@ class NovaInstaller(comp.PythonInstallComponent):
|
||||
if NXVNC in self.desired_subsystems:
|
||||
self.xvnc_enabled = True
|
||||
|
||||
def known_options(self):
|
||||
return set(['no-vnc', 'quantum', 'melange'])
|
||||
|
||||
def known_subsystems(self):
|
||||
return SUBSYSTEMS
|
||||
|
||||
@ -400,6 +399,7 @@ class NovaRuntime(comp.PythonRuntime):
|
||||
comp.PythonRuntime.__init__(self, *args, **kargs)
|
||||
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
|
||||
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
|
||||
self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)
|
||||
|
||||
def _setup_network_init(self):
|
||||
tgt_fn = sh.joinpths(self.bin_dir, NET_INIT_CONF)
|
||||
@ -408,9 +408,9 @@ class NovaRuntime(comp.PythonRuntime):
|
||||
# If still there, run it
|
||||
# these environment additions are important
|
||||
# in that they eventually affect how this script runs
|
||||
if utils.service_enabled(settings.QUANTUM, self.instances, False):
|
||||
LOG.info("Waiting %s seconds so that quantum can start up before running first time init." % (WAIT_ONLINE_TO))
|
||||
sh.sleep(WAIT_ONLINE_TO)
|
||||
if 'quantum' in self.options:
|
||||
LOG.info("Waiting %s seconds so that quantum can start up before running first time init." % (self.wait_time))
|
||||
sh.sleep(self.wait_time)
|
||||
env = dict()
|
||||
env['ENABLED_SERVICES'] = ",".join(self.instances.keys())
|
||||
setup_cmd = NET_INIT_CMD_ROOT + [tgt_fn]
|
||||
@ -422,6 +422,9 @@ class NovaRuntime(comp.PythonRuntime):
|
||||
def post_start(self):
|
||||
self._setup_network_init()
|
||||
|
||||
def known_options(self):
|
||||
return set(['quantum'])
|
||||
|
||||
def known_subsystems(self):
|
||||
return SUBSYSTEMS
|
||||
|
||||
@ -545,7 +548,8 @@ class NovaConfConfigurator(object):
|
||||
self.cfg_dir = ni.cfg_dir
|
||||
self.xvnc_enabled = ni.xvnc_enabled
|
||||
self.volumes_enabled = ni.volumes_enabled
|
||||
self.novnc_enabled = utils.service_enabled(settings.NOVNC, self.instances)
|
||||
self.options = ni.options
|
||||
self.novnc_enabled = 'no-vnc' in self.options
|
||||
|
||||
def _getbool(self, name):
|
||||
return self.cfg.getboolean('nova', name)
|
||||
@ -737,7 +741,7 @@ class NovaConfConfigurator(object):
|
||||
|
||||
def _configure_network_settings(self, nova_conf):
|
||||
# TODO this might not be right....
|
||||
if utils.service_enabled(settings.QUANTUM, self.instances, False):
|
||||
if 'quantum' in self.options:
|
||||
nova_conf.add('network_manager', QUANTUM_MANAGER)
|
||||
hostip = self.cfg.get('host', 'ip')
|
||||
nova_conf.add('quantum_connection_host', self.cfg.getdefaulted('quantum', 'q_host', hostip))
|
||||
@ -745,7 +749,7 @@ class NovaConfConfigurator(object):
|
||||
if self.cfg.get('quantum', 'q_plugin') == 'openvswitch':
|
||||
for (key, value) in QUANTUM_OPENSWITCH_OPS.items():
|
||||
nova_conf.add(key, value)
|
||||
if utils.service_enabled(settings.MELANGE_CLIENT, self.instances, False):
|
||||
if 'melange' in self.options:
|
||||
nova_conf.add('quantum_ipam_lib', QUANTUM_IPAM_LIB)
|
||||
nova_conf.add('use_melange_mac_generation', True)
|
||||
nova_conf.add('melange_host', self.cfg.getdefaulted('melange', 'm_host', hostip))
|
||||
|
@ -16,9 +16,7 @@
|
||||
|
||||
from devstack import component as comp
|
||||
from devstack import log as logging
|
||||
from devstack import settings
|
||||
from devstack import shell as sh
|
||||
from devstack import utils
|
||||
|
||||
from devstack.components import nova
|
||||
|
||||
@ -69,11 +67,16 @@ class NoVNCRuntime(comp.ProgramRuntime):
|
||||
})
|
||||
return apps
|
||||
|
||||
def known_options(self):
|
||||
return set(['nova'])
|
||||
|
||||
def _get_param_map(self, app_name):
|
||||
root_params = comp.ProgramRuntime._get_param_map(self, app_name)
|
||||
if app_name == VNC_PROXY_APP and utils.service_enabled(settings.NOVA, self.instances, False):
|
||||
if app_name == VNC_PROXY_APP and 'nova' in self.options:
|
||||
nova_name = self.options['nova']
|
||||
if nova_name in self.instances:
|
||||
# FIXME: Have to reach into the nova conf (puke)
|
||||
nova_runtime = self.instances[settings.NOVA]
|
||||
nova_runtime = self.instances[nova_name]
|
||||
root_params['NOVA_CONF'] = sh.joinpths(nova_runtime.cfg_dir, nova.API_CONF)
|
||||
return root_params
|
||||
|
||||
|
@ -19,7 +19,6 @@ import io
|
||||
from devstack import cfg
|
||||
from devstack import component as comp
|
||||
from devstack import log as logging
|
||||
from devstack import settings
|
||||
from devstack import shell as sh
|
||||
from devstack import utils
|
||||
|
||||
@ -85,6 +84,9 @@ class QuantumInstaller(comp.PkgInstallComponent):
|
||||
})
|
||||
return places
|
||||
|
||||
def known_options(self):
|
||||
return set(['no-ovs-db-init', 'no-ovs-bridge-init'])
|
||||
|
||||
def _get_config_files(self):
|
||||
return list(CONFIG_FILES)
|
||||
|
||||
@ -133,8 +135,10 @@ class QuantumInstaller(comp.PkgInstallComponent):
|
||||
return comp.PkgInstallComponent._config_adjust(self, contents, config_fn)
|
||||
|
||||
def _setup_bridge(self):
|
||||
if not self.q_vswitch_agent or \
|
||||
'no-ovs-bridge-init' in self.options:
|
||||
return
|
||||
bridge = self.cfg.getdefaulted("quantum", "ovs_bridge", 'br-int')
|
||||
if bridge:
|
||||
LOG.info("Fixing up ovs bridge named %s.", bridge)
|
||||
external_id = self.cfg.getdefaulted("quantum", 'ovs_bridge_external_name', bridge)
|
||||
params = dict()
|
||||
@ -146,17 +150,17 @@ class QuantumInstaller(comp.PkgInstallComponent):
|
||||
'cmd': cmd_templ,
|
||||
'run_as_root': True,
|
||||
})
|
||||
if cmds:
|
||||
utils.execute_template(*cmds, params=params)
|
||||
|
||||
def post_install(self):
|
||||
comp.PkgInstallComponent.post_install(self)
|
||||
if self.q_vswitch_service and utils.service_enabled(settings.DB, self.instances, False):
|
||||
self._setup_db()
|
||||
if self.q_vswitch_agent:
|
||||
self._setup_bridge()
|
||||
|
||||
def _setup_db(self):
|
||||
if not self.q_vswitch_service or \
|
||||
'no-ovs-db-init' in self.options:
|
||||
return
|
||||
LOG.info("Fixing up database named %s.", DB_NAME)
|
||||
db.drop_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
|
||||
db.create_db(self.cfg, self.pw_gen, self.distro, DB_NAME)
|
||||
|
@ -18,7 +18,6 @@ from tempfile import TemporaryFile

from devstack import component as comp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh

LOG = logging.getLogger("devstack.components.rabbit")
@ -26,9 +25,6 @@ LOG = logging.getLogger("devstack.components.rabbit")
# Default password (guest)
RESET_BASE_PW = ''

# How long we wait for rabbitmq to start up before doing commands on it
WAIT_ON_TIME = settings.WAIT_ALIVE_SECS

# Config keys we warm up so u won't be prompted later
WARMUP_PWS = ['rabbit']

@ -78,6 +74,7 @@ class RabbitInstaller(comp.PkgInstallComponent):
class RabbitRuntime(comp.EmptyRuntime):
    def __init__(self, *args, **kargs):
        comp.EmptyRuntime.__init__(self, *args, **kargs)
        self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)

    def start(self):
        if self.status() != comp.STATUS_STARTED:
@ -99,7 +96,9 @@ class RabbitRuntime(comp.EmptyRuntime):
        (sysout, stderr) = run_result
        combined = str(sysout) + str(stderr)
        combined = combined.lower()
        if combined.find('nodedown') != -1 or combined.find("unable to connect to node") != -1:
        if combined.find('nodedown') != -1 or \
                combined.find("unable to connect to node") != -1 or \
                combined.find('unrecognized') != -1:
            return comp.STATUS_STOPPED
        elif combined.find('running_applications') != -1:
            return comp.STATUS_STARTED
@ -124,8 +123,8 @@ class RabbitRuntime(comp.EmptyRuntime):
    def restart(self):
        LOG.info("Restarting rabbit-mq.")
        self._run_cmd(self.distro.get_command('rabbit-mq', 'restart'))
        LOG.info("Please wait %s seconds while it starts up." % (WAIT_ON_TIME))
        sh.sleep(WAIT_ON_TIME)
        LOG.info("Please wait %s seconds while it starts up." % (self.wait_time))
        sh.sleep(self.wait_time)
        return 1

    def stop(self):
@ -52,7 +52,7 @@ class AptPackager(apt.AptPackager):
        if name == 'rabbitmq-server':
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
            LOG.info("Handling special remove of %s." % (name))
            LOG.debug("Handling special remove of %s." % (name))
            pkg_full = self._format_pkg_name(name, info.get("version"))
            cmd = apt.APT_REMOVE + [pkg_full]
            self._execute_apt(cmd)
@ -68,7 +68,7 @@ class AptPackager(apt.AptPackager):
        if name == 'rabbitmq-server':
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
            LOG.info("Handling special install of %s." % (name))
            LOG.debug("Handling special install of %s." % (name))
            #this seems to be a temporary fix for that bug
            with tempfile.TemporaryFile() as f:
                pkg_full = self._format_pkg_name(name, info.get("version"))
@ -15,45 +15,93 @@
# under the License.


from urlparse import urlparse
import re
import urllib

import progressbar

from devstack import log as logging
from devstack import shell as sh

LOG = logging.getLogger("devstack.downloader")

GIT_EXT_REG = re.compile(r"^(.*?)\.git\s*$", re.IGNORECASE)
# Git master branch
GIT_MASTER_BRANCH = "master"
CLONE_CMD = ["git", "clone"]
CHECKOUT_CMD = ['git', 'checkout']
PULL_CMD = ['git', 'pull']


def _gitdownload(storewhere, uri, branch=None):
class Downloader(object):

    def __init__(self, uri, store_where):
        self.uri = uri
        self.store_where = store_where

    def download(self):
        raise NotImplementedError()


class GitDownloader(Downloader):

    def __init__(self, distro, uri, store_where, branch):
        Downloader.__init__(self, uri, store_where)
        self.branch = branch
        self.distro = distro

    def download(self):
        dirsmade = list()
        if sh.isdir(storewhere):
            LOG.info("Updating code located at [%s]" % (storewhere))
            cmd = CHECKOUT_CMD + [GIT_MASTER_BRANCH]
            sh.execute(*cmd, cwd=storewhere)
            cmd = PULL_CMD
            sh.execute(*cmd, cwd=storewhere)
        if sh.isdir(self.store_where):
            LOG.info("Updating using git: located at %r" % (self.store_where))
            cmd = self.distro.get_command('git', 'checkout')
            cmd += [GIT_MASTER_BRANCH]
            sh.execute(*cmd, cwd=self.store_where)
            cmd = self.distro.get_command('git', 'pull')
            sh.execute(*cmd, cwd=self.store_where)
        else:
            LOG.info("Downloading from [%s] to [%s]" % (uri, storewhere))
            dirsmade.extend(sh.mkdirslist(storewhere))
            cmd = CLONE_CMD + [uri, storewhere]
            LOG.info("Downloading using git: %r to %r" % (self.uri, self.store_where))
            dirsmade.extend(sh.mkdirslist(self.store_where))
            cmd = self.distro.get_command('git', 'clone')
            cmd += [self.uri, self.store_where]
            sh.execute(*cmd)
        if branch and branch != GIT_MASTER_BRANCH:
            LOG.info("Adjusting git branch to [%s]" % (branch))
            cmd = CHECKOUT_CMD + [branch]
            sh.execute(*cmd, cwd=storewhere)
        if self.branch and self.branch != GIT_MASTER_BRANCH:
            LOG.info("Adjusting branch using git: %r" % (self.branch))
            cmd = self.distro.get_command('git', 'checkout')
            cmd += [self.branch]
            sh.execute(*cmd, cwd=self.store_where)
        return dirsmade


def download(storewhere, uri, branch=None):
    up = urlparse(uri)
    if up and up.scheme.lower() == "git" or GIT_EXT_REG.match(up.path):
        return _gitdownload(storewhere, uri, branch)
class UrlLibDownloader(Downloader):

    def __init__(self, uri, store_where, **kargs):
        Downloader.__init__(self, uri, store_where)
        self.quiet = kargs.get('quiet', False)
        self.p_bar = None

    def _make_bar(self, size):
        widgets = [
            'Fetching: ', progressbar.Percentage(),
            ' ', progressbar.Bar(),
            ' ', progressbar.ETA(),
            ' ', progressbar.FileTransferSpeed(),
        ]
        return progressbar.ProgressBar(widgets=widgets, maxval=size)

    def _report(self, blocks, block_size, total_size):
        if self.quiet:
            return
        byte_down = blocks * block_size
        if not self.p_bar:
            self.p_bar = self._make_bar(total_size)
            self.p_bar.start()
        if byte_down > self.p_bar.maxval:
            # This seems to happen, huh???
            pass
        else:
        msg = "Currently we do not know how to download from uri [%s]" % (uri)
        raise NotImplementedError(msg)
            self.p_bar.update(byte_down)

    def download(self):
        LOG.info('Downloading using urllib: %r to %r', self.uri, self.store_where)
        try:
            urllib.urlretrieve(self.uri, self.store_where, self._report)
        finally:
            if self.p_bar:
                self.p_bar.finish()
                self.p_bar = None
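With the module-level download() function gone, callers now instantiate a downloader directly: component.py above uses GitDownloader, and the new image uploader uses UrlLibDownloader. A minimal usage sketch with placeholder URLs and paths and a stand-in distro object (everything below the import is illustrative only):

from devstack import downloader as down

def fetch_sources(distro):
    # distro must supply get_command('git', 'checkout'/'clone'/'pull'),
    # which the yaml additions earlier in this diff provide per distribution.
    return down.GitDownloader(distro, "git://example.org/nova.git",
                              "/opt/stack/nova", "master").download()

def fetch_image():
    # Plain HTTP fetch with a progress bar.
    down.UrlLibDownloader("http://example.org/image.tar.gz",
                          "/tmp/image.tar.gz").download()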
@ -1,292 +0,0 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ConfigParser
|
||||
import json
|
||||
import os
|
||||
import tarfile
|
||||
import tempfile
|
||||
import urllib
|
||||
import urllib2
|
||||
|
||||
from devstack import log
|
||||
from devstack import shell
|
||||
from devstack import utils
|
||||
from devstack.components import keystone
|
||||
|
||||
|
||||
LOG = log.getLogger("devstack.image.creator")
|
||||
|
||||
|
||||
class Image(object):
|
||||
|
||||
KERNEL_FORMAT = ['glance', 'add', '-A', '%TOKEN%', '--silent-upload', \
|
||||
'name="%IMAGE_NAME%-kernel"', 'is_public=true', 'container_format=aki', \
|
||||
'disk_format=aki']
|
||||
INITRD_FORMAT = ['glance', 'add', '-A', '%TOKEN%', '--silent-upload', \
|
||||
'name="%IMAGE_NAME%-ramdisk"', 'is_public=true', 'container_format=ari', \
|
||||
'disk_format=ari']
|
||||
IMAGE_FORMAT = ['glance', 'add', '-A', '%TOKEN%', '--silent-upload',
|
||||
'name="%IMAGE_NAME%.img"',
|
||||
'is_public=true', 'container_format=ami', 'disk_format=ami', \
|
||||
'kernel_id=%KERNEL_ID%', 'ramdisk_id=%INITRD_ID%']
|
||||
|
||||
REPORTSIZE = 10485760
|
||||
|
||||
tmpdir = tempfile.gettempdir()
|
||||
|
||||
def __init__(self, url, token):
|
||||
self.url = url
|
||||
self.token = token
|
||||
self.download_name = url.split('/')[-1].lower()
|
||||
self.download_file_name = shell.joinpths(Image.tmpdir, self.download_name)
|
||||
self.image_name = None
|
||||
self.image = None
|
||||
self.kernel = None
|
||||
self.kernel_id = ''
|
||||
self.initrd = None
|
||||
self.initrd_id = ''
|
||||
self.tmp_folder = None
|
||||
self.registry = ImageRegistry(token)
|
||||
self.last_report = 0
|
||||
|
||||
def _format_progress(self, curr_size, total_size):
|
||||
if curr_size > total_size:
|
||||
curr_size = total_size
|
||||
progress = ("%d" % (curr_size)) + "b"
|
||||
progress += "/"
|
||||
progress += ("%d" % (total_size)) + "b"
|
||||
perc_done = "%.02f" % (((curr_size) / (float(total_size)) * 100.0)) + "%"
|
||||
return "[%s](%s)" % (progress, perc_done)
|
||||
|
||||
def _report(self, blocks, block_size, size):
|
||||
downloaded = blocks * block_size
|
||||
if (downloaded - self.last_report) > Image.REPORTSIZE:
|
||||
progress = self._format_progress((blocks * block_size), size)
|
||||
LOG.info('Download progress: %s', progress)
|
||||
self.last_report = downloaded
|
||||
|
||||
def _download(self):
|
||||
LOG.info('Downloading %s to %s', self.url, self.download_file_name)
|
||||
urllib.urlretrieve(self.url, self.download_file_name, self._report)
|
||||
|
||||
def _unpack(self):
|
||||
parts = self.download_name.split('.')
|
||||
|
||||
if self.download_name.endswith('.tgz') \
|
||||
or self.download_name.endswith('.tar.gz'):
|
||||
|
||||
LOG.info('Extracting %s', self.download_file_name)
|
||||
self.image_name = self.download_name\
|
||||
.replace('.tgz', '').replace('.tar.gz', '')
|
||||
self.tmp_folder = shell.joinpths(Image.tmpdir, parts[0])
|
||||
shell.mkdir(self.tmp_folder)
|
||||
|
||||
tar = tarfile.open(self.download_file_name)
|
||||
tar.extractall(self.tmp_folder)
|
||||
|
||||
for file_ in shell.listdir(self.tmp_folder):
|
||||
if file_.find('vmlinuz') != -1:
|
||||
self.kernel = shell.joinpths(self.tmp_folder, file_)
|
||||
elif file_.find('initrd') != -1:
|
||||
self.initrd = shell.joinpths(self.tmp_folder, file_)
|
||||
elif file_.endswith('.img'):
|
||||
self.image = shell.joinpths(self.tmp_folder, file_)
|
||||
else:
|
||||
pass
|
||||
|
||||
elif self.download_name.endswith('.img') \
|
||||
or self.download_name.endswith('.img.gz'):
|
||||
self.image_name = self.download_name.split('.img')[0]
|
||||
self.image = self.download_file_name
|
||||
|
||||
else:
|
||||
raise IOError('Unknown image format for download %s' % (self.download_name))
|
||||
|
||||
def _register(self):
|
||||
if self.kernel:
|
||||
LOG.info('Adding kernel %s to glance.', self.kernel)
|
||||
params = {'TOKEN': self.token, 'IMAGE_NAME': self.image_name}
|
||||
cmd = {'cmd': Image.KERNEL_FORMAT}
|
||||
with open(self.kernel) as file_:
|
||||
res = utils.execute_template(cmd,
|
||||
params=params, stdin_fh=file_,
|
||||
close_stdin=True)
|
||||
self.kernel_id = res[0][0].split(':')[1].strip()
|
||||
|
||||
if self.initrd:
|
||||
LOG.info('Adding ramdisk %s to glance.', self.initrd)
|
||||
params = {'TOKEN': self.token, 'IMAGE_NAME': self.image_name}
|
||||
cmd = {'cmd': Image.INITRD_FORMAT}
|
||||
with open(self.initrd) as file_:
|
||||
res = utils.execute_template(cmd, params=params,
|
||||
stdin_fh=file_, close_stdin=True)
|
||||
self.initrd_id = res[0][0].split(':')[1].strip()
|
||||
|
||||
LOG.info('Adding image %s to glance.', self.image_name)
|
||||
params = {'TOKEN': self.token, 'IMAGE_NAME': self.image_name, \
|
||||
'KERNEL_ID': self.kernel_id, 'INITRD_ID': self.initrd_id}
|
||||
cmd = {'cmd': Image.IMAGE_FORMAT}
|
||||
with open(self.image) as file_:
|
||||
utils.execute_template(cmd, params=params,
|
||||
stdin_fh=file_, close_stdin=True)
|
||||
|
||||
def _cleanup(self):
|
||||
if self.tmp_folder:
|
||||
shell.deldir(self.tmp_folder)
|
||||
shell.unlink(self.download_file_name)
|
||||
|
||||
def _generate_image_name(self, name):
|
||||
return name.replace('.tar.gz', '.img').replace('.tgz', '.img')\
|
||||
.replace('.img.gz', '.img')
|
||||
|
||||
def install(self):
|
||||
possible_name = self._generate_image_name(self.download_name)
|
||||
if not self.registry.has_image(possible_name):
|
||||
try:
|
||||
self._download()
|
||||
self._unpack()
|
||||
if not self.registry.has_image(self.image_name + '.img'):
|
||||
self._register()
|
||||
finally:
|
||||
self._cleanup()
|
||||
else:
|
||||
LOG.warn("You already seem to have image named [%s], skipping that install..." % (possible_name))
|
||||
|
||||
|
||||
class ImageRegistry:
|
||||
|
||||
CMD = ['glance', '-A', '%TOKEN%', 'details']
|
||||
|
||||
def __init__(self, token):
|
||||
self._token = token
|
||||
self._info = {}
|
||||
self._load()
|
||||
|
||||
def _parse(self, text):
|
||||
current = {}
|
||||
|
||||
for line in text.split(os.linesep):
|
||||
if not line:
|
||||
continue
|
||||
|
||||
if line.startswith("==="):
|
||||
if 'id' in current:
|
||||
id_ = current['id']
|
||||
del(current['id'])
|
||||
self._info[id_] = current
|
||||
current = {}
|
||||
else:
|
||||
l = line.split(':', 1)
|
||||
current[l[0].strip().lower()] = l[1].strip().replace('"', '')
|
||||
|
||||
def _load(self):
|
||||
LOG.info('Loading current glance image information.')
|
||||
params = {'TOKEN': self._token}
|
||||
cmd = {'cmd': ImageRegistry.CMD}
|
||||
res = utils.execute_template(cmd, params=params)
|
||||
self._parse(res[0][0])
|
||||
|
||||
def has_image(self, image):
|
||||
return image in self.get_image_names()
|
||||
|
||||
def get_image_names(self):
|
||||
return [self._info[k]['name'] for k in self._info.keys()]
|
||||
|
||||
def __getitem__(self, id_):
|
||||
return self._info[id_]
|
||||
|
||||
|
||||
class ImageCreationService:
|
||||
def __init__(self, cfg, pw_gen):
|
||||
self.cfg = cfg
|
||||
self.pw_gen = pw_gen
|
||||
|
||||
def _get_token(self):
|
||||
LOG.info("Fetching your keystone admin token so that we can perform image uploads.")
|
||||
|
||||
key_params = keystone.get_shared_params(self.cfg, self.pw_gen)
|
||||
keystone_service_url = key_params['SERVICE_ENDPOINT']
|
||||
keystone_token_url = "%s/tokens" % (keystone_service_url)
|
||||
|
||||
# form the post json data
|
||||
data = json.dumps(
|
||||
{
|
||||
"auth":
|
||||
{
|
||||
"passwordCredentials":
|
||||
{
|
||||
"username": key_params['ADMIN_USER_NAME'],
|
||||
"password": key_params['ADMIN_PASSWORD'],
|
||||
},
|
||||
"tenantName": key_params['ADMIN_TENANT_NAME'],
|
||||
}
|
||||
})
|
||||
|
||||
# Prepare the request
|
||||
request = urllib2.Request(keystone_token_url)
|
||||
|
||||
# Post body
|
||||
request.add_data(data)
|
||||
|
||||
# Content type
|
||||
request.add_header('Content-Type', 'application/json')
|
||||
|
||||
# Make the request
|
||||
LOG.info("Getting your token from url [%s], please wait..." % (keystone_token_url))
|
||||
LOG.debug("With post json data %s" % (data))
|
||||
response = urllib2.urlopen(request)
|
||||
|
||||
token = json.loads(response.read())
|
||||
if (not token or not type(token) is dict or
|
||||
not token.get('access') or not type(token.get('access')) is dict or
|
||||
not token.get('access').get('token') or not type(token.get('access').get('token')) is dict or
|
||||
not token.get('access').get('token').get('id')):
|
||||
msg = "Response from url [%s] did not match expected json format." % (keystone_token_url)
|
||||
raise IOError(msg)
|
||||
|
||||
# Basic checks passed, extract it!
|
||||
tok = token['access']['token']['id']
|
||||
LOG.debug("Got token %s" % (tok))
|
||||
return tok
|
||||
|
||||
def install(self):
|
||||
urls = list()
|
||||
token = None
|
||||
LOG.info("Setting up any specified images in glance.")
|
||||
|
||||
# Extract the urls from the config
|
||||
try:
|
||||
flat_urls = self.cfg.getdefaulted('img', 'image_urls', [])
|
||||
expanded_urls = [x.strip() for x in flat_urls.split(',')]
|
||||
for url in expanded_urls:
|
||||
if url:
|
||||
urls.append(url)
|
||||
except(ConfigParser.Error):
|
||||
LOG.warn("No image configuration keys found, skipping glance image install!")
|
||||
|
||||
# Install them in glance
|
||||
am_installed = 0
|
||||
if urls:
|
||||
LOG.info("Attempting to download & extract and upload (%s) images." % (", ".join(urls)))
|
||||
token = self._get_token()
|
||||
for url in urls:
|
||||
try:
|
||||
Image(url, token).install()
|
||||
am_installed += 1
|
||||
except (IOError, tarfile.TarError):
|
||||
LOG.exception('Installing "%s" failed', url)
|
||||
return am_installed
|
devstack/image/uploader.py (new file, 353 lines)
@ -0,0 +1,353 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import tarfile
|
||||
import urllib2
|
||||
import urlparse
|
||||
|
||||
from devstack import downloader as down
|
||||
from devstack import log
|
||||
from devstack import shell as sh
|
||||
from devstack import utils
|
||||
|
||||
from devstack.components import keystone
|
||||
|
||||
LOG = log.getLogger("devstack.image.creator")
|
||||
|
||||
# These are used when looking inside archives
|
||||
KERNEL_FN_MATCH = re.compile(r"(.*)vmlinuz$", re.I)
|
||||
RAMDISK_FN_MATCH = re.compile(r"(.*)initrd$", re.I)
|
||||
IMAGE_FN_MATCH = re.compile(r"(.*)img$", re.I)
|
||||
|
||||
# Glance commands
|
||||
KERNEL_ADD = ['glance', 'add', '-A', '%TOKEN%', '--silent-upload',
|
||||
'name="%IMAGE_NAME%-kernel"', 'is_public=true', 'container_format=aki',
|
||||
'disk_format=aki']
|
||||
INITRD_ADD = ['glance', 'add', '-A', '%TOKEN%', '--silent-upload',
|
||||
'name="%IMAGE_NAME%-ramdisk"', 'is_public=true', 'container_format=ari',
|
||||
'disk_format=ari']
|
||||
IMAGE_ADD = ['glance', 'add', '-A', '%TOKEN%', '--silent-upload',
|
||||
'name="%IMAGE_NAME%.img"',
|
||||
'is_public=true', 'container_format=ami', 'disk_format=ami',
|
||||
'kernel_id=%KERNEL_ID%', 'ramdisk_id=%INITRD_ID%']
|
||||
DETAILS_SHOW = ['glance', '-A', '%TOKEN%', 'details']
|
||||
|
||||
# Extensions that tarfile knows how to work with
|
||||
TAR_EXTS = ['.tgz', '.gzip', '.gz', '.bz2', '.tar']
|
||||
|
||||
# Used to attempt to produce a name for images (to see if we already have it)
|
||||
# And to use as the final name...
|
||||
# Reverse sorted so that .tar.gz replaces before .tar (and so on)
|
||||
NAME_CLEANUPS = [
|
||||
'.tar.gz',
|
||||
'.img.gz',
|
||||
'.img',
|
||||
] + TAR_EXTS
|
||||
NAME_CLEANUPS.sort()
|
||||
NAME_CLEANUPS.reverse()
|
||||
|
||||
|
||||
class Unpacker(object):
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def _unpack_tar(self, file_name, file_location, tmp_dir):
|
||||
(root_name, _) = os.path.splitext(file_name)
|
||||
kernel_fn = None
|
||||
ramdisk_fn = None
|
||||
root_img_fn = None
|
||||
with contextlib.closing(tarfile.open(file_location, 'r')) as tfh:
|
||||
for tmemb in tfh.getmembers():
|
||||
fn = tmemb.name
|
||||
if KERNEL_FN_MATCH.match(fn):
|
||||
kernel_fn = fn
|
||||
LOG.debug("Found kernel: %r" % (fn))
|
||||
elif RAMDISK_FN_MATCH.match(fn):
|
||||
ramdisk_fn = fn
|
||||
LOG.debug("Found ram disk: %r" % (fn))
|
||||
elif IMAGE_FN_MATCH.match(fn):
|
||||
root_img_fn = fn
|
||||
LOG.debug("Found root image: %r" % (fn))
|
||||
else:
|
||||
LOG.debug("Unknown member %r - skipping" % (fn))
|
||||
if not root_img_fn:
|
||||
msg = "Image %r has no root image member" % (file_name)
|
||||
raise RuntimeError(msg)
|
||||
extract_dir = sh.joinpths(tmp_dir, root_name)
|
||||
sh.mkdir(extract_dir)
|
||||
LOG.info("Extracting %r to %r", file_location, extract_dir)
|
||||
with contextlib.closing(tarfile.open(file_location, 'r')) as tfh:
|
||||
tfh.extractall(extract_dir)
|
||||
locations = dict()
|
||||
if kernel_fn:
|
||||
locations['kernel'] = sh.joinpths(extract_dir, kernel_fn)
|
||||
if ramdisk_fn:
|
||||
locations['ramdisk'] = sh.joinpths(extract_dir, ramdisk_fn)
|
||||
locations['image'] = sh.joinpths(extract_dir, root_img_fn)
|
||||
return locations
|
||||
|
||||
def _unpack_image(self, file_name, file_location, tmp_dir):
|
||||
locations = dict()
|
||||
locations['image'] = file_location
|
||||
return locations
|
||||
|
||||
def unpack(self, file_name, file_location, tmp_dir):
|
||||
(_, fn_ext) = os.path.splitext(file_name)
|
||||
fn_ext = fn_ext.lower()
|
||||
if fn_ext in TAR_EXTS:
|
||||
return self._unpack_tar(file_name, file_location, tmp_dir)
|
||||
elif fn_ext in ['.img']:
|
||||
return self._unpack_image(file_name, file_location, tmp_dir)
|
||||
else:
|
||||
msg = "Currently we do not know how to unpack %r" % (file_name)
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
class Image(object):
|
||||
|
||||
def __init__(self, url, token):
|
||||
self.url = url
|
||||
self.token = token
|
||||
self._registry = Registry(token)
|
||||
|
||||
def _register(self, image_name, locations):
|
||||
|
||||
# Upload the kernel, if we have one
|
||||
kernel = locations.get('kernel')
|
||||
kernel_id = ''
|
||||
if kernel:
|
||||
LOG.info('Adding kernel %r to glance.', kernel)
|
||||
params = {'TOKEN': self.token, 'IMAGE_NAME': image_name}
|
||||
cmd = {'cmd': KERNEL_ADD}
|
||||
with open(kernel, 'r') as fh:
|
||||
res = utils.execute_template(cmd,
|
||||
params=params, stdin_fh=fh,
|
||||
close_stdin=True)
|
||||
if res:
|
||||
(stdout, _) = res[0]
|
||||
kernel_id = stdout.split(':')[1].strip()
|
||||
|
||||
# Upload the ramdisk, if we have one
|
||||
initrd = locations.get('ramdisk')
|
||||
initrd_id = ''
|
||||
if initrd:
|
||||
LOG.info('Adding ramdisk %r to glance.', initrd)
|
||||
params = {'TOKEN': self.token, 'IMAGE_NAME': image_name}
|
||||
cmd = {'cmd': INITRD_ADD}
|
||||
with open(initrd, 'r') as fh:
|
||||
res = utils.execute_template(cmd,
|
||||
params=params, stdin_fh=fh,
|
||||
close_stdin=True)
|
||||
if res:
|
||||
(stdout, _) = res[0]
|
||||
initrd_id = stdout.split(':')[1].strip()
|
||||
|
||||
# Upload the root, we must have one...
|
||||
img_id = ''
|
||||
root_image = locations['image']
|
||||
LOG.info('Adding image %r to glance.', root_image)
|
||||
params = {'TOKEN': self.token, 'IMAGE_NAME': image_name,
|
||||
'KERNEL_ID': kernel_id, 'INITRD_ID': initrd_id}
|
||||
cmd = {'cmd': IMAGE_ADD}
|
||||
with open(root_image, 'r') as fh:
|
||||
res = utils.execute_template(cmd,
|
||||
params=params, stdin_fh=fh,
|
||||
close_stdin=True)
|
||||
if res:
|
||||
(stdout, _) = res[0]
|
||||
img_id = stdout.split(':')[1].strip()
|
||||
|
||||
return img_id
|
||||
|
||||
def _generate_img_name(self, url_fn):
|
||||
name = url_fn
|
||||
for look_for in NAME_CLEANUPS:
|
||||
name = name.replace(look_for, '')
|
||||
return name
|
||||
|
||||
def _generate_check_names(self, url_fn):
|
||||
name_checks = list()
|
||||
name_checks.append(url_fn)
|
||||
name = url_fn
|
||||
for look_for in NAME_CLEANUPS:
|
||||
name = name.replace(look_for, '')
|
||||
name_checks.append(name)
|
||||
name_checks.append("%s.img" % (name))
|
||||
name_checks.append("%s-img" % (name))
|
||||
name_checks.append(name)
|
||||
name_checks.append("%s.img" % (name))
|
||||
name_checks.append("%s-img" % (name))
|
||||
name_checks.append(self._generate_img_name(url_fn))
|
||||
return set(name_checks)
|
||||
|
||||
def _extract_url_fn(self):
|
||||
pieces = urlparse.urlparse(self.url)
|
||||
        return sh.basename(pieces.path)

    def install(self):
        url_fn = self._extract_url_fn()
        if not url_fn:
            msg = "Can not determine file name from url: %r" % (self.url)
            raise RuntimeError(msg)
        check_names = self._generate_check_names(url_fn)
        found_name = False
        for name in check_names:
            if not name:
                continue
            LOG.debug("Checking if you already have an image named %r" % (name))
            if self._registry.has_image(name):
                LOG.warn("You already 'seem' to have image named %r, skipping its install..." % (name))
                found_name = True
                break
        if not found_name:
            with utils.tempdir() as tdir:
                fetch_fn = sh.joinpths(tdir, url_fn)
                down.UrlLibDownloader(self.url, fetch_fn).download()
                locations = Unpacker().unpack(url_fn, fetch_fn, tdir)
                tgt_image_name = self._generate_img_name(url_fn)
                self._register(tgt_image_name, locations)
                return tgt_image_name
        else:
            return None


class Registry:

    def __init__(self, token):
        self.token = token
        self._info = {}
        self._loaded = False

    def _parse(self, text):
        current = {}
        for line in text.splitlines():
            if not line:
                continue
            if line.startswith("==="):
                if 'id' in current:
                    id_ = current['id']
                    del(current['id'])
                    self._info[id_] = current
                current = {}
            else:
                l = line.split(':', 1)
                current[l[0].strip().lower()] = l[1].strip().replace('"', '')

    def _load(self):
        if self._loaded:
            return
        LOG.info('Loading current glance image information.')
        params = {'TOKEN': self.token}
        cmd = {'cmd': DETAILS_SHOW}
        res = utils.execute_template(cmd, params=params)
        if res:
            (stdout, _) = res[0]
            self._parse(stdout)
        self._loaded = True

    def has_image(self, image):
        return image in self.get_image_names()

    def get_image_names(self):
        self._load()
        return [self._info[k]['name'] for k in self._info.keys()]


class Service:
    def __init__(self, cfg, pw_gen):
        self.cfg = cfg
        self.pw_gen = pw_gen

    def _get_token(self):
        LOG.info("Fetching your keystone admin token so that we can perform image uploads/detail calls.")

        key_params = keystone.get_shared_params(self.cfg, self.pw_gen)
        keystone_service_url = key_params['SERVICE_ENDPOINT']
        keystone_token_url = "%s/tokens" % (keystone_service_url)

        # Form the post json data
        data = json.dumps(
            {
                "auth":
                {
                    "passwordCredentials":
                    {
                        "username": key_params['ADMIN_USER_NAME'],
                        "password": key_params['ADMIN_PASSWORD'],
                    },
                    "tenantName": key_params['ADMIN_TENANT_NAME'],
                }
            })

        # Prepare the request
        request = urllib2.Request(keystone_token_url)

        # Post body
        request.add_data(data)

        # Content type
        request.add_header('Content-Type', 'application/json')

        # Make the request
        LOG.info("Getting your token from url [%s], please wait..." % (keystone_token_url))
        LOG.debug("With post json data %s" % (data))
        response = urllib2.urlopen(request)

        token = json.loads(response.read())

        # TODO is there a better way to validate???
        if (not token or not type(token) is dict or
                not token.get('access') or not type(token.get('access')) is dict or
                not token.get('access').get('token') or not type(token.get('access').get('token')) is dict or
                not token.get('access').get('token').get('id')):
            msg = "Response from url [%s] did not match expected json format." % (keystone_token_url)
            raise IOError(msg)

        # Basic checks passed, extract it!
        tok = token['access']['token']['id']
        LOG.debug("Got token %s" % (tok))
        return tok
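For reference, the validation above only requires a response shaped roughly like the sketch below before `access/token/id` is extracted (a hedged illustration; the id value is invented and a real keystone reply carries many more fields):

# Illustrative only -- this is the nesting the checks above insist on.
example_token_response = {
    "access": {
        "token": {
            "id": "12345678-aaaa-bbbb-cccc-1234567890ab",  # the admin token that gets extracted
        },
        # A real reply also includes serviceCatalog, user, expiry info, etc.
    },
}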

    def install(self):
        LOG.info("Setting up any specified images in glance.")

        # Extract the urls from the config
        urls = list()
        flat_urls = self.cfg.getdefaulted('img', 'image_urls', '')
        expanded_urls = [x.strip() for x in flat_urls.split(',')]
        for url in expanded_urls:
            if len(url):
                urls.append(url)

        # Install them in glance
        am_installed = 0
        if urls:
            LOG.info("Attempting to download, extract and upload (%s) images." % (", ".join(urls)))
            token = self._get_token()
            for url in urls:
                try:
                    name = Image(url, token).install()
                    if name:
                        LOG.info("Installed image named %r" % (name))
                        am_installed += 1
                except (IOError, tarfile.TarError) as e:
                    LOG.exception('Installing %r failed due to: %s', url, e)
        return am_installed
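The `image_urls` option consumed by `install()` above is just a comma-separated string in the `img` config section. A minimal sketch of that parsing, with a made-up option value (the `split_image_urls` helper is illustrative and not part of the codebase):

def split_image_urls(flat_urls):
    # Mirrors Service.install(): split on commas, strip whitespace, drop empties.
    return [u.strip() for u in flat_urls.split(',') if u.strip()]

# Hypothetical [img] image_urls value:
urls = split_image_urls("http://example.com/cirros.tar.gz, http://example.com/ttylinux.tgz")
# urls -> ['http://example.com/cirros.tar.gz', 'http://example.com/ttylinux.tgz']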
@ -16,7 +16,6 @@

from devstack import exceptions as excp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils

@ -42,7 +41,8 @@ _DEAD = 'DEAD'
_ALIVE = 'ALIVE'

# Alive wait time, just a sleep we put into so that the service can start up
WAIT_ALIVE_TIME = settings.WAIT_ALIVE_SECS
# FIXME: take from config...
WAIT_ALIVE_TIME = 5


def _get_virt_lib():
@ -56,8 +56,9 @@ def parse():
                          action="store",
                          type="string",
                          dest="persona_fn",
                          default='conf/personas/devstack.sh.yaml',
                          metavar="FILE",
                          help="required persona yaml file to apply")
                          help="required persona yaml file to apply (default: %default)")
    base_group.add_option("-a", "--action",
                          action="store",
                          type="string",
@ -31,10 +31,11 @@ class Packager(object):
    def install(self, pkg):
        raise NotImplementedError()

    def remove_batch(self, pkgs):
        if not self.keep_packages:
            return self._remove_batch(pkgs)
        return list()
    def remove(self, pkg):
        if self.keep_packages:
            return False
        else:
            return self._remove(pkg)

    def pre_install(self, pkgs, params=None):
        for info in pkgs:
@ -52,5 +53,5 @@ class Packager(object):
                                     info['name'])
            utils.execute_template(*cmds, params=params)

    def _remove_batch(self, pkgs):
    def _remove(self, pkg):
        raise NotImplementedError()
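To illustrate the new per-package contract (this subclass is a sketch, not part of the diff): `remove()` returns False whenever packages are being kept, otherwise it delegates to the distro-specific `_remove()`, which reports whether anything was actually removed.

class NoopPackager(Packager):
    # Hypothetical subclass used only to show the contract.

    def install(self, pkg):
        pass

    def _remove(self, pkg):
        # Only reached when keep_packages is False; honor the 'removable' flag
        # just like the apt/yum implementations below.
        return pkg.get('removable', True)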
@ -57,28 +57,19 @@ class AptPackager(pack.Packager):
            env_overrides=ENV_ADDITIONS,
            **kargs)

    def _remove_batch(self, pkgs):
        cmds = []
        which_removed = []
        for info in pkgs:
            name = info['name']
            removable = info.get('removable', True)
    def _remove(self, pkg):
        removable = pkg.get('removable', True)
        if not removable:
            continue
            if self._remove_special(name, info):
                which_removed.append(name)
                continue
            pkg_full = self._format_pkg_name(name, info.get("version"))
            if pkg_full:
                cmds.append(pkg_full)
                which_removed.append(name)
        if cmds:
            cmd = APT_DO_REMOVE + cmds
            return False
        name = pkg['name']
        if self._remove_special(name, pkg):
            return True
        pkg_full = self._format_pkg_name(name, pkg.get("version"))
        cmd = APT_DO_REMOVE + [pkg_full]
        self._execute_apt(cmd)
        if which_removed and self.auto_remove:
            cmd = APT_AUTOREMOVE
            self._execute_apt(cmd)
        return which_removed
        if self.auto_remove:
            self._execute_apt(APT_AUTOREMOVE)
        return True

    def install(self, pkg):
        name = pkg['name']
@ -86,7 +77,6 @@ class AptPackager(pack.Packager):
            return
        else:
            pkg_full = self._format_pkg_name(name, pkg.get("version"))
            if pkg_full:
            cmd = APT_INSTALL + [pkg_full]
            self._execute_apt(cmd)
@ -58,25 +58,19 @@ class YumPackager(pack.Packager):
        if self._install_special(name, pkg):
            return
        else:
            full_pkg_name = self._format_pkg_name(name, pkg.get("version"))
            cmd = YUM_INSTALL + [full_pkg_name]
            pkg_full = self._format_pkg_name(name, pkg.get("version"))
            cmd = YUM_INSTALL + [pkg_full]
            self._execute_yum(cmd)

    def _remove_batch(self, pkgs):
        pkg_full_names = []
        which_removed = []
        for info in pkgs:
            name = info['name']
            removable = info.get('removable', True)
    def _remove(self, pkg):
        removable = pkg.get('removable', True)
        if not removable:
            continue
            if self._remove_special(name, info):
                which_removed.append(name)
            return False
        name = pkg['name']
        if self._remove_special(name, pkg):
            return True
        else:
            full_pkg_name = self._format_pkg_name(name, info.get("version"))
            pkg_full_names.append(full_pkg_name)
            which_removed.append(name)
        if pkg_full_names:
            cmd = YUM_REMOVE + pkg_full_names
            pkg_full = self._format_pkg_name(name, pkg.get("version"))
            cmd = YUM_REMOVE + [pkg_full]
            self._execute_yum(cmd)
            return which_removed
        return True
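Both `_remove()` implementations consume the same package dictionary; the keys they read are `name`, `version` and `removable` (a hedged sketch with invented values):

example_pkg = {
    'name': 'rabbitmq-server',   # illustrative package name
    'version': None,             # None -> no version pin when the full name is formatted
    'removable': True,           # False -> _remove() bails out and returns False
}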
@ -24,34 +24,37 @@ PIP_UNINSTALL_CMD_OPTS = ['-y', '-q']
PIP_INSTALL_CMD_OPTS = ['-q']


def _make_pip_name(name, version):
    if version is None:
        return str(name)
    return "%s==%s" % (name, version)


def install(pip, distro):
    name = pip['name']
    root_cmd = distro.get_command('pip')
    LOG.audit("Installing python package (%s) using pip command (%s)" % (name, root_cmd))
    name_full = name
    version = pip.get('version')
    if version is not None:
        name_full += "==" + str(version)
    name_full = _make_pip_name(name, pip.get('version'))
    real_cmd = [root_cmd, 'install'] + PIP_INSTALL_CMD_OPTS
    options = pip.get('options')
    if options is not None:
        LOG.debug("Using pip options: %s" % (str(options)))
    if options:
        LOG.debug("Using pip options: %s" % (options))
        real_cmd += [str(options)]
    real_cmd += [name_full]
    sh.execute(*real_cmd, run_as_root=True)


def uninstall_batch(pips, distro, skip_errors=True):
    names = set([p['name'] for p in pips])
def uninstall(pip, distro, skip_errors=True):
    root_cmd = distro.get_command('pip')
    for name in names:
        try:
            LOG.debug("Uninstalling python package (%s)" % (name))
            cmd = [root_cmd, 'uninstall'] + PIP_UNINSTALL_CMD_OPTS + [str(name)]
        # Versions don't seem to matter here...
        name = _make_pip_name(pip['name'], None)
        LOG.audit("Uninstalling python package (%s) using pip command (%s)" % (name, root_cmd))
        cmd = [root_cmd, 'uninstall'] + PIP_UNINSTALL_CMD_OPTS + [name]
        sh.execute(*cmd, run_as_root=True)
    except excp.ProcessExecutionError:
        if skip_errors:
            LOG.warn(("Ignoring execution error that occurred when uninstalling pip %s!"
            LOG.debug(("Ignoring execution error that occurred when uninstalling pip %s!"
                      " (this may be ok if it was uninstalled by a previous component)") % (name))
        else:
            raise
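The shared `_make_pip_name()` helper above behaves as follows (hypothetical inputs):

assert _make_pip_name("netifaces", None) == "netifaces"
assert _make_pip_name("netifaces", "0.5") == "netifaces==0.5"
# uninstall() always passes version=None, since pip uninstall works on the name alone.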
@ -163,12 +163,12 @@ class ActionRunner(object):
            cls_kvs = dict()
            cls_kvs['runner'] = self
            cls_kvs['component_dir'] = sh.joinpths(root_dir, c)
            cls_kvs['subsystem_info'] = my_info.pop('subsystems', dict())
            cls_kvs['subsystem_info'] = my_info.get('subsystems') or dict()
            cls_kvs['all_instances'] = instances
            cls_kvs['name'] = c
            cls_kvs['keep_old'] = self.keep_old
            cls_kvs['desired_subsystems'] = set(desired_subsystems.get(c, list()))
            cls_kvs['options'] = set(component_opts.get(c, list()))
            cls_kvs['desired_subsystems'] = desired_subsystems.get(c) or set()
            cls_kvs['options'] = component_opts.get(c) or dict()
            # The above is not overrideable...
            for (k, v) in my_info.items():
                if k not in cls_kvs:
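The move from `pop(key, default)` to `get(key) or default` also covers the case where the persona yaml supplies the key with an empty or null value; a quick sketch of the difference:

my_info = {'subsystems': None}   # e.g. a yaml file with "subsystems:" left blank

my_info.get('subsystems', dict())    # -> None  (the default only applies when the key is missing)
my_info.get('subsystems') or dict()  # -> {}    (falsy values fall back to the default too)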
@ -181,7 +181,7 @@ class ForkRunner(base.RunnerBase):
        trace_info[STDOUT_FN] = stdoutfn
        trace_info[ARGS] = json.dumps(program_args)
        tracefn = self._do_trace(fn_name, trace_info)
        LOG.info("Forking [%s] by running command [%s]" % (app_name, program))
        LOG.debug("Forking [%s] by running command [%s]" % (app_name, program))
        with sh.Rooted(ROOT_GO):
            self._fork_start(program, appdir, pidfile, stdoutfn, stderrfn, *program_args)
        return tracefn
@ -17,7 +17,6 @@
import json
import re
import tempfile
import time

from devstack import date
from devstack import exceptions as excp
@ -62,9 +61,6 @@ SCREEN_KILLER = ['screen', '-X', '-S', '%SCREEN_ID%', 'quit']
SCREEN_SOCKET_DIR_NAME = "devstack-screen-sockets"
SCREEN_SOCKET_PERM = 0700

# Used to wait until started before we can run the actual start cmd
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS

# Run screen as root?
ROOT_GO = True

@ -76,6 +72,7 @@ class ScreenRunner(base.RunnerBase):
    def __init__(self, cfg, component_name, trace_dir):
        base.RunnerBase.__init__(self, cfg, component_name, trace_dir)
        self.socket_dir = sh.joinpths(tempfile.gettempdir(), SCREEN_SOCKET_DIR_NAME)
        self.wait_time = max(self.cfg.getint('default', 'service_wait_seconds'), 1)

    def stop(self, app_name):
        trace_fn = tr.trace_fn(self.trace_dir, SCREEN_TEMPL % (app_name))
@ -97,7 +94,7 @@ class ScreenRunner(base.RunnerBase):
        mp = dict()
        mp['SESSION_NAME'] = session_id
        mp['NAME'] = app_name
        LOG.info("Stopping program running in session [%s] in window named [%s]." % (session_id, app_name))
        LOG.debug("Stopping program running in session [%s] in window named [%s]." % (session_id, app_name))
        kill_cmd = self._gen_cmd(CMD_KILL, mp)
        sh.execute(*kill_cmd,
                   shell=True,
@ -155,14 +152,14 @@ class ScreenRunner(base.RunnerBase):
        return sessions[0]

    def _do_screen_init(self):
        LOG.info("Creating a new screen session named [%s]" % (SESSION_NAME))
        LOG.debug("Creating a new screen session named [%s]" % (SESSION_NAME))
        session_init_cmd = self._gen_cmd(SESSION_INIT)
        sh.execute(*session_init_cmd,
                   shell=True,
                   run_as_root=ROOT_GO,
                   env_overrides=self._get_env())
        LOG.info("Waiting %s seconds before we attempt to set the title bar for that session." % (WAIT_ONLINE_TO))
        time.sleep(WAIT_ONLINE_TO)
        LOG.debug("Waiting %s seconds before we attempt to set the title bar for that session." % (self.wait_time))
        sh.sleep(self.wait_time)
        bar_init_cmd = self._gen_cmd(BAR_INIT)
        sh.execute(*bar_init_cmd,
                   shell=True,
@ -177,13 +174,13 @@ class ScreenRunner(base.RunnerBase):
        mp['NAME'] = prog_name
        mp['CMD'] = run_cmd
        init_cmd = self._gen_cmd(CMD_INIT, mp)
        LOG.info("Creating a new screen window named [%s] in session [%s]" % (prog_name, session))
        LOG.debug("Creating a new screen window named [%s] in session [%s]" % (prog_name, session))
        sh.execute(*init_cmd,
                   shell=True,
                   run_as_root=ROOT_GO,
                   env_overrides=self._get_env())
        LOG.info("Waiting %s seconds before we attempt to run command [%s] in that window." % (WAIT_ONLINE_TO, run_cmd))
        time.sleep(WAIT_ONLINE_TO)
        LOG.debug("Waiting %s seconds before we attempt to run command [%s] in that window." % (self.wait_time, run_cmd))
        sh.sleep(self.wait_time)
        start_cmd = self._gen_cmd(CMD_START, mp)
        sh.execute(*start_cmd,
                   shell=True,
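The screen runner now takes its delay from the `service_wait_seconds` option instead of the removed `WAIT_ONLINE_TO` constant, clamped to at least one second; a tiny sketch of that clamp:

def effective_wait(configured_seconds):
    # Mirrors: max(cfg.getint('default', 'service_wait_seconds'), 1)
    return max(configured_seconds, 1)

assert effective_wait(5) == 5
assert effective_wait(0) == 1   # never wait less than one second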
@ -63,7 +63,7 @@ class UpstartRunner(base.RunnerBase):
        if component_event in self.events:
            LOG.debug("Already emitted event: %s" % (component_event))
        else:
            LOG.info("About to emit event: %s" % (component_event))
            LOG.debug("About to emit event: %s" % (component_event))
            cmd = EMIT_BASE_CMD + [component_event]
            sh.execute(*cmd, run_as_root=True)
            self.events.add(component_event)
@ -104,7 +104,7 @@ class UpstartRunner(base.RunnerBase):
        # https://bugs.launchpad.net/upstart/+bug/665022
        cfg_fn = sh.joinpths(CONF_ROOT, app_name + CONF_EXT)
        if sh.isfile(cfg_fn):
            LOG.info("Upstart config file already exists: %s" % (cfg_fn))
            LOG.debug("Upstart config file already exists: %s" % (cfg_fn))
            return
        LOG.debug("Loading upstart template to be used by: %s" % (cfg_fn))
        (_, contents) = utils.load_template('general', UPSTART_CONF_TMPL)
@ -125,7 +125,7 @@ class UpstartRunner(base.RunnerBase):
        if component_event in self.events:
            LOG.debug("Already emitted event: %s" % (component_event))
        else:
            LOG.info("About to emit event: %s" % (component_event))
            LOG.debug("About to emit event: %s" % (component_event))
            cmd = EMIT_BASE_CMD + [component_event]
            sh.execute(*cmd, run_as_root=True)
            self.events.add(component_event)
@ -24,39 +24,6 @@ PROG_NICE_NAME = "DEVSTACKpy"
IPV4 = 'IPv4'
IPV6 = 'IPv6'

# How long to wait for a service to startup
WAIT_ALIVE_SECS = 5

# Component names
# FIXME: move?? remove??
NOVA = "nova"
NOVA_CLIENT = 'nova-client'
GLANCE = "glance"
QUANTUM = "quantum-server"
QUANTUM_CLIENT = 'quantum-client'
SWIFT = "swift"
HORIZON = "horizon"
KEYSTONE = "keystone"
KEYSTONE_CLIENT = 'keystone-client'
DB = "db"
RABBIT = "rabbit"
NOVNC = 'no-vnc'
XVNC = 'xvnc'
MELANGE = 'melange'
MELANGE_CLIENT = 'melange-client'
COMPONENT_NAMES = [
    NOVA, NOVA_CLIENT,
    GLANCE,
    QUANTUM, QUANTUM_CLIENT,
    SWIFT,
    HORIZON,
    KEYSTONE, KEYSTONE_CLIENT,
    DB,
    RABBIT,
    NOVNC,
    MELANGE, MELANGE_CLIENT,
]

# Different run types supported
RUN_TYPE_FORK = "FORK"
RUN_TYPE_UPSTART = "UPSTART"
@ -17,14 +17,17 @@
# License for the specific language governing permissions and limitations
# under the License.

import distutils.version
import json
import netifaces
import contextlib
import os
import random
import re
import socket
import sys
import tempfile

import distutils.version
import netifaces
import progressbar
import termcolor

from devstack import colorlog
@ -139,6 +142,37 @@ def to_bytes(text):
    return byte_val


@contextlib.contextmanager
def progress_bar(name, max_am, reverse=False):
    widgets = list()
    widgets.append('%s: ' % (name))
    widgets.append(progressbar.Percentage())
    widgets.append(' ')
    if reverse:
        widgets.append(progressbar.ReverseBar())
    else:
        widgets.append(progressbar.Bar())
    widgets.append(' ')
    widgets.append(progressbar.ETA())
    p_bar = progressbar.ProgressBar(maxval=max_am, widgets=widgets)
    p_bar.start()
    try:
        yield p_bar
    finally:
        p_bar.finish()
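A small usage sketch for the new `progress_bar()` context manager (the work items and title are made up; `update()` comes from the progressbar package):

items = ['pkg-a', 'pkg-b', 'pkg-c']   # hypothetical work items
with progress_bar('Installing', len(items)) as p_bar:
    for i, item in enumerate(items):
        # ... do the real work for item here ...
        p_bar.update(i + 1)
# finish() runs automatically when the with-block exits.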

@contextlib.contextmanager
def tempdir():
    # This seems like it was only added in python 3.2
    # Make it since it's useful...
    tdir = tempfile.mkdtemp()
    try:
        yield tdir
    finally:
        sh.deldir(tdir)

def import_module(module_name, quiet=True):
    try:
        __import__(module_name)
@ -150,18 +184,6 @@ def import_module(module_name, quiet=True):
        raise


def load_json(fn):
    data = sh.load_file(fn)
    lines = data.splitlines()
    new_lines = list()
    for line in lines:
        if line.lstrip().startswith('#'):
            continue
        new_lines.append(line)
    data = joinlinesep(*new_lines)
    return json.loads(data)


def versionize(input_version):
    segments = input_version.split(".")
    cleaned_segments = list()
@ -272,16 +294,6 @@ def joinlinesep(*pieces):
    return os.linesep.join(pieces)


def service_enabled(name, components, empty_true=True):
    if not components and empty_true:
        return True
    if not components:
        return False
    if name in components:
        return True
    return False


def param_replace_list(values, replacements, ignore_missing=False):
    new_values = list()
    if not values:
@ -4,7 +4,7 @@

# Create EC2 credentials for the current user as defined by OS_TENANT_NAME:OS_USERNAME

set -x
ME=`basename $0`

if [[ -n "$1" ]]; then
    USERNAME=$1
@ -15,11 +15,21 @@ if [[ -n "$2" ]]; then
fi

# Find the other rc files
RC_DIR=../
CORE_RC="os-core.rc"
GEN_CMD="stack -a install"

if [ ! -f $CORE_RC ];
then
    echo "File '$CORE_RC' needed before running '$ME'"
    echo "Please run './$GEN_CMD' to get this file."
    exit 1
fi

# Now we start showing what's happening
set -x

# Get user configuration
source $RC_DIR/$CORE_RC
source $CORE_RC

# Set the ec2 url so euca2ools works
export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }')
prepare.sh (new executable file, 60 lines)
@ -0,0 +1,60 @@
#!/bin/bash

if [ "$(id -u)" != "0" ]; then
    echo "This script must be run as root!" 1>&2
    exit 1
fi

# This should follow what is on the following website.
URL="https://github.com/yahoo/Openstack-DevstackPy/wiki/Simple-Setup"
ME=`basename $0`

if [[ `cat /etc/issue | grep -i "ubuntu"` ]] ; then
    PKGS="git python-pip python-dev python-yaml gcc pep8 pylint python-progressbar python"
    PIPS="netifaces termcolor"
    APT="apt-get -y -qq"
    PIP="pip -q"
    # Now do it!
    echo "Preparing DEVSTACKpy for ubuntu."
    echo "Installing packages: $PKGS"
    $APT install $PKGS
    echo "Installing pypi packages: $PIPS"
    $PIP install $PIPS
    echo "DEVSTACKpy for ubuntu is ready to rock & roll."
elif [[ `cat /etc/issue | grep -i "red hat enterprise.*release.*6.*"` ]] ; then
    EPEL_RPM="epel-release-6-5.noarch.rpm"
    PKGS="python-pip gcc python-netifaces git python-pep8 pylint python-progressbar python"
    PIPS="termcolor pyyaml"
    PIP="pip-python -q"
    YUM="yum -q"
    WGET="wget -q"
    # Now do it!
    echo "Preparing DEVSTACKpy for RHEL 6"
    echo "Fetching and installing EPEL rpm: $EPEL_RPM"
    TMP_DIR=`mktemp -d`
    $WGET http://download.fedoraproject.org/pub/epel/6/i386/$EPEL_RPM -O $TMP_DIR/$EPEL_RPM
    $YUM install $TMP_DIR/$EPEL_RPM
    rm -rf $TMP_DIR
    echo "Installing packages: $PKGS"
    $YUM install $PKGS
    echo "Installing pypi packages: $PIPS"
    $PIP install $PIPS
    echo "DEVSTACKpy for RHEL 6 is ready to rock & roll."
elif [[ `cat /etc/issue | grep -i "fedora.*release.*16"` ]] ; then
    PKGS="python-pip gcc python-netifaces git python-pep8 pylint python-yaml python python-progressbar"
    PIPS="termcolor"
    PIP="pip-python -q"
    YUM="yum -q"
    # Now do it!
    echo "Preparing DEVSTACKpy for Fedora 16"
    echo "Installing packages: $PKGS"
    $YUM install $PKGS
    echo "Installing pypi packages: $PIPS"
    $PIP install $PIPS
    echo "DEVSTACKpy for Fedora 16 is ready to rock & roll."
else
    echo "DEVSTACKpy '$ME' is being run on an unknown distribution."
    echo "Please update '$URL' when you get it to run. Much appreciated!"
fi
@ -5,6 +5,7 @@
netifaces
termcolor
pyyaml # reading data files
progressbar

# Testing
nose # for test discovery and console feedback