Massive comment cleanup

This commit is contained in:
Joshua Harlow 2012-03-15 23:10:15 -07:00
parent d89a219989
commit b69701f0ec
33 changed files with 392 additions and 378 deletions

View File

@ -24,7 +24,7 @@ commands:
stop: ["service", 'mysql', "stop"]
status: ["service", 'mysql', "status"]
restart: ["service", 'mysql', "restart"]
#NOTE: we aren't stopping any sql injection...
# NOTE: we aren't stopping any sql injection...
set_pwd: ['mysql', '--user=%USER%', '--password=%OLD_PASSWORD%', '-e',
"\"USE mysql; UPDATE user SET password=PASSWORD('%NEW_PASSWORD%') WHERE User='%USER%'; FLUSH PRIVILEGES;\""]
create_db: ['mysql', '--user=%USER%', '--password=%PASSWORD%', '-e', 'CREATE DATABASE %DB%;']

View File

@ -55,7 +55,8 @@ BASE_LINK_DIR = "/etc"
class ComponentBase(object):
def __init__(self,
active_subsystems,
subsystems,
subsystem_info,
runner,
component_dir,
all_instances,
@ -63,9 +64,10 @@ class ComponentBase(object):
*args,
**kargs):
self.active_subsystems = active_subsystems
self.desired_subsystems = subsystems
self.instances = all_instances
self.component_name = name
self.subsystem_info = subsystem_info
# The runner has a reference to us, so use a weakref here to
# avoid breaking garbage collection.
@ -87,7 +89,17 @@ class ComponentBase(object):
settings.COMPONENT_CONFIG_DIR)
def verify(self):
pass
# Ensure subsystems are known...
knowns = self.known_subsystems()
for s in self.desired_subsystems:
if s not in knowns:
raise RuntimeError("Unknown subsystem %r requested" % (s))
for s in self.subsystem_info.keys():
if s not in knowns:
raise RuntimeError("Unknown subsystem %r provided" % (s))
def known_subsystems(self):
return list()
def warm_configs(self):
pass
@ -106,7 +118,6 @@ class PkgInstallComponent(ComponentBase):
self.tracewriter = tr.TraceWriter(tr.trace_fn(self.trace_dir,
tr.IN_TRACE))
self.packages = kargs.get('packages', list())
self.subsystems = kargs.get('subsystems', dict())
def _get_download_locations(self):
return list()
@ -117,10 +128,10 @@ class PkgInstallComponent(ComponentBase):
for location_info in locations:
uri_tuple = location_info["uri"]
branch_tuple = location_info.get("branch")
subdir = location_info.get("subdir")
sub_dir = location_info.get("subdir")
target_loc = base_dir
if subdir:
target_loc = sh.joinpths(base_dir, subdir)
if sub_dir:
target_loc = sh.joinpths(base_dir, sub_dir)
branch = None
if branch_tuple:
(cfg_section, cfg_key) = branch_tuple
@ -137,11 +148,11 @@ class PkgInstallComponent(ComponentBase):
raise excp.ConfigException(msg)
self.tracewriter.download_happened(target_loc, uri)
dirs_made = down.download(target_loc, uri, branch)
#ensure this is always added so that
#if a keep old happens then this of course
#won't be recreated, but if u uninstall without keeping old
#then this won't be deleted this time around
#adding it in is harmless and willl make sure its removed
# Here we ensure this is always added so that
# if a keep old happens then this of course
# won't be recreated, but if you uninstall without keeping old
# then this won't be deleted this time around
# adding it in is harmless and will make sure its removed.
dirs_made.append(target_loc)
self.tracewriter.dirs_made(*dirs_made)
return len(locations)
@ -151,11 +162,11 @@ class PkgInstallComponent(ComponentBase):
def _get_packages(self):
pkg_list = list(self.packages)
for name in self.active_subsystems:
if name in self.subsystems:
for name in self.desired_subsystems:
if name in self.subsystem_info:
# Todo handle duplicates/version differences?
LOG.debug("Extending package list with packages for subsystem %s" % (name))
subsystem_pkgs = self.subsystems[name].get('packages', list())
subsystem_pkgs = self.subsystem_info[name].get('packages', list())
pkg_list.extend(subsystem_pkgs)
return pkg_list
@ -230,6 +241,9 @@ class PkgInstallComponent(ComponentBase):
def _configure_symlinks(self):
links = self._get_symlinks()
# This sort happens so that we link in the correct order
# although it might not matter. Either way. We ensure that the right
# order happens. Ie /etc/blah link runs before /etc/blah/blah
link_srcs = sorted(links.keys())
link_srcs.reverse()
for source in link_srcs:
@ -238,8 +252,8 @@ class PkgInstallComponent(ComponentBase):
LOG.info("Symlinking %s => %s", link, source)
self.tracewriter.dirs_made(*sh.symlink(source, link))
self.tracewriter.symlink_made(link)
except OSError:
LOG.warn("Symlink %s => %s already exists.", link, source)
except OSError as e:
LOG.warn("Symlink (%s => %s) error (%s)", link, source, e)
return len(links)
def configure(self):
@ -260,11 +274,11 @@ class PythonInstallComponent(PkgInstallComponent):
def _get_pips(self):
pip_list = list(self.pips)
for name in self.active_subsystems:
if name in self.subsystems:
for name in self.desired_subsystems:
if name in self.subsystem_info:
# Todo handle duplicates/version differences?
LOG.debug("Extending pip list with pips for subsystem %s" % (name))
subsystem_pips = self.subsystems[name].get('pips', list())
subsystem_pips = self.subsystem_info[name].get('pips', list())
pip_list.extend(subsystem_pips)
return pip_list
@ -292,6 +306,8 @@ class PythonInstallComponent(PkgInstallComponent):
py_trace_name = "%s-%s" % (tr.PY_TRACE, name)
py_writer = tr.TraceWriter(tr.trace_fn(self.trace_dir,
py_trace_name))
# Format or json encoding isn't really needed here since this is
# more just for information output/lookup if desired.
py_writer.trace("CMD", " ".join(PY_INSTALL))
py_writer.trace("STDOUT", stdout)
py_writer.trace("STDERR", stderr)
@ -316,10 +332,10 @@ class PkgUninstallComponent(ComponentBase):
def unconfigure(self):
if not self.keep_old:
#TODO this may not be the best solution siance we might
#actually want to remove config files but since most
#config files can be regenerated this should be fine (some
#can not though) so this is why we need to keep them
# TODO this may not be the best solution since we might
# actually want to remove config files but since most
# config files can be regenerated this should be fine (some
# can not though) so this is why we need to keep them.
self._unconfigure_files()
self._unconfigure_links()
self._unconfigure_runners()

View File

@ -154,11 +154,7 @@ class DBRuntime(comp.EmptyRuntime):
def _get_run_actions(self, act, exception_cls):
dbtype = self.cfg.get("db", "type")
type_actions = self.distro.commands[dbtype]
if type_actions is None:
msg = BASE_ERROR % (act, dbtype)
raise NotImplementedError(msg)
distro_options = self.distro.commands[dbtype]
distro_options = self.distro.get_command(dbtype)
if distro_options is None:
msg = BASE_ERROR % (act, dbtype)
raise NotImplementedError(msg)

View File

@ -30,7 +30,7 @@ from devstack.image import creator
LOG = logging.getLogger("devstack.components.glance")
#config files/sections
# Config files/sections
API_CONF = "glance-api.conf"
REG_CONF = "glance-registry.conf"
API_PASTE_CONF = 'glance-api-paste.ini'
@ -46,33 +46,32 @@ CONFIGS = [API_CONF, REG_CONF, API_PASTE_CONF,
READ_CONFIGS = [API_CONF, REG_CONF, API_PASTE_CONF,
REG_PASTE_CONF, SCRUB_CONF, SCRUB_PASTE_CONF]
#reg, api are here as possible subcomponents
# Reg, api, scrub are here as possible subsystems
GAPI = "api"
GREG = "reg"
GSCR = 'scrub'
#this db will be dropped and created
# This db will be dropped and created
DB_NAME = "glance"
#special subcomponents/options that are used in starting to know that images should be uploaded
NO_IMG_START = "no-image-upload"
# How long to wait before attempting image upload
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#what to start
# What applications to start
APP_OPTIONS = {
'glance-api': ['--config-file', sh.joinpths('%ROOT%', "etc", API_CONF)],
'glance-registry': ['--config-file', sh.joinpths('%ROOT%', "etc", REG_CONF)],
'glance-scrubber': ['--config-file', sh.joinpths('%ROOT%', "etc", REG_CONF)],
}
#how the subcompoent small name translates to an actual app
# How the subcomponent small name translates to an actual app
SUB_TO_APP = {
GAPI: 'glance-api',
GREG: 'glance-registry',
GSCR: 'glance-scrubber',
}
#subdirs of the downloaded
# Subdirs of the downloaded (we are overriding the original)
CONFIG_DIR = 'etc'
BIN_DIR = 'bin'
@ -82,6 +81,9 @@ class GlanceUninstaller(comp.PythonUninstallComponent):
comp.PythonUninstallComponent.__init__(self, *args, **kargs)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
def known_subsystems(self):
return SUB_TO_APP.keys()
class GlanceInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
@ -96,6 +98,9 @@ class GlanceInstaller(comp.PythonInstallComponent):
})
return places
def known_subsystems(self):
return SUB_TO_APP.keys()
def _get_config_files(self):
return list(CONFIGS)
@ -120,12 +125,12 @@ class GlanceInstaller(comp.PythonInstallComponent):
return comp.PythonInstallComponent._get_source_config(self, config_fn)
def _config_adjust(self, contents, name):
#even bother opening??
# Even bother opening??
if name not in READ_CONFIGS:
return contents
#use config parser and
#then extract known configs that
#will need locations/directories/files made (or touched)...
# Use config parser and
# then extract known configs that
# will need locations/directories/files made (or touched)...
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
@ -134,15 +139,15 @@ class GlanceInstaller(comp.PythonInstallComponent):
if cache_dir:
LOG.info("Ensuring image cache data directory %s exists "\
"(and is empty)" % (cache_dir))
#destroy then recreate the image cache directory
# Destroy then recreate the image cache directory
sh.deldir(cache_dir)
self.tracewriter.dirs_made(*sh.mkdirslist(cache_dir))
if config.get('default', 'default_store') == 'file':
file_dir = config.get('default', 'filesystem_store_datadir')
if file_dir:
LOG.info("Ensuring file system store directory %s exists and is empty." % (file_dir))
#delete existing images
#and recreate the image directory
# Delete existing images
# and recreate the image directory
sh.deldir(file_dir)
self.tracewriter.dirs_made(*sh.mkdirslist(file_dir))
log_filename = config.get('default', 'log_file')
@ -152,22 +157,22 @@ class GlanceInstaller(comp.PythonInstallComponent):
if log_dir:
LOG.info("Ensuring log directory %s exists." % (log_dir))
self.tracewriter.dirs_made(*sh.mkdirslist(log_dir))
#destroy then recreate it (the log file)
# Destroy then recreate it (the log file)
sh.unlink(log_filename)
self.tracewriter.file_touched(sh.touch_file(log_filename))
if config.getboolean('default', 'delayed_delete'):
data_dir = config.get('default', 'scrubber_datadir')
if data_dir:
LOG.info("Ensuring scrubber data dir %s exists and is empty." % (data_dir))
#destroy then recreate the scrubber data directory
# Destroy then recreate the scrubber data directory
sh.deldir(data_dir)
self.tracewriter.dirs_made(*sh.mkdirslist(data_dir))
#nothing modified so just return the original
# Nothing modified so just return the original
return contents
def _get_param_map(self, config_fn):
#this dict will be used to fill in the configuration
#params with actual values
# This dict will be used to fill in the configuration
# params with actual values
mp = dict()
mp['DEST'] = self.app_dir
mp['SYSLOG'] = self.cfg.getboolean("default", "syslog")
@ -182,13 +187,18 @@ class GlanceRuntime(comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
def known_subsystems(self):
return SUB_TO_APP.keys()
def _get_apps_to_start(self):
apps = [{'name': app_name,
'path': sh.joinpths(self.app_dir, BIN_DIR, app_name),
}
for app_name in APP_OPTIONS.keys()
]
apps = list()
for subsys in self.desired_subsystems:
app = dict()
app['name'] = SUB_TO_APP[subsys]
app['path'] = sh.joinpths(self.bin_dir, app['name'])
apps.append(app)
return apps
def _get_app_options(self, app):
@ -196,7 +206,7 @@ class GlanceRuntime(comp.PythonRuntime):
def post_start(self):
comp.PythonRuntime.post_start(self)
#install any images that need activating...
# Install any images that need activating...
# TODO: make this less cheesy - need to wait till glance goes online
LOG.info("Waiting %s seconds so that glance can start up before image install." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)

View File

@ -76,6 +76,7 @@ class HorizonInstaller(comp.PythonInstallComponent):
return places
def verify(self):
comp.PythonInstallComponent.verify(self)
self._check_ug()
def _get_symlinks(self):

View File

@ -30,32 +30,32 @@ from devstack.components import db
LOG = logging.getLogger("devstack.components.keystone")
#this db will be dropped then created
# This db will be dropped then created
DB_NAME = "keystone"
#subdirs of the git checkout
# Subdirs of the git checkout
BIN_DIR = "bin"
CONFIG_DIR = "etc"
#simple confs
# Simple confs
ROOT_CONF = "keystone.conf"
CATALOG_CONF = 'default_catalog.templates'
LOGGING_CONF = "logging.conf"
LOGGING_SOURCE_FN = 'logging.conf.sample'
CONFIGS = [ROOT_CONF, CATALOG_CONF, LOGGING_CONF]
#this is a special conf
# This is a special conf/init script
MANAGE_DATA_CONF = 'keystone_init.sh'
MANAGE_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
MANAGE_ADMIN_USER = 'admin'
MANAGE_DEMO_USER = 'demo'
MANAGE_INVIS_USER = 'invisible_to_admin'
#sync db command
# Sync db command
MANAGE_APP_NAME = 'keystone-manage'
SYNC_DB_CMD = [sh.joinpths('%BINDIR%', MANAGE_APP_NAME), 'db_sync']
#what to start
# What to start
APP_NAME = 'keystone-all'
APP_OPTIONS = {
APP_NAME: ['--config-file', sh.joinpths('%ROOT%', CONFIG_DIR, ROOT_CONF),
@ -64,17 +64,19 @@ APP_OPTIONS = {
}
#used to wait until started before we can run the data setup script
# Used to wait until started before we can run the data setup script
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#swift template additions
# Swift template additions
# TODO: get rid of these
SWIFT_TEMPL_ADDS = ['catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
'catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
'catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/',
'catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
"catalog.RegionOne.object_store.name = 'Swift Service'"]
#quantum template additions
# Quantum template additions
# TODO: get rid of these
QUANTUM_TEMPL_ADDS = ['catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/',
'catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/',
'catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/',
@ -135,9 +137,9 @@ class KeystoneInstaller(comp.PythonInstallComponent):
def _config_adjust(self, contents, name):
if name == ROOT_CONF:
#use config parser and
#then extract known configs that
#will need locations/directories/files made (or touched)...
# Use config parser and
# then extract known configs that
# will need locations/directories/files made (or touched)...
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
@ -148,7 +150,7 @@ class KeystoneInstaller(comp.PythonInstallComponent):
if log_dir:
LOG.info("Ensuring log directory %s exists." % (log_dir))
self.tracewriter.dirs_made(*sh.mkdirslist(log_dir))
#destroy then recreate it (the log file)
# Destroy then recreate it (the log file)
sh.unlink(log_filename)
self.tracewriter.file_touched(sh.touch_file(log_filename))
elif name == CATALOG_CONF:
@ -182,8 +184,8 @@ class KeystoneInstaller(comp.PythonInstallComponent):
get_shared_params(self.cfg, self.pw_gen)
def _get_param_map(self, config_fn):
#these be used to fill in the configuration/cmds +
#params with actual values
# These will be used to fill in the configuration/cmds +
# params with actual values
mp = dict()
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
mp['DEST'] = self.app_dir
@ -207,9 +209,9 @@ class KeystoneRuntime(comp.PythonRuntime):
def post_start(self):
tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
if sh.isfile(tgt_fn):
#still there, run it
#these environment additions are important
#in that they eventually affect how this script runs
# If its still there, run it
# these environment additions are important
# in that they eventually affect how this script runs
LOG.info("Waiting %s seconds so that keystone can start up before running first time init." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
env = dict()
@ -238,7 +240,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
mp = dict()
host_ip = config.get('host', 'ip')
#these match what is in keystone_init.sh
# These match what is in keystone_init.sh
mp['SERVICE_TENANT_NAME'] = 'service'
if service_user_name:
mp['SERVICE_USERNAME'] = str(service_user_name)
@ -247,12 +249,12 @@ def get_shared_params(config, pw_gen, service_user_name=None):
mp['ADMIN_TENANT_NAME'] = mp['ADMIN_USER_NAME']
mp['DEMO_TENANT_NAME'] = mp['DEMO_USER_NAME']
#tokens and passwords
# Tokens and passwords
mp['SERVICE_TOKEN'] = pw_gen.get_password("service_token")
mp['ADMIN_PASSWORD'] = pw_gen.get_password('horizon_keystone_admin', length=20)
mp['SERVICE_PASSWORD'] = pw_gen.get_password('service_password')
#components of the auth endpoint
# Components of the auth endpoint
keystone_auth_host = config.getdefaulted('keystone', 'keystone_auth_host', host_ip)
mp['KEYSTONE_AUTH_HOST'] = keystone_auth_host
keystone_auth_port = config.getdefaulted('keystone', 'keystone_auth_port', '35357')
@ -260,7 +262,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
keystone_auth_proto = config.getdefaulted('keystone', 'keystone_auth_protocol', 'http')
mp['KEYSTONE_AUTH_PROTOCOL'] = keystone_auth_proto
#components of the service endpoint
# Components of the service endpoint
keystone_service_host = config.getdefaulted('keystone', 'keystone_service_host', host_ip)
mp['KEYSTONE_SERVICE_HOST'] = keystone_service_host
keystone_service_port = config.getdefaulted('keystone', 'keystone_service_port', '5000')
@ -268,7 +270,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
keystone_service_proto = config.getdefaulted('keystone', 'keystone_service_protocol', 'http')
mp['KEYSTONE_SERVICE_PROTOCOL'] = keystone_service_proto
#http/https endpoints
# Uri's of the http/https endpoints
mp['AUTH_ENDPOINT'] = urlunparse((keystone_auth_proto,
"%s:%s" % (keystone_auth_host, keystone_auth_port),
"v2.0", "", "", ""))

View File

@ -28,37 +28,37 @@ from devstack.components import db
LOG = logging.getLogger("devstack.components.melange")
#this db will be dropped then created
# This db will be dropped then created
DB_NAME = 'melange'
#subdirs of the checkout/download
# Subdirs of the checkout/download
BIN_DIR = 'bin'
#configs
# Basic configs
ROOT_CONF = 'melange.conf.sample'
ROOT_CONF_REAL_NAME = 'melange.conf'
CONFIGS = [ROOT_CONF]
CFG_LOC = ['etc', 'melange']
#sensible defaults
# Sensible defaults
DEF_CIDR_RANGE = 'FE-EE-DD-00-00-00/24'
#how we sync melange with the db
# How we sync melange with the db
DB_SYNC_CMD = [
{'cmd': ['%BIN_DIR%/melange-manage', '--config-file=%CFG_FILE%', 'db_sync']},
]
#???
# TODO: ???
CIDR_CREATE_CMD = [
{'cmd': ['melange', 'mac_address_range', 'create', 'cidr', '%CIDR_RANGE%']},
]
#what to start
# What to start
APP_OPTIONS = {
'melange-server': ['--config-file', '%CFG_FILE%'],
}
#subcomponent that specifies we should make the network cidr using melange
# Special option that specifies we should make the network cidr using melange
CREATE_CIDR = "create-cidr"
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
@ -160,8 +160,8 @@ class MelangeRuntime(comp.PythonRuntime):
def post_start(self):
comp.PythonRuntime.post_start(self)
# FIXME: This is a bit of a hack. How do we document "flags" like this?
flags = self.component_opts.get('flags', [])
if CREATE_CIDR in flags or not flags:
flags = []
if CREATE_CIDR in flags:
LOG.info("Waiting %s seconds so that the melange server can start up before cidr range creation." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
mp = dict()

View File

@ -81,7 +81,7 @@ VG_LVREMOVE_CMD = [
]
# NCPU, NVOL, NAPI ... are here as possible subcomponents of nova
# NCPU, NVOL, NAPI ... are here as possible subsystems of nova
NCPU = "cpu"
NVOL = "vol"
NAPI = "api"
@ -91,7 +91,7 @@ NCERT = "cert"
NSCHED = "sched"
NCAUTH = "cauth"
NXVNC = "xvnc"
SUBCOMPONENTS = [NCPU, NVOL, NAPI,
SUBSYSTEMS = [NCPU, NVOL, NAPI,
NOBJ, NNET, NCERT, NSCHED, NCAUTH, NXVNC]
# What to start
@ -121,17 +121,17 @@ SUB_COMPONENT_NAME_MAP = {
NXVNC: 'nova-xvpvncproxy',
}
#subdirs of the checkout/download
# Subdirs of the checkout/download
BIN_DIR = 'bin'
CONFIG_DIR = "etc"
#network class/driver/manager templs
# Network class/driver/manager templs
QUANTUM_MANAGER = 'nova.network.quantum.manager.QuantumManager'
QUANTUM_IPAM_LIB = 'nova.network.quantum.melange_ipam_lib'
NET_MANAGER_TEMPLATE = 'nova.network.manager.%s'
FIRE_MANAGER_TEMPLATE = 'nova.virt.libvirt.firewall.%s'
#sensible defaults
# Sensible defaults
DEF_IMAGE_SERVICE = 'nova.image.glance.GlanceImageService'
DEF_SCHEDULER = 'nova.scheduler.simple.SimpleScheduler'
DEF_GLANCE_PORT = 9292
@ -144,11 +144,11 @@ DEF_NET_MANAGER = 'FlatDHCPManager'
DEF_VOL_PREFIX = 'volume-'
DEF_VOL_TEMPL = DEF_VOL_PREFIX + '%08x'
#default virt types
# Default virt types
DEF_VIRT_DRIVER = 'libvirt'
DEF_VIRT_TYPE = 'qemu'
#virt drivers to there connection name
# Virt drivers map -> to their connection name
VIRT_DRIVER_CON_MAP = {
'libvirt': 'libvirt',
'xenserver': 'xenapi',
@ -156,7 +156,7 @@ VIRT_DRIVER_CON_MAP = {
'baremetal': 'baremetal',
}
#only turned on if vswitch enabled
# Only turned on if openvswitch enabled
QUANTUM_OPENSWITCH_OPS = {
'libvirt_vif_type': 'ethernet',
'libvirt_vif_driver': 'nova.virt.libvirt.vif.LibvirtOpenVswitchDriver',
@ -164,10 +164,11 @@ QUANTUM_OPENSWITCH_OPS = {
'quantum_use_dhcp': True,
}
#this is a special conf
# This is a special conf
CLEANER_DATA_CONF = 'nova-clean.sh'
CLEANER_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
# FIXME:
#rhel6/fedora libvirt policy
#http://wiki.libvirt.org/page/SSHPolicyKitSetup
#LIBVIRT_POLICY_FN = "/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla"
@ -180,7 +181,7 @@ CLEANER_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
#ResultActive=yes
#"""
#xenserver specific defaults
# Xenserver specific defaults
XS_DEF_INTERFACE = 'eth1'
XA_CONNECTION_ADDR = '169.254.0.1'
XS_VNC_ADDR = XA_CONNECTION_ADDR
@ -189,19 +190,19 @@ XA_CONNECTION_PORT = 80
XA_DEF_USER = 'root'
XA_DEF_CONNECTION_URL = urlunparse(('http', "%s:%s" % (XA_CONNECTION_ADDR, XA_CONNECTION_PORT), "", '', '', ''))
#vnc specific defaults
# Vnc specific defaults
VNC_DEF_ADDR = '127.0.0.1'
#std compute extensions
# Nova std compute extensions
STD_COMPUTE_EXTS = 'nova.api.openstack.compute.contrib.standard_extensions'
#config keys we warm up so u won't be prompted later
# Config keys we warm up so you won't be prompted later
WARMUP_PWS = ['rabbit']
#used to wait until started before we can run the data setup script
# Used to wait until started before we can run the data setup script
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#nova conf default section
# Nova conf default section
NV_CONF_DEF_SECTION = "[DEFAULT]"
@ -230,15 +231,18 @@ class NovaUninstaller(comp.PythonUninstallComponent):
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
def known_subsystems(self):
return SUBSYSTEMS
def pre_uninstall(self):
self._clear_libvirt_domains()
self._clean_it()
def _clean_it(self):
#these environment additions are important
#in that they eventually affect how this script runs
# These environment additions are important
# in that they eventually affect how this script runs
env = dict()
env['ENABLED_SERVICES'] = ",".join(self.active_subsystems)
env['ENABLED_SERVICES'] = ",".join(self.desired_subsystems)
env['BIN_DIR'] = self.bin_dir
env['VOLUME_NAME_PREFIX'] = self.cfg.getdefaulted('nova', 'volume_name_prefix', DEF_VOL_PREFIX)
cleaner_fn = sh.joinpths(self.bin_dir, CLEANER_DATA_CONF)
@ -262,12 +266,15 @@ class NovaInstaller(comp.PythonInstallComponent):
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.paste_conf_fn = self._get_target_config_name(PASTE_CONF)
self.volumes_enabled = False
if NVOL in self.active_subsystems:
if NVOL in self.desired_subsystems:
self.volumes_enabled = True
self.xvnc_enabled = False
if NXVNC in self.active_subsystems:
if NXVNC in self.desired_subsystems:
self.xvnc_enabled = True
def known_subsystems(self):
return SUBSYSTEMS
def _get_symlinks(self):
links = comp.PythonInstallComponent._get_symlinks(self)
source_fn = sh.joinpths(self.cfg_dir, API_CONF)
@ -283,11 +290,12 @@ class NovaInstaller(comp.PythonInstallComponent):
return places
def warm_configs(self):
for pw_key in WARMUP_PWS:
self.cfg.get("passwords", pw_key)
warm_pws = list(WARMUP_PWS)
driver_canon = _canon_virt_driver(self.cfg.get('nova', 'virt_driver'))
if driver_canon == 'xenserver':
self.cfg.get("passwords", "xenapi_connection")
warm_pws.append('xenapi_connection')
for pw_key in warm_pws:
self.pw_gen.get_password(pw_key)
def _get_config_files(self):
return list(CONFIGS)
@ -311,12 +319,12 @@ class NovaInstaller(comp.PythonInstallComponent):
def post_install(self):
comp.PythonInstallComponent.post_install(self)
#extra actions to do nova setup
# Extra actions to do nova setup
self._setup_db()
self._sync_db()
self._setup_cleaner()
self._setup_network_initer()
#check if we need to do the vol subcomponent
# Check if we need to do the vol subsystem
if self.volumes_enabled:
vol_maker = NovaVolumeConfigurator(self)
vol_maker.setup_volumes()
@ -397,9 +405,9 @@ class NovaRuntime(comp.PythonRuntime):
tgt_fn = sh.joinpths(self.bin_dir, NET_INIT_CONF)
if sh.isfile(tgt_fn):
LOG.info("Creating your nova network to be used with instances.")
#still there, run it
#these environment additions are important
#in that they eventually affect how this script runs
# If still there, run it
# these environment additions are important
# in that they eventually affect how this script runs
if utils.service_enabled(settings.QUANTUM, self.instances, False):
LOG.info("Waiting %s seconds so that quantum can start up before running first time init." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
@ -414,13 +422,17 @@ class NovaRuntime(comp.PythonRuntime):
def post_start(self):
self._setup_network_init()
def known_subsystems(self):
return SUBSYSTEMS
def _get_apps_to_start(self):
result = [{'name': app_name,
'path': sh.joinpths(self.bin_dir, app_name),
}
for app_name in sorted(APP_OPTIONS.keys())
]
return result
apps = list()
for subsys in self.desired_subsystems:
app = dict()
app['name'] = SUB_COMPONENT_NAME_MAP[subsys]
app['path'] = sh.joinpths(self.bin_dir, app['name'])
apps.append(app)
return apps
def pre_start(self):
# Let the parent class do its thing
@ -444,8 +456,8 @@ class NovaRuntime(comp.PythonRuntime):
return APP_OPTIONS.get(app)
#this will configure nova volumes which in a developer box
#is a volume group (lvm) that are backed by a loopback file
# This will configure nova volumes which in a developer box
# is a volume group (lvm) that are backed by a loopback file
class NovaVolumeConfigurator(object):
def __init__(self, ni):
self.cfg = ni.cfg
@ -485,10 +497,9 @@ class NovaVolumeConfigurator(object):
# logical volumes
self._process_lvs(mp)
# Finish off by restarting tgt, and ignore any errors
iscsi_cmds = self.distro.get_command('iscsi', quiet=True)
if iscsi_cmds:
restart_cmd = iscsi_cmds['restart']
utils.execute_template(*restart_cmd, run_as_root=True, check_exit_code=False)
iscsi_restart = self.distro.get_command('iscsi', 'restart', quiet=True)
if iscsi_restart:
utils.execute_template(*iscsi_restart, run_as_root=True, check_exit_code=False)
def _process_lvs(self, mp):
LOG.info("Attempting to setup logical volumes for nova volume management.")
@ -541,13 +552,12 @@ class NovaConfConfigurator(object):
return self.cfg.getdefaulted('nova', name, default)
def configure(self):
#everything built goes in here
# Everything built goes in here
nova_conf = NovaConf()
#used more than once
# Used more than once so we calculate it ahead of time
hostip = self.cfg.get('host', 'ip')
#verbose on?
if self._getbool('verbose'):
nova_conf.add('verbose', True)
@ -565,79 +575,79 @@ class NovaConfConfigurator(object):
sh.mkdir(full_logdir)
sh.chmod(full_logdir, 0777)
#allow the admin api?
# Allow the admin api?
if self._getbool('allow_admin_api'):
nova_conf.add('allow_admin_api', True)
#??
# FIXME: ??
nova_conf.add('allow_resize_to_same_host', True)
#which scheduler do u want?
# Which scheduler do you want?
nova_conf.add('compute_scheduler_driver', self._getstr('scheduler', DEF_SCHEDULER))
#setup network settings
# Setup any network settings
self._configure_network_settings(nova_conf)
#setup nova volume settings
# Setup nova volume settings
if self.volumes_enabled:
self._configure_vols(nova_conf)
#where we are running
# The ip of where we are running
nova_conf.add('my_ip', hostip)
#setup your sql connection
# Setup your sql connection
db_dsn = cfg_helpers.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
nova_conf.add('sql_connection', db_dsn)
#configure anything libvirt releated?
# Configure anything libvirt related?
virt_driver = _canon_virt_driver(self._getstr('virt_driver'))
if virt_driver == 'libvirt':
libvirt_type = _canon_libvirt_type(self._getstr('libvirt_type'))
self._configure_libvirt(libvirt_type, nova_conf)
#how instances will be presented
# How instances will be presented
instance_template = self._getstr('instance_name_prefix') + self._getstr('instance_name_postfix')
if not instance_template:
instance_template = DEF_INSTANCE_TEMPL
nova_conf.add('instance_name_template', instance_template)
#enable the standard extensions
# Enable the standard extensions
nova_conf.add('osapi_compute_extension', STD_COMPUTE_EXTS)
#auth will be using keystone
# Auth will be using keystone
nova_conf.add('auth_strategy', 'keystone')
#vnc settings setup
# Vnc settings setup
self._configure_vnc(nova_conf)
#where our paste config is
# Where our paste config is
nova_conf.add('api_paste_config', self.paste_conf_fn)
#what our imaging service will be
# What our imaging service will be
self._configure_image_service(nova_conf, hostip)
#ec2 / s3 stuff
# Configs for ec2 / s3 stuff
nova_conf.add('ec2_dmz_host', self._getstr('ec2_dmz_host', hostip))
nova_conf.add('s3_host', hostip)
#how is your rabbit setup?
# How is your rabbit setup?
nova_conf.add('rabbit_host', self.cfg.getdefaulted('default', 'rabbit_host', hostip))
nova_conf.add('rabbit_password', self.cfg.get("passwords", "rabbit"))
#where instances will be stored
# Where instances will be stored
instances_path = self._getstr('instances_path', sh.joinpths(self.component_dir, 'instances'))
self._configure_instances_path(instances_path, nova_conf)
#is this a multihost setup?
# Is this a multihost setup?
self._configure_multihost(nova_conf)
#enable syslog??
# Enable syslog??
self._configure_syslog(nova_conf)
#handle any virt driver specifics
# Handle any virt driver specifics
self._configure_virt_driver(nova_conf)
#and extract to finish
# Annnnnd extract to finish
return self._get_content(nova_conf)
def _get_extra(self, key):
@ -684,11 +694,11 @@ class NovaConfConfigurator(object):
return generated_content
def _configure_image_service(self, nova_conf, hostip):
#what image service we will use
# What image service will you be using?
img_service = self._getstr('img_service', DEF_IMAGE_SERVICE)
nova_conf.add('image_service', img_service)
#where is glance located?
# If glance then where is it?
if img_service.lower().find("glance") != -1:
glance_api_server = self._getstr('glance_server', (DEF_GLANCE_SERVER % (hostip)))
nova_conf.add('glance_api_servers', glance_api_server)
@ -724,7 +734,7 @@ class NovaConfConfigurator(object):
nova_conf.add('iscsi_helper', 'tgtadm')
def _configure_network_settings(self, nova_conf):
#TODO this might not be right....
# TODO this might not be right....
if utils.service_enabled(settings.QUANTUM, self.instances, False):
nova_conf.add('network_manager', QUANTUM_MANAGER)
hostip = self.cfg.get('host', 'ip')
@ -741,10 +751,11 @@ class NovaConfConfigurator(object):
else:
nova_conf.add('network_manager', NET_MANAGER_TEMPLATE % (self._getstr('network_manager', DEF_NET_MANAGER)))
#dhcp bridge stuff???
# Configs dhcp bridge stuff???
# TODO: why is this the same as the nova.conf?
nova_conf.add('dhcpbridge_flagfile', sh.joinpths(self.cfg_dir, API_CONF))
#Network prefix for the IP network that all the projects for future VM guests reside on. Example: 192.168.0.0/12
# Network prefix for the IP network that all the projects for future VM guests reside on. Example: 192.168.0.0/12
nova_conf.add('fixed_range', self._getstr('fixed_range'))
# The value for vlan_interface may default to the the current value
@ -752,7 +763,7 @@ class NovaConfConfigurator(object):
public_interface = self._getstr('public_interface')
vlan_interface = self._getstr('vlan_interface', public_interface)
#do a little check to make sure actually have that interface set...
# Do a little check to make sure actually have that interface/s
if not utils.is_interface(public_interface):
msg = "Public interface %s is not a known interface" % (public_interface)
raise exceptions.ConfigException(msg)
@ -764,7 +775,7 @@ class NovaConfConfigurator(object):
nova_conf.add('public_interface', public_interface)
nova_conf.add('vlan_interface', vlan_interface)
#This forces dnsmasq to update its leases table when an instance is terminated.
# This forces dnsmasq to update its leases table when an instance is terminated.
nova_conf.add('force_dhcp_release', True)
def _configure_syslog(self, nova_conf):
@ -789,11 +800,11 @@ class NovaConfConfigurator(object):
pass
nova_conf.add('libvirt_type', virt_type)
#configures any virt driver settings
# Configures any virt driver settings
def _configure_virt_driver(self, nova_conf):
drive_canon = _canon_virt_driver(self._getstr('virt_driver'))
nova_conf.add('connection_type', VIRT_DRIVER_CON_MAP.get(drive_canon, drive_canon))
#special driver settings
# Special driver settings
if drive_canon == 'xenserver':
nova_conf.add('xenapi_connection_url', self._getstr('xa_connection_url', XA_DEF_CONNECTION_URL))
nova_conf.add('xenapi_connection_username', self._getstr('xa_connection_username', XA_DEF_USER))
@ -817,7 +828,7 @@ class NovaConfConfigurator(object):
nova_conf.add('flat_interface', flat_interface)
# This class represents the data in the nova config file
# This class represents the data/format of the nova config file
class NovaConf(object):
def __init__(self):
self.lines = list()

View File

@ -24,13 +24,13 @@ from devstack.components import nova
LOG = logging.getLogger("devstack.components.novnc")
#where the application is really
# Where the application is really
UTIL_DIR = 'utils'
VNC_PROXY_APP = 'nova-novncproxy'
APP_OPTIONS = {
#this reaches into the nova configuration file
#TODO can we stop that?
# This reaches into the nova configuration file
# TODO can we stop that?
VNC_PROXY_APP: ['--flagfile', '%NOVA_CONF%', '--web', '.'],
}
@ -72,7 +72,7 @@ class NoVNCRuntime(comp.ProgramRuntime):
def _get_param_map(self, app_name):
root_params = comp.ProgramRuntime._get_param_map(self, app_name)
if app_name == VNC_PROXY_APP and utils.service_enabled(settings.NOVA, self.instances, False):
#have to reach into the nova conf (puke)
# FIXME: Have to reach into the nova conf (puke)
nova_runtime = self.instances[settings.NOVA]
root_params['NOVA_CONF'] = sh.joinpths(nova_runtime.cfg_dir, nova.API_CONF)
return root_params

View File

@ -28,12 +28,11 @@ from devstack.components import db
LOG = logging.getLogger("devstack.components.quantum")
#vswitch pkgs
# Openvswitch special settings
VSWITCH_PLUGIN = 'openvswitch'
PKG_VSWITCH = "quantum-openvswitch.json"
V_PROVIDER = "quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
#config files (some only modified if running as openvswitch)
# Config files (some only modified if running as openvswitch)
PLUGIN_CONF = "plugins.ini"
QUANTUM_CONF = 'quantum.conf'
PLUGIN_LOC = ['etc']
@ -42,24 +41,20 @@ AGENT_LOC = ["etc", "quantum", "plugins", "openvswitch"]
AGENT_BIN_LOC = ["quantum", "plugins", "openvswitch", 'agent']
CONFIG_FILES = [PLUGIN_CONF, AGENT_CONF]
#this db will be dropped and created
# This db will be dropped and created
DB_NAME = 'ovs_quantum'
#opensvswitch bridge setup/teardown/name commands
# Openvswitch bridge setup/teardown/name commands
OVS_BRIDGE_DEL = ['ovs-vsctl', '--no-wait', '--', '--if-exists', 'del-br', '%OVS_BRIDGE%']
OVS_BRIDGE_ADD = ['ovs-vsctl', '--no-wait', 'add-br', '%OVS_BRIDGE%']
OVS_BRIDGE_EXTERN_ID = ['ovs-vsctl', '--no-wait', 'br-set-external-id', '%OVS_BRIDGE%', 'bridge-id', '%OVS_EXTERNAL_ID%']
OVS_BRIDGE_CMDS = [OVS_BRIDGE_DEL, OVS_BRIDGE_ADD, OVS_BRIDGE_EXTERN_ID]
#special component options
QUANTUM_SERVICE = 'q-svc'
QUANTUM_AGENT = 'q-agt'
#subdirs of the downloaded
# Subdirs of the downloaded
CONFIG_DIR = 'etc'
BIN_DIR = 'bin'
#what to start (only if openvswitch enabled)
# What to start (only if openvswitch enabled)
APP_Q_SERVER = 'quantum-server'
APP_Q_AGENT = 'ovs_quantum_agent.py'
APP_OPTIONS = {
@ -106,7 +101,7 @@ class QuantumInstaller(comp.PkgInstallComponent):
def _config_adjust(self, contents, config_fn):
if config_fn == PLUGIN_CONF and self.q_vswitch_service:
#need to fix the "Quantum plugin provider module"
# Need to fix the "Quantum plugin provider module"
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
@ -120,7 +115,7 @@ class QuantumInstaller(comp.PkgInstallComponent):
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
elif config_fn == AGENT_CONF and self.q_vswitch_agent:
#Need to adjust the sql connection
# Need to adjust the sql connection
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
@ -189,15 +184,9 @@ class QuantumRuntime(comp.ProgramRuntime):
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
#default to on if not specified
# Default to on if not specified
self.q_vswitch_agent = True
self.q_vswitch_service = True
# else:
# #only turn on if requested
# if QUANTUM_SERVICE in self.component_opts:
# self.q_vswitch_service = True
# if QUANTUM_AGENT in self.component_opts:
# self.q_vswitch_agent = True
def _get_apps_to_start(self):
app_list = comp.ProgramRuntime._get_apps_to_start(self)

View File

@ -23,23 +23,23 @@ from devstack import shell as sh
LOG = logging.getLogger("devstack.components.rabbit")
#hopefully these are distro independent..
# So far these are distro independent..
START_CMD = ['service', "rabbitmq-server", "start"]
STOP_CMD = ['service', "rabbitmq-server", "stop"]
STATUS_CMD = ['service', "rabbitmq-server", "status"]
RESTART_CMD = ['service', "rabbitmq-server", "restart"]
PWD_CMD = ['rabbitmqctl', 'change_password', 'guest']
#default password
# Default password (guest)
RESET_BASE_PW = ''
#how long we wait for rabbitmq to start up before doing commands on it
# How long we wait for rabbitmq to start up before doing commands on it
WAIT_ON_TIME = settings.WAIT_ALIVE_SECS
#config keys we warm up so u won't be prompted later
# Config keys we warm up so you won't be prompted later
WARMUP_PWS = ['rabbit']
#partial of rabbit user prompt
# Partial of rabbit user prompt
PW_USER_PROMPT = 'the rabbit user'
@ -94,8 +94,9 @@ class RabbitRuntime(comp.EmptyRuntime):
return 0
def status(self):
#this has got to be the worst status output
#i have ever seen (its like a weird mix json+crap)
# This has got to be the worst status output.
#
# I have ever seen (its like a weird mix json+crap)
run_result = sh.execute(*STATUS_CMD,
check_exit_code=False,
run_as_root=True)
@ -112,12 +113,15 @@ class RabbitRuntime(comp.EmptyRuntime):
return comp.STATUS_UNKNOWN
def _run_cmd(self, cmd, check_exit=True):
#this seems to fix one of the bugs with rabbit mq starting and stopping
#not cool, possibly connected to the following bugs:
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
# This seems to fix one of the bugs with rabbit mq starting and stopping
# not cool, possibly connected to the following bugs:
#
#rhel seems to have this bug also...
# See: https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
# See: https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
#
# RHEL seems to have this bug also...
#
# TODO: Move to distro dir...
with TemporaryFile() as f:
return sh.execute(*cmd, run_as_root=True,
stdout_fh=f, stderr_fh=f,

View File

@ -23,7 +23,7 @@ from devstack import utils
LOG = logging.getLogger("devstack.components.swift")
#swift has alot of config files!
# Swift has a lot of config files!
SWIFT_CONF = 'swift.conf'
PROXY_SERVER_CONF = 'proxy-server.conf'
ACCOUNT_SERVER_CONF = 'account-server.conf'
@ -42,26 +42,26 @@ CONFIGS = [SWIFT_CONF, PROXY_SERVER_CONF, ACCOUNT_SERVER_CONF,
SWIFT_RSYNC_LOC = '/etc/rsyslog.d/10-swift.conf'
DEF_LOOP_SIZE = 1000000
#adjustments to rsync/rsyslog
# Adjustments to rsync/rsyslog
RSYNC_CONF_LOC = '/etc/default/rsync'
RSYNCD_CONF_LOC = '/etc/rsyncd.conf'
RSYNC_SERVICE_RESTART = ['service', 'rsync', 'restart']
RSYSLOG_SERVICE_RESTART = ['service', 'rsyslog', 'restart']
RSYNC_ON_OFF_RE = re.compile(r'^\s*RSYNC_ENABLE\s*=\s*(.*)$', re.I)
#defines our auth service type
# Defines our auth service type
AUTH_SERVICE = 'keystone'
#defines what type of loopback filesystem we will make
#xfs is preferred due to its extended attributes
# Defines what type of loopback filesystem we will make
# xfs is preferred due to its extended attributes
FS_TYPE = "xfs"
#subdirs of the git checkout
# Subdirs of the git checkout
BIN_DIR = 'bin'
CONFIG_DIR = 'etc'
LOG_DIR = 'logs'
#config keys we warm up so u won't be prompted later
# Config keys we warm up so you won't be prompted later
WARMUP_PWS = ['service_token', 'swift_hash']
@ -108,7 +108,7 @@ class SwiftInstaller(comp.PythonInstallComponent):
def warm_configs(self):
for pw_key in WARMUP_PWS:
self.cfg.get("passwords", pw_key)
self.pw_gen.get_password(pw_key)
def _get_param_map(self, config_fn):
return {
@ -121,7 +121,7 @@ class SwiftInstaller(comp.PythonInstallComponent):
'SWIFT_HASH': self.cfg.get('passwords', 'swift_hash'),
'SWIFT_LOGDIR': self.logdir,
'SWIFT_PARTITION_POWER_SIZE': self.cfg.getdefaulted('swift', 'partition_power_size', '9'),
#leave these alone, will be adjusted later
# Note: leave these alone, will be adjusted later
'NODE_PATH': '%NODE_PATH%',
'BIND_PORT': '%BIND_PORT%',
'LOG_FACILITY': '%LOG_FACILITY%',

View File

@ -69,48 +69,49 @@ class Distro(object):
def __init__(self, name, distro_pattern, packager_name, commands, components):
self.name = name
self.distro_pattern = re.compile(distro_pattern, re.IGNORECASE)
self.packager_name = packager_name
self.commands = commands
self.components = components
self._distro_pattern = re.compile(distro_pattern, re.IGNORECASE)
self._packager_name = packager_name
self._commands = commands
self._components = components
def __repr__(self):
return "\"%s\" using packager \"%s\"" % (self.name, self.packager_name)
return "\"%s\" using packager \"%s\"" % (self.name, self._packager_name)
def get_command(self, key, *args, **kargs):
place = self.commands
acutal_keys = [key] + list(args)
def get_command(self, key, *more_keys, **kargs):
""" Gets a end object for a given set of keys """
root = self._commands
acutal_keys = [key] + list(more_keys)
run_over_keys = acutal_keys[0:-1]
end_key = acutal_keys[-1]
quiet = kargs.get('quiet', False)
for k in run_over_keys:
if quiet:
place = place.get(k)
if place is None:
root = root.get(k)
if root is None:
return None
else:
place = place[k]
root = root[k]
if not quiet:
return place[end_key]
return root[end_key]
else:
return place.get(end_key)
return root.get(end_key)
def supports_distro(self, distro_name):
"""Does this distro support the named Linux distro?
:param distro_name: Return value from platform.linux_distribution().
"""
return bool(self.distro_pattern.search(distro_name))
return bool(self._distro_pattern.search(distro_name))
def get_packager_factory(self):
"""Return a factory for a package manager."""
return importer.import_entry_point(self.packager_name)
return importer.import_entry_point(self._packager_name)
def extract_component(self, name, action):
"""Return the class + component info to use for doing the action w/the component."""
try:
# Use a copy instead of the original
component_info = dict(self.components[name])
component_info = dict(self._components[name])
entry_point = component_info[action]
cls = importer.import_entry_point(entry_point)
# Knock all action class info (and any other keys)

View File

@ -51,7 +51,6 @@ def _gitdownload(storewhere, uri, branch=None):
def download(storewhere, uri, branch=None):
#figure out which type
up = urlparse(uri)
if up and up.scheme.lower() == "git" or GIT_EXT_REG.match(up.path):
return _gitdownload(storewhere, uri, branch)

View File

@ -26,9 +26,9 @@ def get():
def set(key, value):
#this is really screwy, python is really odd in this area
#from http://docs.python.org/library/os.html
#Calling putenv() directly does not change os.environ, so it's better to modify os.environ.
# This is really screwy, python is really odd in this area
# See: http://docs.python.org/library/os.html
# Calling putenv() directly does not change os.environ, so it's better to modify os.environ.
if key is not None:
LOG.audit("Setting environment key [%s] to value [%s]" % (key, value))
os.environ[str(key)] = str(value)

View File

@ -28,13 +28,13 @@ from devstack.components import keystone
LOG = logging.getLogger('devstack.env_rc')
#general extraction cfg keys+section
# General extraction cfg keys + sections
CFG_MAKE = {
'FLAT_INTERFACE': ('nova', 'flat_interface'),
'HOST_IP': ('host', 'ip'),
}
#general password keys
# General password keys
PASSWORDS_MAKES = {
'ADMIN_PASSWORD': 'horizon_keystone_admin',
'SERVICE_PASSWORD': 'service_password',
@ -43,17 +43,17 @@ PASSWORDS_MAKES = {
'MYSQL_PASSWORD': 'sql',
}
#install root
# Install root output name and env variable name
INSTALL_ROOT = 'INSTALL_ROOT'
#default ports
# Default ports
EC2_PORT = 8773
S3_PORT = 3333
#how we know if a line is an export or if it isn't (simpe edition)
# How we know if a line is an export or if it isn't (simple edition)
EXP_PAT = re.compile("^\s*export\s+(.*?)=(.*?)$", re.IGNORECASE)
#how we unquote a string (simple edition)
# How we unquote a string (simple edition)
QUOTED_PAT = re.compile(r"^\s*[\"](.*)[\"]\s*$")
@ -182,8 +182,8 @@ class RcWriter(object):
to_set['OS_PASSWORD'] = key_params['ADMIN_PASSWORD']
to_set['OS_TENANT_NAME'] = key_params['DEMO_TENANT_NAME']
to_set['OS_USERNAME'] = key_params['DEMO_USER_NAME']
# this seems named weirdly the OS_AUTH_URL is the keystone SERVICE_ENDPOINT endpoint
# todo: describe more why this is the case
# This seems named weirdly the OS_AUTH_URL is the keystone SERVICE_ENDPOINT endpoint
# Todo: describe more why this is the case...
to_set['OS_AUTH_URL'] = key_params['SERVICE_ENDPOINT']
return to_set
@ -254,8 +254,6 @@ class RcReader(object):
def extract(self, fn):
extracted_vars = dict()
contents = ''
#not using shell here since
#we don't want this to be "nulled" in a dry-run
LOG.audit("Loading rc file [%s]" % (fn))
try:
with open(fn, 'r') as fh:

View File

@ -236,16 +236,16 @@ class ImageCreationService:
}
})
# prepare the request
# Prepare the request
request = urllib2.Request(keystone_token_url)
# post body
# Post body
request.add_data(data)
# content type
# Content type
request.add_header('Content-Type', 'application/json')
# make the request
# Make the request
LOG.info("Getting your token from url [%s], please wait..." % (keystone_token_url))
LOG.debug("With post json data %s" % (data))
response = urllib2.urlopen(request)
@ -258,7 +258,7 @@ class ImageCreationService:
msg = "Response from url [%s] did not match expected json format." % (keystone_token_url)
raise IOError(msg)
#basic checks passed, extract it!
# Basic checks passed, extract it!
tok = token['access']['token']['id']
LOG.debug("Got token %s" % (tok))
return tok
@ -268,7 +268,7 @@ class ImageCreationService:
token = None
LOG.info("Setting up any specified images in glance.")
#extract them from the config
# Extract the urls from the config
try:
flat_urls = self.cfg.getdefaulted('img', 'image_urls', [])
expanded_urls = [x.strip() for x in flat_urls.split(',')]
@ -278,7 +278,7 @@ class ImageCreationService:
except(ConfigParser.Error):
LOG.warn("No image configuration keys found, skipping glance image install!")
#install them in glance
# Install them in glance
am_installed = 0
if urls:
LOG.info("Attempting to download & extract and upload (%s) images." % (", ".join(urls)))

View File

@ -25,15 +25,15 @@ def partition(fullname):
"""
if ':' not in fullname:
raise ValueError('Invalid entry point specifier %r' % fullname)
module_name, ignore, classname = fullname.partition(':')
return (module_name, ignore, classname)
(module_name, _, classname) = fullname.partition(':')
return (module_name, classname)
def import_entry_point(fullname):
"""
Given a name import the class and return it.
"""
(module_name, _, classname) = partition(fullname)
(module_name, classname) = partition(fullname)
try:
module = utils.import_module(module_name, False)
for submodule in module_name.split('.')[1:]:

View File

@ -22,7 +22,7 @@ from devstack import utils
LOG = logging.getLogger('devstack.libvirt')
#http://libvirt.org/uri.html
# See: http://libvirt.org/uri.html
LIBVIRT_PROTOCOL_MAP = {
'qemu': "qemu:///system",
'kvm': "qemu:///system",
@ -32,28 +32,28 @@ LIBVIRT_PROTOCOL_MAP = {
}
VIRT_LIB = 'libvirt'
#how libvirt is restarted
# How libvirt is restarted
LIBVIRT_RESTART_CMD = ['service', '%SERVICE%', 'restart']
#how we check its status
# How we check its status
LIBVIRT_STATUS_CMD = ['service', '%SERVICE%', 'status']
#this is just used to check that libvirt will work with
#a given protocol, may not be ideal but does seem to crap
#out if it won't work, so thats good
# This is just used to check that libvirt will work with
# a given protocol, may not be ideal but does seem to crap
# out if it won't work, so that's good...
VIRSH_SANITY_CMD = ['virsh', '-c', '%VIRT_PROTOCOL%', 'uri']
#status is either dead or alive!
# Status is either dead or alive!
_DEAD = 'DEAD'
_ALIVE = 'ALIVE'
#alive wait time, just a sleep we put into so that the service can start up
# Alive wait time, just a sleep we put into so that the service can start up
WAIT_ALIVE_TIME = settings.WAIT_ALIVE_SECS
def _get_virt_lib():
#late import so that we don't always need this library to be active
#ie if u aren't using libvirt in the first place
# Late import so that we don't always need this library to be active
# i.e. if you aren't using libvirt in the first place...
return utils.import_module(VIRT_LIB)

View File

@ -24,7 +24,7 @@ import pprint
from logging.handlers import SysLogHandler
from logging.handlers import WatchedFileHandler
# a list of things we want to replicate from logging levels
# A list of things we want to replicate from logging levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
@ -34,13 +34,13 @@ INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# our new audit level
# http://docs.python.org/howto/logging.html#logging-levels
# Our new audit level
# See: http://docs.python.org/howto/logging.html#logging-levels
logging.AUDIT = logging.DEBUG + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
AUDIT = logging.AUDIT
# methods
# Methods
debug = logging.debug
info = logging.info
warning = logging.warning
@ -50,11 +50,11 @@ exception = logging.exception
critical = logging.critical
log = logging.log
# classes
# Classes
root = logging.root
Formatter = logging.Formatter
# handlers
# Handlers
StreamHandler = logging.StreamHandler
WatchedFileHandler = WatchedFileHandler
SysLogHandler = SysLogHandler

View File

@ -36,7 +36,7 @@ def parse():
help_formatter = IndentedHelpFormatter(width=HELP_WIDTH)
parser = OptionParser(version=version_str, formatter=help_formatter)
#root options
# Root options
parser.add_option("-v", "--verbose",
action="append_const",
const=1,
@ -50,7 +50,7 @@ def parse():
help=("perform ACTION but do not actually run any of the commands"
" that would normally complete ACTION: (default: %default)"))
#install/start/stop/uninstall specific options
# Install/start/stop/uninstall specific options
base_group = OptionGroup(parser, "Install & uninstall & start & stop specific options")
base_group.add_option("-p", "--persona",
action="store",
@ -79,7 +79,7 @@ def parse():
)
parser.add_option_group(base_group)
#uninstall and stop options
# Uninstall and stop options
stop_un_group = OptionGroup(parser, "Uninstall & stop specific options")
stop_un_group.add_option("-n", "--no-force",
action="store_true",
@ -96,7 +96,7 @@ def parse():
default=False)
parser.add_option_group(un_group)
#extract only what we care about
# Extract only what we care about
(options, args) = parser.parse_args()
output = dict()
output['dir'] = options.dir or ""

View File

@ -15,7 +15,6 @@
# under the License.
from devstack import log as logging
from devstack import settings
from devstack import utils
LOG = logging.getLogger("devstack.packager")
@ -32,11 +31,11 @@ class Packager(object):
def remove_batch(self, pkgs):
if not self.keep_packages:
return self._remove_batch(pkgs)
return []
return list()
def pre_install(self, pkgs, params=None):
for info in pkgs:
cmds = info.get(settings.PRE_INSTALL)
cmds = info.get('pre-install')
if cmds:
LOG.info("Running pre-install commands for package %s.",
info['name'])
@ -44,7 +43,7 @@ class Packager(object):
def post_install(self, pkgs, params=None):
for info in pkgs:
cmds = info.get(settings.POST_INSTALL)
cmds = info.get('post-install')
if cmds:
LOG.info("Running post-install commands for package %s.",
info['name'])

View File

@ -20,18 +20,18 @@ from devstack import shell as sh
LOG = logging.getLogger("devstack.packaging.yum")
#root yum command
# Root yum command
YUM_CMD = ['yum']
#tolerant is enabled since we might already have it installed/erased
# Tolerant is enabled since we might already have it installed/erased
YUM_INSTALL = ["install", "-y", "-t"]
YUM_REMOVE = ['erase', '-y', "-t"]
#yum separates its pkg names and versions with a dash
# Yum separates its pkg names and versions with a dash
VERSION_TEMPL = "%s-%s"
#need to relink for rhel (not a bug!)
#TODO: maybe this should be a subclass that handles these differences
# Need to relink for rhel (not a bug!)
# TODO: maybe this should be a subclass that handles these differences
RHEL_RELINKS = {
'python-webob1.0': {
"src": '/usr/lib/python2.6/site-packages/WebOb-1.0.8-py2.6.egg/webob/',
@ -60,14 +60,14 @@ class YumPackager(pack.Packager):
**kargs)
def _remove_special(self, pkgname, pkginfo):
#TODO: maybe this should be a subclass that handles these differences
#if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# TODO: maybe this should be a subclass that handles these differences
# if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# #we don't return true here so that
# #the normal package cleanup happens
# sh.unlink(RHEL_RELINKS.get(pkgname).get("tgt"))
return False
#TODO: maybe this should be a subclass that handles these differences
# TODO: maybe this should be a subclass that handles these differences
def _install_rhel_relinks(self, pkgname, pkginfo):
full_pkg_name = self._format_pkg_name(pkgname, pkginfo.get("version"))
install_cmd = YUM_CMD + YUM_INSTALL + [full_pkg_name]
@ -81,10 +81,10 @@ class YumPackager(pack.Packager):
sh.symlink(src, tgt)
return True
#TODO: maybe this should be a subclass that handles these differences
# TODO: maybe this should be a subclass that handles these differences
def _install_special(self, pkgname, pkginfo):
# FIXME
#if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# FIXME:
# if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# return self._install_rhel_relinks(pkgname, pkginfo)
return False

View File

@ -42,7 +42,7 @@ def install(pip, distro):
def uninstall_batch(pips, distro, skip_errors=True):
names = [p['name'] for p in pips]
names = set([p['name'] for p in pips])
root_cmd = distro.get_command('pip')
for name in names:
try:

View File

@ -150,10 +150,6 @@ class ActionRunner(object):
self.kargs = kargs
def _apply_reverse(self, action, component_order):
if not component_order:
component_order = list()
else:
component_order = list(component_order)
adjusted_order = list(component_order)
if action in REVERSE_ACTIONS:
adjusted_order.reverse()
@ -193,7 +189,7 @@ class ActionRunner(object):
cls_kvs = dict()
cls_kvs['runner'] = self
cls_kvs['component_dir'] = sh.joinpths(root_dir, c)
cls_kvs['active_subsystems'] = set(subsystems.get(c, list()))
cls_kvs['subsystem_info'] = subsystems.get(c, dict())
cls_kvs['all_instances'] = instances
cls_kvs['name'] = c
cls_kvs['keep_old'] = self.keep_old

View File

@ -23,17 +23,17 @@ class RunnerBase(object):
self.trace_dir = trace_dir
def unconfigure(self):
#cleans up anything configured by
#this runner for any apps for this component
#returns how many files unconfigured
# Cleans up anything configured by
# this runner for any apps for this component
# returns how many files unconfigured
return 0
def configure(self, app_name, runtime_info):
#returns how many files configured
# Returns how many files configured
return 0
def start(self, name, runtime_info):
#returns a file name that contains what was started
# Returns a file name that contains what was started
return None
def stop(self, name):

View File

@ -31,18 +31,18 @@ from devstack import trace as tr
LOG = logging.getLogger("devstack.runners.fork")
#maximum for the number of available file descriptors (when not found)
# Maximum for the number of available file descriptors (when not found)
MAXFD = 2048
#how many times we try to kill and how much sleep (seconds) between each try
# How many times we try to kill and how much sleep (seconds) between each try
MAX_KILL_TRY = 5
SLEEP_TIME = 1
#my type
# My runner type
RUN_TYPE = settings.RUN_TYPE_FORK
TYPE = settings.RUN_TYPE_TYPE
#trace constants
# Trace constants
PID_FN = "PID_FN"
STDOUT_FN = "STDOUT_FN"
STDERR_FN = "STDERR_FN"
@ -50,7 +50,7 @@ ARGS = "ARGS"
NAME = "NAME"
FORK_TEMPL = "%s.fork"
#run fork cmds as root?
# Run fork cmds as root?
ROOT_GO = True
@ -90,7 +90,7 @@ class ForkRunner(base.RunnerBase):
if sh.isfile(pid_file) and sh.isfile(trace_fn):
pid = int(sh.load_file(pid_file).strip())
(killed, attempts) = self._stop_pid(pid)
#trash the files
# Trash the files if it worked
if killed:
LOG.debug("Killed pid %s after %s attempts" % (pid, attempts))
LOG.debug("Removing pid file %s" % (pid_file))
@ -115,22 +115,22 @@ class ForkRunner(base.RunnerBase):
return (pidfile, stderr, stdout)
def _fork_start(self, program, appdir, pid_fn, stdout_fn, stderr_fn, *args):
#first child, not the real program
# First child, not the real program
pid = os.fork()
if pid == 0:
#upon return the calling process shall be the session
#leader of this new session,
#shall be the process group leader of a new process group,
#and shall have no controlling terminal.
# Upon return the calling process shall be the session
# leader of this new session,
# shall be the process group leader of a new process group,
# and shall have no controlling terminal.
os.setsid()
pid = os.fork()
#fork to get daemon out - this time under init control
#and now fully detached (no shell possible)
# Fork to get daemon out - this time under init control
# and now fully detached (no shell possible)
if pid == 0:
#move to where application should be
# Move to where application should be
if appdir:
os.chdir(appdir)
#close other fds
# Close other fds (or try)
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
mkfd = limits[1]
if mkfd == resource.RLIM_INFINITY:
@ -141,27 +141,27 @@ class ForkRunner(base.RunnerBase):
except OSError:
#not open, thats ok
pass
#now adjust stderr and stdout
# Now adjust stderr and stdout
if stdout_fn:
stdoh = open(stdout_fn, "w")
os.dup2(stdoh.fileno(), sys.stdout.fileno())
if stderr_fn:
stdeh = open(stderr_fn, "w")
os.dup2(stdeh.fileno(), sys.stderr.fileno())
#now exec...
#the arguments to the child process should
#start with the name of the command being run
# Now exec...
# Note: The arguments to the child process should
# start with the name of the command being run
prog_little = os.path.basename(program)
actualargs = [prog_little] + list(args)
os.execlp(program, *actualargs)
else:
#write out the child pid
# Write out the child pid
contents = str(pid) + os.linesep
sh.write_file(pid_fn, contents, quiet=True)
#not exit or sys.exit, this is recommended
#since it will do the right cleanups that we want
#not calling any atexit functions, which would
#be bad right now
# Not exit or sys.exit, this is recommended
# since it will do the right cleanups that we want
# not calling any atexit functions, which would
# be bad right now
os._exit(0)
def _do_trace(self, fn, kvs):

View File

@ -30,25 +30,25 @@ from devstack import utils
LOG = logging.getLogger("devstack.runners.screen")
#my type
# My running type
RUN_TYPE = settings.RUN_TYPE_SCREEN
TYPE = settings.RUN_TYPE_TYPE
#trace constants
# Trace constants
SCREEN_TEMPL = "%s.screen"
ARGS = "ARGS"
NAME = "NAME"
SESSION_ID = 'SESSION_ID'
#screen session name
# Screen session name
SESSION_NAME = 'stack'
SESSION_DEF_TITLE = SESSION_NAME
SESSION_NAME_MTCHER = re.compile(r"^\s*([\d]+\.%s)\s*(.*)$" % (SESSION_NAME))
#how we setup screens status bar
# How we setup screens status bar
STATUS_BAR_CMD = r'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"'
#cmds
# Screen commands/template commands
SESSION_INIT = ['screen', '-d', '-m', '-S', SESSION_NAME, '-t', SESSION_DEF_TITLE, '-s', "/bin/bash"]
BAR_INIT = ['screen', '-r', SESSION_NAME, '-X', STATUS_BAR_CMD]
CMD_INIT = ['screen', '-S', '%SESSION_NAME%', '-X', 'screen', '-t', "%NAME%"]
@ -58,17 +58,17 @@ CMD_START = ['screen', '-S', '%SESSION_NAME%', '-p', "%NAME%", '-X', 'stuff', "\
LIST_CMD = ['screen', '-ls']
SCREEN_KILLER = ['screen', '-X', '-S', '%SCREEN_ID%', 'quit']
#where our screen sockets will go
# Where our screen sockets will go
SCREEN_SOCKET_DIR_NAME = "devstack-screen-sockets"
SCREEN_SOCKET_PERM = 0700
#used to wait until started before we can run the actual start cmd
# Used to wait until started before we can run the actual start cmd
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#run screen as root?
# Run screen as root?
ROOT_GO = True
#screen RC file
# Screen RC file
SCREEN_RC = settings.RC_FN_TEMPL % ('screen')
@ -104,7 +104,7 @@ class ScreenRunner(base.RunnerBase):
run_as_root=ROOT_GO,
env_overrides=self._get_env(),
check_exit_code=False)
#we have really no way of knowing if it worked or not, screen sux...
# We have really no way of knowing if it worked or not, screen sux...
wipe_cmd = self._gen_cmd(CMD_WIPE, mp)
sh.execute(*wipe_cmd,
shell=True,
@ -189,7 +189,7 @@ class ScreenRunner(base.RunnerBase):
shell=True,
run_as_root=ROOT_GO,
env_overrides=self._get_env())
#we have really no way of knowing if it worked or not, screen sux...
# We have really no way of knowing if it worked or not, screen sux...
def _do_socketdir_init(self, socketdir, perm):
LOG.debug("Making screen socket directory [%s] (with permissions %o)" % (socketdir, perm))

View File

@ -26,27 +26,27 @@ from devstack import utils
LOG = logging.getLogger("devstack.runners.upstart")
#my type
# My run type
RUN_TYPE = settings.RUN_TYPE_UPSTART
TYPE = settings.RUN_TYPE_TYPE
#trace constants
# Trace constants
UPSTART_TEMPL = "%s.upstart"
ARGS = "ARGS"
NAME = "NAME"
#upstart event namings
# Upstart event namings
START_EVENT_SUFFIX = "_start"
STOP_EVENT_SUFFIX = "_stop"
#where upstart configs go
# Where upstart configs go
CONF_ROOT = "/etc/init"
CONF_EXT = ".conf"
#shared template
# Shared template
UPSTART_CONF_TMPL = 'upstart.conf'
#how we emit events to upstart
# How we emit events to upstart
EMIT_BASE_CMD = ["/sbin/initctl", "emit"]

View File

@ -20,10 +20,6 @@ import sys
# What this program is called
PROG_NICE_NAME = "DEVSTACKpy"
# These 2 identify the json post and pre install sections
PRE_INSTALL = 'pre-install'
POST_INSTALL = 'post-install'
# Ip version constants for network ip detection
IPV4 = 'IPv4'
IPV6 = 'IPv6'
@ -43,7 +39,7 @@ KEYSTONE = "keystone"
KEYSTONE_CLIENT = 'keystone-client'
DB = "db"
RABBIT = "rabbit"
NOVNC = 'n-vnc'
NOVNC = 'no-vnc'
XVNC = 'xvnc'
MELANGE = 'melange'
MELANGE_CLIENT = 'melange-client'

View File

@ -178,11 +178,11 @@ def execute(*cmd, **kwargs):
raise excp.ProcessExecutionError(exit_code=rc, stdout=stdout, \
stderr=stderr, cmd=str_cmd)
else:
#log it anyway
# Log it anyway
if rc not in check_exit_code:
LOG.debug("A failure may of just happened when running command \"%s\" [%s] (%s, %s)", \
str_cmd, rc, stdout.strip(), stderr.strip())
#log for debugging figuring stuff out
# Log for debugging figuring stuff out
LOG.debug("Received stdout: %s" % (stdout.strip()))
LOG.debug("Received stderr: %s" % (stderr.strip()))
return (stdout, stderr)
@ -197,7 +197,7 @@ def abspth(path):
def shellquote(text):
#TODO since there doesn't seem to be a standard lib that actually works use this way...
# TODO: since there doesn't seem to be a standard lib that actually works, use this way...
do_adjust = False
for srch in SHELL_QUOTE_REPLACERS.keys():
if text.find(srch) != -1:
@ -369,16 +369,9 @@ def load_file(fn, quiet=False):
if not quiet:
LOG.audit("Loading data from file %s", fn)
data = ""
try:
if not DRYRUN_MODE:
with open(fn, "r") as f:
data = f.read()
except IOError as e:
if DRYRUN_MODE:
# We still need to actually load something (ie the json install files so thats)
# Why this is in the exception path.
LOG.audit("Passing on load exception since in dry-run mode")
else:
raise e
if not quiet:
LOG.audit("Loaded (%d) bytes from file %s", len(data), fn)
return data
@ -500,16 +493,16 @@ def create_loopback_file(fname, size, bsize=1024, fs_type='ext3', run_as_root=Fa
'count=0', 'seek=%d' % size]
mkfs_cmd = ['mkfs.%s' % fs_type, '-f', '-i', 'size=%d' % bsize, fname]
# make sure folder exists
# Make sure folder exists
files = mkdirslist(dirname(fname))
# create file
# Create file
touch_file(fname)
# fill with zeroes
# Fill with zeroes
execute(*dd_cmd, run_as_root=run_as_root)
# create fs on the file
# Create fs on the file
execute(*mkfs_cmd, run_as_root=run_as_root)
return files

View File

@ -15,16 +15,17 @@
# under the License.
import json
import os
from devstack import date
from devstack import exceptions as excp
from devstack import shell as sh
#trace per line output and file extension formats
TRACE_FMT = "%s - %s\n"
# Trace per line output format and file extension formats
TRACE_FMT = ("%s - %s" + os.linesep)
TRACE_EXT = ".trace"
#common trace actions
# Common trace actions
CFG_WRITING_FILE = "CFG_WRITING_FILE"
SYMLINK_MAKE = "SYMLINK_MAKE"
PKG_INSTALL = "PKG_INSTALL"
@ -35,12 +36,12 @@ DOWNLOADED = "DOWNLOADED"
AP_STARTED = "AP_STARTED"
PIP_INSTALL = 'PIP_INSTALL'
#trace file types
# Common trace file types (or the expected common ones)
PY_TRACE = "python"
IN_TRACE = "install"
START_TRACE = "start"
#used to note version of trace
# Used to note version of trace
TRACE_VERSION = "TRACE_VERSION"
TRACE_VER = 0x1
@ -181,7 +182,7 @@ class TraceReader(object):
return locations
def _sort_paths(self, pths):
#ensure in ok order (ie /tmp is before /)
# Ensure in correct order (ie /tmp is before /)
pths = list(set(pths))
pths.sort()
pths.reverse()
@ -215,14 +216,11 @@ class TraceReader(object):
def symlinks_made(self):
lines = self.read()
files = list()
links = list()
for (cmd, action) in lines:
if cmd == SYMLINK_MAKE and len(action):
files.append(action)
#ensure in ok order (ie /tmp is before /)
files.sort()
files.reverse()
return files
links.append(action)
return links
def files_configured(self):
lines = self.read()

View File

@ -50,7 +50,8 @@ ALL_NUMS = re.compile(r"^\d+$")
START_NUMS = re.compile(r"^(\d+)(\D+)")
STAR_VERSION = 0
#thx cowsay
# Thx cowsay
# See: http://www.nog.net/~tony/warez/cowsay.shtml
COWS = dict()
COWS['happy'] = r'''
{header}
@ -208,6 +209,8 @@ def get_host_ip():
were to be sent out to some well known address on the Internet. In this
case, a private address is used, but the specific address does not
matter much. No traffic is actually sent.
Adjusted from nova code...
"""
ip = None
try:
@ -218,7 +221,7 @@ def get_host_ip():
ip = addr
except socket.error:
pass
#attempt to find it
# Attempt to find it
if not ip:
interfaces = get_interfaces()
for (_, net_info) in interfaces.items():
@ -229,7 +232,7 @@ def get_host_ip():
if first_oct and first_oct not in PRIVATE_OCTS:
ip = a_ip
break
#just return a localhost version then
# Just return a localhost version then
if not ip:
ip = DEF_IP
return ip
@ -248,13 +251,13 @@ def get_interfaces():
interface_addresses = netifaces.ifaddresses(intfc)
ip6 = interface_addresses.get(netifaces.AF_INET6)
if ip6 and len(ip6):
#just take the first
# Just take the first
interface_info[settings.IPV6] = ip6[0]
ip4 = interface_addresses.get(netifaces.AF_INET)
if ip4 and len(ip4):
#just take the first
# Just take the first
interface_info[settings.IPV4] = ip4[0]
#there are others but this is good for now
# NOTE: there are others but this is good for now...
interfaces[intfc] = interface_info
return interfaces
@ -351,7 +354,8 @@ def param_replace(text, replacements, ignore_missing=False):
def _get_welcome_stack():
possibles = list()
#thank you figlet ;)
# Thank you figlet ;)
# See: http://www.figlet.org/
possibles.append(r'''
___ ____ _____ _ _ ____ _____ _ ____ _ __
/ _ \| _ \| ____| \ | / ___|_ _|/ \ / ___| |/ /
@ -407,7 +411,7 @@ def center_text(text, fill, max_len):
def _welcome_slang():
potentials = list()
potentials.append("And now for something completely different!")
return random.choice(potentials).strip("\n\r")
return random.choice(potentials)
def color_text(text, color, bold=False,
@ -436,7 +440,8 @@ def _color_blob(text, text_color):
def _goodbye_header(worked):
#cowsay headers
# Cowsay headers
# See: http://www.nog.net/~tony/warez/cowsay.shtml
potentials_oks = list()
potentials_oks.append(r'''
___________
@ -649,8 +654,8 @@ def welcome(ident):
footer += color_text(lower, 'blue', True)
uncolored_footer = (settings.PROG_NICE_NAME + ": " + lower)
if max_line_len - len(uncolored_footer) > 0:
#this format string will center the uncolored text which
#we will then replace with the color text equivalent
# This format string will center the uncolored text which
# we will then replace with the color text equivalent.
centered_str = center_text(uncolored_footer, " ", max_line_len)
footer = centered_str.replace(uncolored_footer, footer)
print(welcome_header)