Merge branch 'new/persona' of git://github.com/harlowja/Openstack-DevstackPy into harlowja-new/persona

Conflicts:
	devstack/cfg_helpers.py
	devstack/components/db.py
	devstack/components/keystone.py
	devstack/components/nova.py
This commit is contained in:
Joshua Harlow 2012-03-16 10:59:33 -07:00
commit a849d7b257
70 changed files with 1183 additions and 3176 deletions

View File

@ -1,11 +1,9 @@
# Ubuntu 11 (Oneiric)
# FIXME: Component dependencies should go into personas, not distros.
name: ubuntu-oneiric
distro_pattern: Ubuntu(.*)oneiric
packager_name: devstack.packaging.apt:AptPackager
packager_name: devstack.distros.oneiric:OneiricAptPackager
commands:
@ -18,13 +16,15 @@ commands:
status: ["service", "apache2", "status"]
settings:
conf-link-target: /etc/apache2/sites-enabled/000-default
libvirt-daemon: 'libvirt-bin'
mysql:
start: ["service", "mysql", 'start']
stop: ["service", 'mysql', "stop"]
status: ["service", 'mysql', "status"]
restart: ["service", 'mysql', "restart"]
#NOTE: we aren't stopping any sql injection...
# NOTE: we aren't stopping any sql injection...
set_pwd: ['mysql', '--user=%USER%', '--password=%OLD_PASSWORD%', '-e',
"\"USE mysql; UPDATE user SET password=PASSWORD('%NEW_PASSWORD%') WHERE User='%USER%'; FLUSH PRIVILEGES;\""]
create_db: ['mysql', '--user=%USER%', '--password=%PASSWORD%', '-e', 'CREATE DATABASE %DB%;']
@ -32,6 +32,12 @@ commands:
grant_all: ["mysql", "--user=%USER%", "--password=%PASSWORD%", '-e',
"\"GRANT ALL PRIVILEGES ON *.* TO '%USER%'@'%' IDENTIFIED BY '%PASSWORD%'; FLUSH PRIVILEGES;\""]
iscsi:
start: ['service', 'tgt', 'start']
stop: ['service', 'tgt', 'stop']
restart: ['service', 'tgt', 'restart']
status: ['service', 'tgt', 'status']
components:
db:
@ -151,10 +157,6 @@ components:
uninstall: devstack.components.glance:GlanceUninstaller
start: devstack.components.glance:GlanceRuntime
stop: devstack.components.glance:GlanceRuntime
dependencies:
- general
- keystone
- db
packages:
- name: python-eventlet
version: 0.9*
@ -198,12 +200,6 @@ components:
uninstall: devstack.components.horizon:HorizonUninstaller
start: devstack.components.horizon:HorizonRuntime
stop: devstack.components.horizon:HorizonRuntime
dependencies:
- general
- keystone-client
- glance
- nova-client
- quantum-client
packages:
- name: apache2
removable: True
@ -278,8 +274,6 @@ components:
uninstall: devstack.components.keystone_client:KeyStoneClientUninstaller
start: devstack.components.keystone_client:KeyStoneClientRuntime
stop: devstack.components.keystone_client:KeyStoneClientRuntime
dependencies:
- general
packages:
- name: python-argparse
removable: True
@ -293,10 +287,6 @@ components:
uninstall: devstack.components.keystone:KeystoneUninstaller
start: devstack.components.keystone:KeystoneRuntime
stop: devstack.components.keystone:KeystoneRuntime
dependencies:
- general
- db
- keystone-client
packages:
- name: libldap2-dev
removable: True
@ -361,9 +351,6 @@ components:
uninstall: devstack.components.melange:MelangeUninstaller
start: devstack.components.melange:MelangeRuntime
stop: devstack.components.melange:MelangeRuntime
dependencies:
- general
- db
packages:
- name: python-eventlet
removable: True
@ -390,88 +377,21 @@ components:
removable: True
version: 1.0*
nova-api:
# FIXME: This will report that it is installing/uninstalling
# "general" instead of the right name.
install: devstack.components.pkglist:Installer
uninstall: devstack.components.pkglist:Uninstaller
start: devstack.component:EmptyRuntime
stop: devstack.component:EmptyRuntime
packages:
- name: python-dateutil
removable: True
version: 1.4*
nova-cpu:
# FIXME: This will report that it is installing/uninstalling
# "general" instead of the right name.
install: devstack.components.pkglist:Installer
uninstall: devstack.components.pkglist:Uninstaller
start: devstack.component:EmptyRuntime
stop: devstack.component:EmptyRuntime
packages:
- name: kvm
removable: True
version: 1:84*
- name: libvirt-bin
removable: True
version: 0.9*
- name: libvirt0
removable: True
version: 0.9*
- name: lvm2
removable: True
version: 2.02*
- name: open-iscsi
removable: True
version: 2.0*
- name: open-iscsi-utils
removable: True
version: 2.0*
- name: python-libvirt
removable: True
version: 0.9.2*
- name: qemu-kvm
removable: True
version: 0.14.*
n-vnc:
no-vnc:
install: devstack.components.novnc:NoVNCInstaller
uninstall: devstack.components.novnc:NoVNCUninstaller
start: devstack.components.novnc:NoVNCRuntime
stop: devstack.components.novnc:NoVNCRuntime
dependencies:
- general
packages:
- name: python-numpy
removable: True
version: 1:1.5*
nova-vol:
# FIXME: This will report that it is installing/uninstalling
# "general" instead of the right name.
install: devstack.components.pkglist:Installer
uninstall: devstack.components.pkglist:Uninstaller
start: devstack.component:EmptyRuntime
stop: devstack.component:EmptyRuntime
packages:
- name: iscsitarget
removable: True
version: 1.4*
- name: lvm2
removable: True
version: 2.02*
- name: tgt
removable: True
version: 1:1*
nova-client:
install: devstack.components.nova_client:NovaClientInstaller
uninstall: devstack.components.nova_client:NovaClientUninstaller
start: devstack.components.nova_client:NovaClientRuntime
stop: devstack.components.nova_client:NovaClientRuntime
dependencies:
- general
packages:
- name: python-argparse
removable: True
@ -485,16 +405,6 @@ components:
uninstall: devstack.components.nova:NovaUninstaller
start: devstack.components.nova:NovaRuntime
stop: devstack.components.nova:NovaRuntime
dependencies:
- general
- keystone
- glance
- db
- rabbit
- nova-client
- nova-cpu
- nova-vol
- nova-api
packages:
- name: dnsmasq-base
removable: True
@ -596,51 +506,65 @@ components:
pips:
- name: iso8601
version: 0.1.4
subsystems:
vol:
packages:
- name: iscsitarget
removable: True
version: 1.4*
- name: lvm2
removable: True
version: 2.02*
- name: tgt
removable: True
version: 1:1*
api:
packages:
- name: python-dateutil
removable: True
version: 1.4*
cpu:
packages:
- name: kvm
removable: True
version: 1:84*
- name: libvirt-bin
removable: True
version: 0.9*
- name: libvirt0
removable: True
version: 0.9*
- name: lvm2
removable: True
version: 2.02*
- name: open-iscsi
removable: True
version: 2.0*
- name: open-iscsi-utils
removable: True
version: 2.0*
- name: python-libvirt
removable: True
version: 0.9.2*
- name: qemu-kvm
removable: True
version: 0.14.*
quantum-client:
install: devstack.components.quantum_client:QuantumClientInstaller
uninstall: devstack.components.quantum_client:QuantumClientUninstaller
start: devstack.components.quantum_client:QuantumClientRuntime
stop: devstack.components.quantum_client:QuantumClientRuntime
dependencies:
- general
packages:
- name: python-gflags
removable: True
version: 1.5*
quantum-openvswitch:
# FIXME: This will report that it is installing/uninstalling
# "general" instead of the right name.
install: devstack.components.pkglist:Installer
uninstall: devstack.components.pkglist:Uninstaller
start: devstack.component:EmptyRuntime
stop: devstack.component:EmptyRuntime
packages:
- name: openvswitch-datapath-dkms
removable: True
version: 1.2*
- name: openvswitch-switch
removable: True
version: 1.2*
- name: python-mysqldb
removable: True
version: 1.2*
- name: python-sqlalchemy
removable: True
version: 0.6*
quantum:
install: devstack.components.quantum:QuantumInstaller
uninstall: devstack.components.quantum:QuantumUninstaller
start: devstack.components.quantum:QuantumRuntime
stop: devstack.components.quantum:QuantumRuntime
dependencies:
- general
- quantum-client
# Default is to include openvswitch so it is here until
# we add proper persona support.
- quantum-openvswitch
packages:
- name: python-eventlet
removable: True
@ -663,8 +587,23 @@ components:
- name: python-routes
removable: True
version: 1.12*
subsystems:
openvswitch:
packages:
- name: openvswitch-datapath-dkms
removable: True
version: 1.2*
- name: openvswitch-switch
removable: True
version: 1.2*
- name: python-mysqldb
removable: True
version: 1.2*
- name: python-sqlalchemy
removable: True
version: 0.6*
rabbit:
rabbit-mq:
install: devstack.components.rabbit:RabbitInstaller
uninstall: devstack.components.rabbit:RabbitUninstaller
start: devstack.components.rabbit:RabbitRuntime
@ -679,9 +618,6 @@ components:
uninstall: devstack.components.swift:SwiftUninstaller
start: devstack.components.swift:SwiftRuntime
stop: devstack.components.swift:SwiftRuntime
dependencies:
- general
- keystone-client
packages:
- name: memcached
removable: True

View File

@ -0,0 +1,31 @@
---
created_on: Wed, 14 Mar 2012 17:28:24 -0700
description: Devstack.sh matching component installation (as of the above date).
supports:
- rhel-6
- ubuntu-oneiric
- fedora-16
components:
- db
- rabbit-mq
- keystone-client
- keystone
- glance
- nova
- no-vnc
- quantum-client
- nova-client
- horizon
subsystems:
glance:
- api
- reg
nova:
- api
- cauth
- cert
- cpu
- net
- sched
- vol
- xvnc

View File

@ -1,15 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
"rhel-6": {
"coverage": {
"version": "3.5.1"
},
"nose": {
"version": "1.1.2"
},
"mock": {
"version": "0.8.0"
}
}
}

View File

@ -1,26 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
"rhel-6": {
"SQLAlchemy": {
"version": "0.7.5"
},
"Routes": {
"version": "1.12.3"
},
#the base is 2.0, need to upgrade
"pycrypto": {
"version": "2.5",
"options": "--upgrade"
},
#the newest we can get is 1.3.3 from epel/rhel
#this causes issues like the following
#https://answers.launchpad.net/nova/+question/174160
"PasteDeploy": {
"version": "1.5.0"
},
"iso8601": {
"version": "0.1.4"
}
}
}

View File

@ -1,68 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
"rhel-6": {
#the base is 2.0, need to upgrade
"pycrypto": {
"version": "2.5",
"options": "--upgrade"
},
#the newest we can get is 1.3.3 from epel/rhel
#this causes issues like the following
#https://answers.launchpad.net/nova/+question/174160
#https://answers.launchpad.net/nova/+question/185116
"PasteDeploy": {
"version": "1.5"
},
"PasteScript": {
"version": "1.7.5"
},
"Paste": {
"version": "1.7.5.1"
},
"SQLAlchemy": {
"version": "0.7.5"
},
"sqlalchemy-migrate": {
"version": "0.7.2"
},
"python-cloudfiles": {
"version": "1.7.9.3"
},
"CherryPy": {
"version": "3.2.2"
},
#the base os doesn't have static files which
#seems only in 1.3 or greater
"django" : {
"version": "1.3.1"
},
"django-nose" : {
"version": "0.1.3"
},
"django-nose-selenium": {
"version": "0.7.3"
},
"django-registration" : {
"version": "0.7"
},
"django-mailer" : {
"version": "0.1.0"
},
"django-staticfiles" : {
"version": "1.1.2"
},
"Routes": {
"version": "1.12.3"
}
},
"fedora-16": {
# rpm 1.7.9.1 does not have exceptions.Error
"python-cloudfiles": {
"version": "1.7.9.3"
},
"django-nose" : {
"version": "0.1.3"
}
}
}

View File

@ -1,36 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
"rhel-6": {
"SQLAlchemy": {
"version": "0.7.5"
},
"sqlalchemy-migrate": {
"version": "0.7.2"
},
#the base is 2.0, need to upgrade
"pycrypto": {
"version": "2.5",
"options": "--upgrade"
},
"py-bcrypt": {
"version": "0.2"
},
"Routes": {
"version": "1.12.3"
},
#the newest we can get is 1.3.3 from epel/rhel
#this causes issues like the following
#https://answers.launchpad.net/nova/+question/174160
#https://answers.launchpad.net/nova/+question/185116
"PasteDeploy": {
"version": "1.5"
},
"PasteScript": {
"version": "1.7.5"
},
"Paste": {
"version": "1.7.5.1"
}
}
}

View File

@ -1,9 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
"rhel-6": {
"numpy": {
"version": "1.5"
}
}
}

View File

@ -1,41 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
"rhel-6": {
"SQLAlchemy": {
"version": "0.7.5"
},
#the base is 0.4, need to upgrade (?)
"Tempita": {
"version": "0.5dev",
"options": "--upgrade"
},
"sqlalchemy-migrate": {
"version": "0.7.2"
},
#the base is 2.0, need to upgrade
"pycrypto": {
"version": "2.5",
"options": "--upgrade"
},
"Routes": {
"version": "1.12.3"
},
#the newest we can get is 1.3.3 from epel/rhel
#this causes issues like the following
#https://answers.launchpad.net/nova/+question/174160
#https://answers.launchpad.net/nova/+question/185116
"PasteDeploy": {
"version": "1.5"
},
"PasteScript": {
"version": "1.7.5"
},
"Paste": {
"version": "1.7.5.1"
},
"iso8601": {
"version": "0.1.4"
}
}
}

View File

@ -1,28 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
"rhel-6": {
"SQLAlchemy": {
"version": "0.7.5"
},
"sqlalchemy-migrate": {
"version": "0.7.2"
},
#the newest we can get is 1.3.3 from epel/rhel
#this causes issues like the following
#https://answers.launchpad.net/nova/+question/174160
#https://answers.launchpad.net/nova/+question/185116
"PasteDeploy": {
"version": "1.5"
},
"PasteScript": {
"version": "1.7.5"
},
"Paste": {
"version": "1.7.5.1"
},
"Routes": {
"version": "1.12.3"
}
}
}

View File

@ -1,59 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"ubuntu-oneiric": {
"mysql-client-5.1": {
"version": "5.1*",
"removable": true
},
"mysql-server-core-5.1": {
"version": "5.1*",
"removable": true
},
"mysql-common": {
"version": "5.1*",
"removable": true
},
"mysql-server-5.1": {
"version": "5.1*",
"removable": true,
"pre-install": [
{
# This apparently is a action needed for ubuntu/debian to set the password to something known....
"run_as_root": true,
"cmd": [
"debconf-set-selections"
],
"stdin": [
"mysql-server-5.1 mysql-server/root_password password %PASSWORD%",
"mysql-server-5.1 mysql-server/root_password_again password %PASSWORD%",
"mysql-server-5.1 mysql-server/start_on_boot boolean %BOOT_START%"
]
}
]
}
},
"rhel-6": {
"mysql-server": {
"version": "5.1*",
"removable": true
},
"mysql": {
"version": "5.1*",
"removable": true
}
},
"fedora-16": {
"mysql-server": {
"version": "5.5*",
"removable": true
},
"mysql": {
"version": "5.5*",
"removable": true
}
}
}

View File

@ -1,255 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"curl": {
"removable": false,
"version": "7.19.7*"
},
"euca2ools": {
"removable": false,
"version": "1.3.1*",
"meta": {
"epel" : true
}
},
"git": {
"removable": false,
"version": "1.7*"
},
"gawk": {
"version": "3.1*",
"removable": false
},
"iputils": {
"removable": false,
"version": "20071127*"
},
"mlocate": {
"removable": false,
"version": "0.22*"
},
"lsof": {
"removable": false,
"version": "4.82*"
},
"openssh-server": {
"removable": false,
"version": "5.3*"
},
"python-pep8": {
"removable": false,
"version": "0.6*",
"meta": {
"epel" : true
}
},
"psmisc": {
"removable": false,
"version": "22.6*"
},
"pylint": {
"removable": false,
"version": "0.21*",
"meta": {
"epel" : true
}
},
"python": {
"removable": false,
"version": "2.6*"
},
"python-pip": {
"removable": false,
"version": "0.8*",
"meta": {
"epel" : true
}
},
"python-unittest2": {
"removable": false,
"version": "0.5.1*",
"meta": {
"epel" : true
}
},
"python-virtualenv": {
"removable": false,
"version": "1.7*",
"meta": {
"epel" : true
}
},
"python-setuptools": {
"version": "0.6.10*",
"removable": false
},
"python-distutils-extra": {
"version": "2.29*",
"removable": false,
"meta": {
"epel" : true
}
},
"python-devel": {
"version": "2.6*",
"removable": false
},
"python-mox": {
"version": "0.5.3*",
"removable": false,
"meta": {
"epel" : true
}
},
"screen": {
"removable": false,
"version": "4.0.3*"
},
"sudo": {
"removable": false,
"version" : "1.7.4*"
},
"tcpdump": {
"removable": false,
"version": "4.0*"
},
"unzip": {
"removable": false,
"version": "6.0*"
},
"wget": {
"removable": false,
"version": "1.12*"
},
"libxml2-devel": {
"removable": false,
"version": "2.7*"
},
"libxslt-devel": {
"removable": false,
"version": "1.1.26*"
},
"coreutils": {
"removable": false,
"version": "8.4*"
}
},
"fedora-16": {
"curl": {
"removable": false,
"version": "7.21.7*"
},
"euca2ools": {
"removable": false,
"version": "1.3.1*"
},
"git": {
"removable": false,
"version": "1.7*"
},
"gawk": {
"version": "4.0*",
"removable": false
},
"iputils": {
"removable": false,
"version": "20101006*"
},
"mlocate": {
"removable": false,
"version": "0.24*"
},
"lsof": {
"removable": false,
"version": "4.84*"
},
"openssh-server": {
"removable": false,
"version": "5.8*"
},
"python-pep8": {
"removable": false,
"version": "0.6*"
},
"psmisc": {
"removable": false,
"version": "22.13*"
},
"pylint": {
"removable": false,
"version": "0.24*"
},
"python": {
"removable": false,
"version": "2.7*"
},
"python-coverage": {
"removable": false,
"version": "3.5*"
},
"python-pip": {
"removable": false,
"version": "0.8*"
},
"python-unittest2": {
"removable": false,
"version": "0.5.1*"
},
"python-virtualenv": {
"removable": false,
"version": "1.7*"
},
"python-setuptools": {
"version": "0.6.24*",
"removable": false
},
"python-distutils-extra": {
"version": "2.29*",
"removable": false
},
"python-devel": {
"version": "2.7*",
"removable": false
},
"python-nose": {
"version": "1.1*",
"removable": false
},
"python-mox": {
"version": "0.5.3*",
"removable": false
},
"screen": {
"removable": false,
"version": "4.1.0*"
},
"sudo": {
"removable": false,
"version" : "1.8.3*"
},
"tcpdump": {
"removable": false,
"version": "4.1*"
},
"unzip": {
"removable": false,
"version": "6.0*"
},
"wget": {
"removable": false,
"version": "1.12*"
},
"libxml2-devel": {
"removable": false,
"version": "2.7*"
},
"libxslt-devel": {
"removable": false,
"version": "1.1.26*"
}
}
}

View File

@ -1,97 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"python-eventlet": {
"version": "0.9*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-greenlet": {
"version": "0.3.1*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-argparse": {
"version": "1.2*",
"removable": true,
"meta": {
"epel" : true
}
},
"MySQL-python": {
"version": "1.2*",
"removable": true
},
"python-dateutil": {
"version": "1.4*",
"removable": true
},
"python-prettytable": {
"version": "0.5*",
"removable": true,
"meta": {
"epel" : true
}
},
"pyxattr": {
"version": "0.5*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-httplib2": {
"version": "0.4*",
"removable": true,
"meta": {
"epel" : true
}
}
},
"fedora-16": {
"python-eventlet": {
"version": "0.9*",
"removable": true
},
"python-routes": {
"version": "1.12*",
"removable": true
},
"python-greenlet": {
"version": "0.3.1*",
"removable": true
},
"python-argparse": {
"version": "1.1*",
"removable": true
},
"MySQL-python": {
"version": "1.2*",
"removable": true
},
"python-dateutil": {
"version": "1.5*",
"removable": true
},
"python-prettytable": {
"version": "0.5*",
"removable": true
},
"pyxattr": {
"version": "0.5*",
"removable": true
},
"python-httplib2": {
"version": "0.6*",
"removable": true
}
}
}

View File

@ -1,124 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"httpd": {
"version": "2.2*",
"removable": true
},
"mod_wsgi": {
"version": "3.2*",
"removable": true
},
"python-dateutil": {
"version": "1.4*",
"removable": true
},
"python-webob1.0": {
"version": "1.0*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-kombu": {
"version": "1.1*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-eventlet": {
"version": "0.9*",
"removable": true,
"meta": {
"epel" : true
}
}
},
# initially stolen from ubuntu-oneric
"fedora-16": {
"httpd": {
"version": "2.2*",
"removable": true
},
"mod_wsgi": {
"version": "3.3*",
"removable": true
},
"python-dateutil": {
"version": "1.5*",
"removable": true
},
"python-paste": {
"version": "1.7.5*",
"removable": true
},
"python-paste-deploy": {
"version": "1.5.0*",
"removable": true
},
"python-routes": {
"version": "1.12*",
"removable": true
},
"pyxattr": {
"version": "0.5*",
"removable": true
},
"python-sqlalchemy": {
"version": "0.7*",
"removable": true
},
"python-webob": {
"version": "1.0*",
"removable": true
},
"python-kombu": {
"version": "1.1*",
"removable": true
},
"python-eventlet": {
"version": "0.9*",
"removable": true
},
"python-sphinx": {
"version": "1.0.7*",
"removable": true
},
"python-cherrypy": {
"version": "3.2*",
"removable": true
},
"Django": {
"version": "1.3*",
"removable": true
},
# Not packaged yet
# "python-django-mailer": {
# "version": "0.2*",
# "removable": true
# },
# Is this really needed?
# Only in F16 testing repo
# "python-django-nose": {
# "version": "0.1*",
# "removable": true
# },
"django-registration": {
"version": "0.7*",
"removable": true
},
"python-cloudfiles": {
"version": "1.7.9*",
"removable": true
},
"python-migrate": {
"version": "0.7*",
"removable": true
}
}
}

View File

@ -1,33 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"python-argparse": {
"version": "1.2*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-prettytable": {
"version": "0.5*",
"removable": true,
"meta": {
"epel" : true
}
}
},
"fedora-16": {
"python-prettytable": {
"version": "0.5*",
"removable": true
},
"python-argparse": {
"version": "1.1*",
"removable": true
}
}
}

View File

@ -1,165 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"python-eventlet": {
"version": "0.9*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-lxml": {
# Trashes IPA client, which is probably bad
"version": "2.2*",
"removable": true
},
"python-prettytable": {
"version": "0.5*",
"removable": true,
"meta": {
"epel" : true
}
},
"MySQL-python": {
"version": "1.2*",
"removable": true
},
"sqlite": {
"version": "3.6*",
# Trashes alot of the base os (so we don't allow it to be removed)
"removable": false
},
"python-sqlite2": {
"version": "2.3*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-webob1.0": {
"version": "1.0*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-argparse": {
"version": "1.2*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-greenlet": {
"version": "0.3.1*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-passlib": {
"version": "1.5*",
"removable": true,
"meta": {
"epel" : true
}
},
"openldap": {
# Trashes alot of the base os (so we don't allow it to be removed)
"removable": false,
"version": "2.4*"
},
"openldap-devel": {
# Trashes alot of the base os (so we don't allow it to be removed)
"removable": true,
"version": "2.4*"
},
"cyrus-sasl-lib": {
# Trashes alot of the base os (so we don't allow it to be removed)
"version": "2.1*",
"removable": false
}
},
"fedora-16": {
"python-eventlet": {
"version": "0.9*",
"removable": true
},
"python-lxml": {
# Trashes IPA client, which is probably bad
"version": "2.3*",
"removable": true
},
"python-paste-script": {
"version": "1.7*",
"removable": true
},
"python-prettytable": {
"version": "0.5*",
"removable": true
},
"MySQL-python": {
"version": "1.2*",
"removable": true
},
"python-migrate": {
"version": "0.7*",
"removable": true
},
"python-paste-deploy": {
"version": "1.5*",
"removable": true
},
"python-paste": {
"version": "1.7.5*",
"removable": true
},
"sqlite": {
"version": "3.7*",
# Trashes alot of the base os (so we don't allow it to be removed)
"removable": false
},
"python-sqlite2": {
"version": "2.3*",
"removable": true
},
"python-sqlalchemy": {
"version": "0.7*",
"removable": true
},
"python-webob": {
"version": "1.0*",
"removable": true
},
"python-argparse": {
"version": "1.1*",
"removable": true
},
"python-greenlet": {
"version": "0.3.1*",
"removable": true
},
"python-routes": {
"version": "1.12*",
"removable": true
},
"python-passlib": {
"version": "1.5*",
"removable": true
},
"openldap": {
# Trashes alot of the base os (so we don't allow it to be removed)
"removable": false,
"version": "2.4*"
},
"cyrus-sasl-lib": {
# Trashes alot of the base os (so we don't allow it to be removed)
"version": "2.1*",
"removable": false
}
}
}

View File

@ -1,10 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
# TBD
}
}

View File

@ -1,19 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"python-dateutil": {
"version": "1.4*",
"removable": false
}
},
"fedora-16": {
"python-dateutil": {
"version": "1.5*",
"removable": false
}
}
}

View File

@ -1,80 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"lvm2": {
"version": "2.02*",
"removable": true
},
"iscsi-initiator-utils": {
"version": "6.2*",
"removable": true
},
# Only really needed if you plan on using kvm
"qemu-kvm": {
"version": "2*",
"removable": true
},
"qemu-img": {
"version": "0.12*",
"removable": true
},
"libvirt-python": {
"version": "0.9.4*",
"removable": true
},
"libvirt-client": {
"version": "0.9.4*",
"removable": true
},
"libvirt": {
"version": "0.9.4*",
"removable": true
},
"libguestfs": {
"version": "1.2.7*",
"removable": true
},
"libguestfs-mount": {
"version": "1.7.17*",
"removable": true
},
"libguestfs-tools": {
"version": "1.7.17*",
"removable": true
}
},
"fedora-16": {
"lvm2": {
"version": "2.02*",
"removable": true
},
"iscsi-initiator-utils": {
"version": "6.2*",
"removable": true
},
"qemu-system-x86": {
"version": "0.15*",
"removable": true
},
"qemu-img": {
"version": "0.15*",
"removable": true
},
"libvirt-python": {
"version": "0.9.6*",
"removable": true
},
"libvirt-client": {
"version": "0.9.6*",
"removable": true
},
"libvirt": {
"version": "0.9.6*",
"removable": true
}
}
}

View File

@ -1,15 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
},
"fedora-16": {
"numpy": {
"version": "1.6*",
"removable": true
}
}
}

View File

@ -1,35 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"lvm2": {
"version": "2.02*",
"removable": true
},
"scsi-target-utils": {
"version": "1.0*",
"removable": true
},
"iscsi-initiator-utils": {
"version": "6.2*",
"removable": true
}
},
"fedora-16": {
"lvm2": {
"version": "2.02*",
"removable": true
},
"scsi-target-utils": {
"version": "1.0*",
"removable": true
},
"iscsi-initiator-utils": {
"version": "6.2*",
"removable": true
}
}
}

View File

@ -1,33 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"python-argparse": {
"version": "1.2*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-prettytable": {
"version": "0.5*",
"removable": true,
"meta": {
"epel" : true
}
}
},
"fedora-16": {
"python-argparse": {
"version": "1.1*",
"removable": true
},
"python-prettytable": {
"version": "0.5*",
"removable": true
}
}
}

View File

@ -1,264 +0,0 @@
# This is a extended json package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
# For dhcp_release
"dnsmasq": {
"version": "2.48*",
"removable": false
},
"kpartx": {
"version": "0.4*",
"removable": false
},
"parted": {
"version": "2.1*",
"removable": false
},
"iputils": {
"version": "20071127*",
"removable": false
},
"MySQL-python": {
"version": "1.2*",
"removable": true
},
# Needed for glance which is needed for nova --- this shouldn't be here
"pyxattr": {
"version": "0.5*",
"removable": true,
"meta": {
"epel" : true
}
},
# Needed for glance which is needed for nova --- this shouldn't be here
"python-lxml": {
"version": "2.2*",
"removable": true
},
"iptables": {
"version": "1.4*",
"removable": false
},
"ebtables": {
"version": "2.0*",
"removable": false
},
"sqlite": {
"version": "3.6*",
# Trashes alot of the base os (so we don't allow it to be removed)
"removable": false
},
# ???
# "vlan": {
# "version": "1.9*",
# "removable": true
# },
# Used by ajaxterm
"socat": {
"version": "1.7*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-gflags": {
"version": "1.4*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-greenlet": {
"version": "0.3.1*",
"removable": true,
"meta": {
"epel" : true
}
},
# This seems to kill RHN/yum if its removed ??
"libxml2-python": {
"version": "2.7*",
"removable": false
},
"python-netaddr": {
"version": "0.7*",
"removable": true
},
"python-eventlet": {
"version": "0.9*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-cheetah": {
"version": "2.4*",
"removable": true
},
"python-carrot": {
"version": "0.10*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-suds": {
"version": "0.4*",
"removable": true
},
"python-lockfile": {
"version": "0.8*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-crypto": {
"version": "2.0*",
"removable": false
},
"python-boto": {
"version": "2.0*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-kombu": {
"version": "1.1*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-feedparser": {
"version": "5.0*",
"removable": true,
"meta": {
"epel" : true
}
}
},
"fedora-16": {
# For dhcp_release
"dnsmasq": {
"version": "2.58*",
"removable": false
},
"kpartx": {
"version": "0.4*",
"removable": false
},
"parted": {
"version": "3.0*",
"removable": false
},
"iputils": {
"version": "20101006*",
"removable": false
},
"MySQL-python": {
"version": "1.2*",
"removable": true
},
# Needed for glance which is needed for nova --- this shouldn't be here
"pyxattr": {
"version": "0.5*",
"removable": true
},
# Needed for glance which is needed for nova --- this shouldn't be here
"python-lxml": {
"version": "2.3*",
"removable": true
},
"iptables": {
"version": "1.4*",
"removable": false
},
"ebtables": {
"version": "2.0*",
"removable": false
},
"sqlite": {
"version": "3.7*",
# Trashes alot of the base os (so we don't allow it to be removed)
"removable": false
},
# ???
# "vconfig": {
# "version": "1.9*",
# "removable": true
# },
# Used by ajaxterm
"socat": {
"version": "1.7*",
"removable": true
},
"python-paste": {
"version": "1.7.5*",
"removable": true
},
"python-gflags": {
"version": "1.5*",
"removable": true
},
"python-greenlet": {
"version": "0.3.1*",
"removable": true
},
# This seems to kill RHN/yum if its removed ??
# TODO verify...libxml2-python dependencies do not include yum
"libxml2-python": {
"version": "2.7*",
"removable": false
},
"python-routes": {
"version": "1.12*",
"removable": true
},
"python-netaddr": {
"version": "0.7*",
"removable": true
},
"python-eventlet": {
"version": "0.9*",
"removable": true
},
"python-cheetah": {
"version": "2.4*",
"removable": true
},
"python-carrot": {
"version": "0.10*",
"removable": true
},
"python-suds": {
"version": "0.4*",
"removable": true
},
"python-lockfile": {
"version": "0.9*",
"removable": true
},
"python-crypto": {
"version": "2.3*",
"removable": false
},
"python-boto": {
"version": "2.0*",
"removable": true
},
"python-kombu": {
"version": "1.1*",
"removable": true
},
"python-feedparser": {
"version": "5.0*",
"removable": true
}
}
}

View File

@ -1,22 +0,0 @@
# This is an extended JSON package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"python-gflags": {
"version": "1.4*",
"removable": true,
"meta": {
"epel" : true
}
}
},
"fedora-16": {
"python-gflags": {
"version": "1.5*",
"removable": true
}
}
}

View File

@ -1,20 +0,0 @@
# This is an extended JSON package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
},
"fedora-16": {
# Seems needed for ovs_quantum_agent.py
"python-sqlalchemy": {
"version": "0.7*",
"removable": true
},
"MySQL-python": {
"version": "1.2*",
"removable": true
}
}
}

View File

@ -1,64 +0,0 @@
# This is an extended JSON package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"python-greenlet": {
"version": "0.3.1*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-eventlet": {
"version": "0.9*",
"removable": true,
"meta": {
"epel" : true
}
},
"python-gflags": {
"version": "1.4*",
"removable": true,
"meta": {
"epel" : true
}
},
"libxml2-python": {
"version": "2.7*",
"removable": false
}
},
"fedora-16": {
"python-eventlet": {
"version": "0.9*",
"removable": true
},
"python-greenlet": {
"version": "0.3*",
"removable": true
},
"python-routes": {
"version": "1.12*",
"removable": true
},
"python-lxml": {
"version": "2.3*",
"removable": true
},
"python-paste": {
"version": "1.7*",
"removable": true
},
"python-gflags": {
"version": "1.5*",
"removable": true
},
"python-paste-deploy": {
"version": "1.5*",
"removable": true
}
}
}

View File

@ -1,65 +0,0 @@
# This is an extended JSON package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
"rabbitmq-server": {
"version": "2.6*",
"removable": true,
"meta": {
"epel" : true
},
"pre-install": [
{
# Qpidd runs on the same port
"run_as_root": true,
"ignore_failure": true,
"cmd": [
"service",
"qpidd",
"stop"
]
},
{
# Qpidd runs on the same port
"run_as_root": true,
"ignore_failure": true,
"cmd": [
"chkconfig",
"qpidd",
"off"
]
}
]
}
},
"fedora-16": {
"rabbitmq-server": {
"version": "2.6*",
"removable": true,
"pre-install": [
{
# Qpidd runs on the same port
"run_as_root": true,
"cmd": [
"service",
"qpidd",
"stop"
]
},
{
# Qpidd runs on the same port
"run_as_root": true,
"ignore_failure": true,
"cmd": [
"chkconfig",
"qpidd",
"off"
]
}
]
}
}
}

View File

@ -1,49 +0,0 @@
# This is an extended JSON package definition file
# We allow simple comments (lines starting with a hash symbol)
{
# This is currently our "master" repo set with versions
# that we are trying to align other distributions with.
# The versions should at least be major + minor!
"rhel-6": {
# TBD
},
"fedora-16": {
# installed for swift-keystone
"memcached": {
"version": "1.4*",
"removable": true
},
"python-configobj": {
"version": "4.7*",
"removable": true
},
"python-eventlet": {
"version": "0.9*",
"removable": true
},
"python-greenlet": {
"version": "0.3*",
"removable": true
},
"python-netifaces": {
"version": "0.5*",
"removable": true
},
"python-paste-deploy": {
"version": "1.5*",
"removable": true
},
"python-webob": {
"version": "1.0*",
"removable": true
},
"pyxattr": {
"version": "0.5*",
"removable": true
},
"sqlite": {
"version": "3.7*",
"removable": true
}
}
}

View File

@ -22,6 +22,7 @@ from devstack import date
from devstack import env
from devstack import exceptions as excp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils
@ -31,6 +32,16 @@ SUB_MATCH = re.compile(r"(?:\$\(([\w\d]+):([\w\d]+))\)")
CACHE_MSG = "(value will now be internally cached)"
def get_config(cfg_fn=None, cfg_cls=None):
if not cfg_fn:
cfg_fn = sh.canon_path(settings.STACK_CONFIG_LOCATION)
if not cfg_cls:
cfg_cls = StackConfigParser
config_instance = cfg_cls()
config_instance.read(cfg_fn)
return config_instance
class IgnoreMissingConfigParser(ConfigParser.RawConfigParser):
DEF_INT = 0
DEF_FLOAT = 0.0

View File

@ -30,7 +30,5 @@ def make_id(section, option):
def fetch_run_type(config):
run_type = config.getdefaulted("default", "run_type",
settings.RUN_TYPE_DEF)
run_type = run_type.upper()
return run_type
run_type = config.getdefaulted("default", "run_type", settings.RUN_TYPE_DEF)
return run_type.upper()

View File

@ -32,93 +32,106 @@ from devstack.runners import screen
LOG = logging.getLogger("devstack.component")
#how we actually setup and unsetup python
# How we actually setup and unsetup python
PY_INSTALL = ['python', 'setup.py', 'develop']
PY_UNINSTALL = ['python', 'setup.py', 'develop', '--uninstall']
#runtime status constants (return by runtime status)
# Runtime status constants (return by runtime status)
# TODO: move...
STATUS_UNKNOWN = "unknown"
STATUS_STARTED = "started"
STATUS_STOPPED = "stopped"
#which run types to which runner class
# Which run types to which runner class
RUNNER_CLS_MAPPING = {
settings.RUN_TYPE_FORK: fork.ForkRunner,
settings.RUN_TYPE_UPSTART: upstart.UpstartRunner,
settings.RUN_TYPE_SCREEN: screen.ScreenRunner,
}
#where symlinks will go
# Where symlinks will go
BASE_LINK_DIR = "/etc"
class ComponentBase(object):
def __init__(self, component_name, runner,
root_dir, component_options,
instances=None,
**kwds):
self.component_name = component_name
def __init__(self,
desired_subsystems,
subsystem_info,
runner,
component_dir,
all_instances,
name,
*args,
**kargs):
self.desired_subsystems = desired_subsystems
self.instances = all_instances
self.component_name = name
self.subsystem_info = subsystem_info
# The runner has a reference to us, so use a weakref here to
# avoid breaking garbage collection.
self.runner = weakref.proxy(runner)
self.root = root_dir
self.component_opts = component_options or {}
self.instances = instances or {}
# Parts of the global runner context that we use
self.cfg = runner.cfg
self.pw_gen = runner.pw_gen
self.packager = runner.pkg_manager
self.distro = runner.distro
self.component_root = sh.joinpths(self.root, component_name)
self.tracedir = sh.joinpths(self.component_root,
# Required component directories
self.component_dir = component_dir
self.trace_dir = sh.joinpths(self.component_dir,
settings.COMPONENT_TRACE_DIR)
self.appdir = sh.joinpths(self.component_root,
self.app_dir = sh.joinpths(self.component_dir,
settings.COMPONENT_APP_DIR)
self.cfgdir = sh.joinpths(self.component_root,
self.cfg_dir = sh.joinpths(self.component_dir,
settings.COMPONENT_CONFIG_DIR)
self.kargs = kwds
def get_dependencies(self):
return self.runner.distro.components[self.component_name].get('dependencies', [])[:]
def verify(self):
pass
# Ensure subsystems are known...
knowns = self.known_subsystems()
for s in self.desired_subsystems:
if s not in knowns:
raise RuntimeError("Unknown subsystem %r requested" % (s))
for s in self.subsystem_info.keys():
if s not in knowns:
raise RuntimeError("Unknown subsystem %r provided" % (s))
def known_subsystems(self):
return list()
def warm_configs(self):
pass
def is_started(self):
reader = tr.TraceReader(tr.trace_fn(self.tracedir, tr.START_TRACE))
reader = tr.TraceReader(tr.trace_fn(self.trace_dir, tr.START_TRACE))
return reader.exists()
def is_installed(self):
return tr.TraceReader(tr.trace_fn(self.tracedir, tr.IN_TRACE)).exists()
return tr.TraceReader(tr.trace_fn(self.trace_dir, tr.IN_TRACE)).exists()
class PkgInstallComponent(ComponentBase):
def __init__(self, component_name, **kargs):
ComponentBase.__init__(self, component_name, **kargs)
self.tracewriter = tr.TraceWriter(tr.trace_fn(self.tracedir,
tr.IN_TRACE)
)
def __init__(self, *args, **kargs):
ComponentBase.__init__(self, *args, **kargs)
self.tracewriter = tr.TraceWriter(tr.trace_fn(self.trace_dir,
tr.IN_TRACE))
self.packages = kargs.get('packages', list())
def _get_download_locations(self):
return list()
def download(self):
locations = self._get_download_locations()
base_dir = self.appdir
base_dir = self.app_dir
for location_info in locations:
uri_tuple = location_info["uri"]
branch_tuple = location_info.get("branch")
subdir = location_info.get("subdir")
sub_dir = location_info.get("subdir")
target_loc = base_dir
if subdir:
target_loc = sh.joinpths(base_dir, subdir)
if sub_dir:
target_loc = sh.joinpths(base_dir, sub_dir)
branch = None
if branch_tuple:
(cfg_section, cfg_key) = branch_tuple
@ -135,11 +148,11 @@ class PkgInstallComponent(ComponentBase):
raise excp.ConfigException(msg)
self.tracewriter.download_happened(target_loc, uri)
dirs_made = down.download(target_loc, uri, branch)
#ensure this is always added so that
#if a keep old happens then this of course
#won't be recreated, but if u uninstall without keeping old
#then this won't be deleted this time around
#adding it in is harmless and willl make sure its removed
# Here we ensure this is always added so that
# if a keep-old happens then this of course
# won't be recreated, but if you uninstall without keeping old
# then this won't be deleted this time around;
# adding it in is harmless and will make sure it's removed.
dirs_made.append(target_loc)
self.tracewriter.dirs_made(*dirs_made)
return len(locations)
@ -147,33 +160,39 @@ class PkgInstallComponent(ComponentBase):
def _get_param_map(self, config_fn):
return dict()
def _get_packages(self):
pkg_list = list(self.packages)
for name in self.desired_subsystems:
if name in self.subsystem_info:
# Todo handle duplicates/version differences?
LOG.debug("Extending package list with packages for subsystem %s" % (name))
subsystem_pkgs = self.subsystem_info[name].get('packages', list())
pkg_list.extend(subsystem_pkgs)
return pkg_list
def install(self):
LOG.debug('Preparing to install packages for %s',
self.component_name)
pkgs = self.component_opts.get('packages', [])
pkgs = self._get_packages()
if pkgs:
pkgnames = sorted([p['name'] for p in pkgs])
LOG.info("Installing packages (%s).", ", ".join(pkgnames))
# FIXME: We should only record the packages we actually
# install without error.
#do this before install just incase it craps out half way through
for pkg in pkgs:
self.tracewriter.package_installed(p['name'], pkg)
#now actually install
self.packager.install_batch(pkgs)
pkg_names = set([p['name'] for p in pkgs])
LOG.info("Setting up %s packages (%s)" % (len(pkg_names), ", ".join(pkg_names)))
for p in pkgs:
self.tracewriter.package_installed(p)
self.packager.install(p)
else:
LOG.info('No packages to install for %s',
self.component_name)
return self.tracedir
return self.trace_dir
def pre_install(self):
pkgs = self.component_opts.get('packages', [])
pkgs = self._get_packages()
if pkgs:
mp = self._get_param_map(None)
self.packager.pre_install(pkgs, mp)
def post_install(self):
pkgs = self.component_opts.get('packages', [])
pkgs = self._get_packages()
if pkgs:
mp = self._get_param_map(None)
self.packager.post_install(pkgs, mp)
@ -185,7 +204,7 @@ class PkgInstallComponent(ComponentBase):
return contents
def _get_target_config_name(self, config_fn):
return sh.joinpths(self.cfgdir, config_fn)
return sh.joinpths(self.cfg_dir, config_fn)
def _get_source_config(self, config_fn):
return utils.load_template(self.component_name, config_fn)
@ -205,13 +224,9 @@ class PkgInstallComponent(ComponentBase):
if configs:
LOG.info("Configuring %s files", len(configs))
for fn in configs:
#get the params and where it should come from and
#where it should go
parameters = self._get_param_map(fn)
tgt_fn = self._get_target_config_name(fn)
#ensure directory is there (if not created previously)
self.tracewriter.dirs_made(*sh.mkdirslist(sh.dirname(tgt_fn)))
#now configure it
LOG.info("Configuring file %s", fn)
(source_fn, contents) = self._get_source_config(fn)
LOG.debug("Replacing parameters in file %s", source_fn)
@ -220,13 +235,15 @@ class PkgInstallComponent(ComponentBase):
LOG.debug("Applying side-effects of param replacement for template %s", source_fn)
contents = self._config_adjust(contents, fn)
LOG.info("Writing configuration file %s", tgt_fn)
#this trace is used to remove the files configured
self.tracewriter.cfg_file_written(sh.write_file(tgt_fn,
contents))
return len(configs)
def _configure_symlinks(self):
links = self._get_symlinks()
# This sort happens so that we link in the correct order
# although it might not matter. Either way. We ensure that the right
# order happens. Ie /etc/blah link runs before /etc/blah/blah
link_srcs = sorted(links.keys())
link_srcs.reverse()
for source in link_srcs:
@ -235,8 +252,8 @@ class PkgInstallComponent(ComponentBase):
LOG.info("Symlinking %s => %s", link, source)
self.tracewriter.dirs_made(*sh.symlink(source, link))
self.tracewriter.symlink_made(link)
except OSError:
LOG.warn("Symlink %s => %s already exists.", link, source)
except OSError as e:
LOG.warn("Symlink (%s => %s) error (%s)", link, source, e)
return len(links)
def configure(self):
@ -246,26 +263,33 @@ class PkgInstallComponent(ComponentBase):
class PythonInstallComponent(PkgInstallComponent):
def __init__(self, component_name, *args, **kargs):
PkgInstallComponent.__init__(self, component_name, *args, **kargs)
def __init__(self, *args, **kargs):
PkgInstallComponent.__init__(self, *args, **kargs)
self.pips = kargs.get('pips', list())
def _get_python_directories(self):
py_dirs = dict()
py_dirs[self.component_name] = self.appdir
py_dirs[self.component_name] = self.app_dir
return py_dirs
def _get_pips(self):
pip_list = list(self.pips)
for name in self.desired_subsystems:
if name in self.subsystem_info:
# Todo handle duplicates/version differences?
LOG.debug("Extending pip list with pips for subsystem %s" % (name))
subsystem_pips = self.subsystem_info[name].get('pips', list())
pip_list.extend(subsystem_pips)
return pip_list
def _install_pips(self):
pips = dict((p['name'], p)
for p in self.component_opts.get('pips', [])
)
pips = self._get_pips()
if pips:
LOG.info("Setting up %s pips (%s)",
len(pips), ", ".join(pips.keys()))
#do this before install just incase it craps out half way through
for name in pips.keys():
self.tracewriter.pip_installed(name, pips.get(name))
#now install
pip.install(pips, self.distro)
pip_names = set([p['name'] for p in pips])
LOG.info("Setting up %s pips (%s)", len(pip_names), ", ".join(pip_names))
for p in pips:
self.tracewriter.pip_installed(p)
pip.install(p, self.distro)
def _install_python_setups(self):
pydirs = self._get_python_directories()
@ -273,18 +297,17 @@ class PythonInstallComponent(PkgInstallComponent):
LOG.info("Setting up %s python directories (%s)",
len(pydirs), pydirs)
for (name, wkdir) in pydirs.items():
working_dir = wkdir or self.appdir
#ensure working dir is there
working_dir = wkdir or self.app_dir
self.tracewriter.dirs_made(*sh.mkdirslist(working_dir))
#do this before write just incase it craps out half way through
self.tracewriter.py_installed(name, working_dir)
#now actually do it
(stdout, stderr) = sh.execute(*PY_INSTALL,
cwd=working_dir,
run_as_root=True)
py_trace_name = "%s-%s" % (tr.PY_TRACE, name)
py_writer = tr.TraceWriter(tr.trace_fn(self.tracedir,
py_writer = tr.TraceWriter(tr.trace_fn(self.trace_dir,
py_trace_name))
# Format or json encoding isn't really needed here since this is
# more just for information output/lookup if desired.
py_writer.trace("CMD", " ".join(PY_INSTALL))
py_writer.trace("STDOUT", stdout)
py_writer.trace("STDERR", stderr)
@ -301,18 +324,18 @@ class PythonInstallComponent(PkgInstallComponent):
class PkgUninstallComponent(ComponentBase):
def __init__(self, component_name, keep_old=None, **kargs):
ComponentBase.__init__(self, component_name, **kargs)
self.tracereader = tr.TraceReader(tr.trace_fn(self.tracedir,
def __init__(self, *args, **kargs):
ComponentBase.__init__(self, *args, **kargs)
self.tracereader = tr.TraceReader(tr.trace_fn(self.trace_dir,
tr.IN_TRACE))
self.keep_old = keep_old
self.keep_old = kargs.get('keep_old')
def unconfigure(self):
if not self.keep_old:
#TODO this may not be the best solution siance we might
#actually want to remove config files but since most
#config files can be regenerated this should be fine (some
#can not though) so this is why we need to keep them
# TODO this may not be the best solution since we might
# actually want to remove config files but since most
# config files can be regenerated this should be fine (some
# can not though) so this is why we need to keep them.
self._unconfigure_files()
self._unconfigure_links()
self._unconfigure_runners()
@ -321,7 +344,7 @@ class PkgUninstallComponent(ComponentBase):
if RUNNER_CLS_MAPPING:
LOG.info("Unconfiguring %s runners.", len(RUNNER_CLS_MAPPING))
for (_, cls) in RUNNER_CLS_MAPPING.items():
instance = cls(self.cfg, self.component_name, self.tracedir)
instance = cls(self.cfg, self.component_name, self.trace_dir)
instance.unconfigure()
def _unconfigure_links(self):
@ -354,11 +377,11 @@ class PkgUninstallComponent(ComponentBase):
def _uninstall_pkgs(self):
pkgsfull = self.tracereader.packages_installed()
if pkgsfull:
LOG.info("Potentially removing %s packages (%s)",
len(pkgsfull), ", ".join(sorted(pkgsfull.keys())))
LOG.info("Potentially removing %s packages",
len(pkgsfull))
which_removed = self.packager.remove_batch(pkgsfull)
LOG.info("Actually removed %s packages (%s)",
len(which_removed), ", ".join(sorted(which_removed)))
len(which_removed), ", ".join(which_removed))
def _uninstall_touched_files(self):
filestouched = self.tracereader.files_touched()
@ -386,8 +409,8 @@ class PkgUninstallComponent(ComponentBase):
class PythonUninstallComponent(PkgUninstallComponent):
def __init__(self, component_name, *args, **kargs):
PkgUninstallComponent.__init__(self, component_name, *args, **kargs)
def __init__(self, *args, **kargs):
PkgUninstallComponent.__init__(self, *args, **kargs)
def uninstall(self):
self._uninstall_python()
@ -397,8 +420,9 @@ class PythonUninstallComponent(PkgUninstallComponent):
def _uninstall_pips(self):
pips = self.tracereader.pips_installed()
if pips:
LOG.info("Uninstalling %s pips.", len(pips))
pip.uninstall(pips, self.distro)
names = set([p['name'] for p in pips])
LOG.info("Uninstalling %s python packages (%s)" % (len(names), ", ".join(names)))
pip.uninstall_batch(pips, self.distro)
def _uninstall_python(self):
pylisting = self.tracereader.py_listing()
@ -409,10 +433,10 @@ class PythonUninstallComponent(PkgUninstallComponent):
class ProgramRuntime(ComponentBase):
def __init__(self, component_name, **kargs):
ComponentBase.__init__(self, component_name, **kargs)
self.tracewriter = tr.TraceWriter(tr.trace_fn(self.tracedir, tr.START_TRACE))
self.tracereader = tr.TraceReader(tr.trace_fn(self.tracedir, tr.START_TRACE))
def __init__(self, *args, **kargs):
ComponentBase.__init__(self, *args, **kargs)
self.tracewriter = tr.TraceWriter(tr.trace_fn(self.trace_dir, tr.START_TRACE))
self.tracereader = tr.TraceReader(tr.trace_fn(self.trace_dir, tr.START_TRACE))
def _get_apps_to_start(self):
return list()
@ -422,7 +446,7 @@ class ProgramRuntime(ComponentBase):
def _get_param_map(self, app_name):
return {
'ROOT': self.appdir,
'ROOT': self.app_dir,
}
def pre_start(self):
@ -435,12 +459,12 @@ class ProgramRuntime(ComponentBase):
# First make a pass and make sure all runtime (e.g. upstart)
# config files are in place....
cls = RUNNER_CLS_MAPPING[cfg_helpers.fetch_run_type(self.cfg)]
instance = cls(self.cfg, self.component_name, self.tracedir)
instance = cls(self.cfg, self.component_name, self.trace_dir)
tot_am = 0
for app_info in self._get_apps_to_start():
app_name = app_info["name"]
app_pth = app_info.get("path", app_name)
app_dir = app_info.get("app_dir", self.appdir)
app_dir = app_info.get("app_dir", self.app_dir)
# Adjust the program options now that we have real locations
program_opts = utils.param_replace_list(
self._get_app_options(app_name),
@ -458,12 +482,12 @@ class ProgramRuntime(ComponentBase):
def start(self):
# Select how we are going to start it
cls = RUNNER_CLS_MAPPING[cfg_helpers.fetch_run_type(self.cfg)]
instance = cls(self.cfg, self.component_name, self.tracedir)
instance = cls(self.cfg, self.component_name, self.trace_dir)
am_started = 0
for app_info in self._get_apps_to_start():
app_name = app_info["name"]
app_pth = app_info.get("path", app_name)
app_dir = app_info.get("app_dir", self.appdir)
app_dir = app_info.get("app_dir", self.app_dir)
# Adjust the program options now that we have real locations
program_opts = utils.param_replace_list(
self._get_app_options(app_name),
@ -500,7 +524,7 @@ class ProgramRuntime(ComponentBase):
else:
killer = killcls(self.cfg,
self.component_name,
self.tracedir,
self.trace_dir,
)
killer_instances[killcls] = killer
to_kill.append((app_name, killer))
@ -526,14 +550,14 @@ class ProgramRuntime(ComponentBase):
class PythonRuntime(ProgramRuntime):
def __init__(self, component_name, *args, **kargs):
ProgramRuntime.__init__(self, component_name, *args, **kargs)
def __init__(self, *args, **kargs):
ProgramRuntime.__init__(self, *args, **kargs)
class EmptyRuntime(ComponentBase):
def __init__(self, component_name, **kargs):
ComponentBase.__init__(self, component_name, **kargs)
self.tracereader = tr.TraceReader(tr.trace_fn(self.tracedir, tr.IN_TRACE))
def __init__(self, *args, **kargs):
ComponentBase.__init__(self, *args, **kargs)
self.tracereader = tr.TraceReader(tr.trace_fn(self.trace_dir, tr.IN_TRACE))
def configure(self):
return 0

View File

@ -21,29 +21,25 @@ from devstack import settings
from devstack import shell as sh
from devstack import utils
#id
# FIXME: This should probably come from the persona
TYPE = settings.DB
LOG = logging.getLogger("devstack.components.db")
#used for special setups
MYSQL = 'mysql'
# How long we wait before using the database after a restart
START_WAIT_TIME = settings.WAIT_ALIVE_SECS
#need to reset pw to blank since this distributions don't seem to
#always reset it when u uninstall the db
# Need to reset pw to blank since these distributions don't seem to
# always reset it when you uninstall the db
RESET_BASE_PW = ''
#links about how to reset if it fails
# Links about how to reset if we fail to set the PW
SQL_RESET_PW_LINKS = [
'https://help.ubuntu.com/community/MysqlPasswordReset',
'http://dev.mysql.com/doc/refman/5.0/en/resetting-permissions.html',
]
#used as a generic error message
# Used as a generic error message
BASE_ERROR = 'Currently we do not know how to [%s] for database type [%s]'
#config keys we warm up so u won't be prompted later
# PW keys we warm up so u won't be prompted later
PASSWORD_PROMPT = 'the database user'
WARMUP_PWS = [('sql', PASSWORD_PROMPT)]
@ -59,7 +55,7 @@ class DBUninstaller(comp.PkgUninstallComponent):
def pre_uninstall(self):
dbtype = self.cfg.get("db", "type")
dbactions = self.distro.commands[dbtype]
dbactions = self.distro.get_command(dbtype, quiet=True)
try:
if dbactions:
LOG.info(("Attempting to reset your db password to \"%s\" so"
@ -87,8 +83,8 @@ class DBInstaller(comp.PkgInstallComponent):
self.runtime = DBRuntime(*args, **kargs)
def _get_param_map(self, config_fn):
#this dictionary will be used for parameter replacement
#in pre-install and post-install sections
# This dictionary will be used for parameter replacement
# In pre-install and post-install sections
host_ip = self.cfg.get('host', 'ip')
out = {
'PASSWORD': self.pw_gen.get_password("sql", PASSWORD_PROMPT),
@ -104,38 +100,19 @@ class DBInstaller(comp.PkgInstallComponent):
self.pw_gen.get_password(key, prompt)
def _configure_db_confs(self):
dbtype = self.cfg.get("db", "type")
#TODO: use separate classes in devstack.distros.$distro.db and
# specify them in the yaml file
if self.distro.name == settings.RHEL6 and dbtype == MYSQL:
LOG.info("Fixing up %s mysql configs." % (settings.RHEL6))
fc = sh.load_file('/etc/my.cnf')
lines = fc.splitlines()
new_lines = list()
for line in lines:
if line.startswith('skip-grant-tables'):
line = '#' + line
new_lines.append(line)
fc = utils.joinlinesep(*new_lines)
with sh.Rooted(True):
sh.write_file('/etc/my.cnf', fc)
else:
raise NotImplementedError(
'Do not know how to configure db confs for %s' %
self.distro.name
)
pass
def post_install(self):
comp.PkgInstallComponent.post_install(self)
#fix up the db configs
# Fix up the db configs
self._configure_db_confs()
#extra actions to ensure we are granted access
# Extra actions to ensure we are granted access
dbtype = self.cfg.get("db", "type")
dbactions = self.distro.commands[dbtype]
dbactions = self.distro.get_command(dbtype, quiet=True)
#set your password
# Set your password
try:
if dbactions:
pwd_cmd = dbactions.get('set_pwd')
@ -155,7 +132,7 @@ class DBInstaller(comp.PkgInstallComponent):
LOG.warn(("Couldn't set your db password. It might have already been "
"set by a previous process."))
#ensure access granted
# Ensure access granted
if dbactions:
grant_cmd = dbactions.get('grant_all')
if grant_cmd:
@ -168,8 +145,8 @@ class DBInstaller(comp.PkgInstallComponent):
'USER': user,
}
cmds = [{'cmd': grant_cmd}]
#shell seems to be needed here
#since python escapes this to much...
# Shell seems to be needed here
# since python escapes this too much...
utils.execute_template(*cmds, params=params, shell=True)
@ -179,11 +156,7 @@ class DBRuntime(comp.EmptyRuntime):
def _get_run_actions(self, act, exception_cls):
dbtype = self.cfg.get("db", "type")
type_actions = self.distro.commands[dbtype]
if type_actions is None:
msg = BASE_ERROR % (act, dbtype)
raise NotImplementedError(msg)
distro_options = self.distro.commands[dbtype]
distro_options = self.distro.get_command(dbtype)
if distro_options is None:
msg = BASE_ERROR % (act, dbtype)
raise NotImplementedError(msg)
@ -241,7 +214,7 @@ class DBRuntime(comp.EmptyRuntime):
def drop_db(cfg, pw_gen, distro, dbname):
dbtype = cfg.get("db", "type")
dbactions = distro.commands[dbtype]
dbactions = distro.get_command(dbtype)
if dbactions and dbactions.get('drop_db'):
dropcmd = dbactions.get('drop_db')
params = dict()
@ -261,7 +234,7 @@ def drop_db(cfg, pw_gen, distro, dbname):
def create_db(cfg, pw_gen, distro, dbname):
dbtype = cfg.get("db", "type")
dbactions = distro.commands[dbtype]
dbactions = distro.get_command(dbtype)
if dbactions and dbactions.get('create_db'):
createcmd = dbactions.get('create_db')
params = dict()

View File

@ -27,11 +27,9 @@ from devstack.components import keystone
from devstack.image import creator
#id
TYPE = settings.GLANCE
LOG = logging.getLogger("devstack.components.glance")
#config files/sections
# Config files/sections
API_CONF = "glance-api.conf"
REG_CONF = "glance-registry.conf"
API_PASTE_CONF = 'glance-api-paste.ini'
@ -47,33 +45,32 @@ CONFIGS = [API_CONF, REG_CONF, API_PASTE_CONF,
READ_CONFIGS = [API_CONF, REG_CONF, API_PASTE_CONF,
REG_PASTE_CONF, SCRUB_CONF, SCRUB_PASTE_CONF]
#reg, api are here as possible subcomponents
# Reg, api, scrub are here as possible subsystems
GAPI = "api"
GREG = "reg"
GSCR = 'scrub'
#this db will be dropped and created
# This db will be dropped and created
DB_NAME = "glance"
#special subcomponents/options that are used in starting to know that images should be uploaded
NO_IMG_START = "no-image-upload"
# How long to wait before attempting image upload
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#what to start
# What applications to start
APP_OPTIONS = {
'glance-api': ['--config-file', sh.joinpths('%ROOT%', "etc", API_CONF)],
'glance-registry': ['--config-file', sh.joinpths('%ROOT%', "etc", REG_CONF)],
'glance-scrubber': ['--config-file', sh.joinpths('%ROOT%', "etc", REG_CONF)],
}
#how the subcompoent small name translates to an actual app
# How the subcomponent's short name translates to an actual app
SUB_TO_APP = {
GAPI: 'glance-api',
GREG: 'glance-registry',
GSCR: 'glance-scrubber',
}
#subdirs of the downloaded
# Subdirs of the downloaded (we are overriding the original)
CONFIG_DIR = 'etc'
BIN_DIR = 'bin'
@ -81,13 +78,16 @@ BIN_DIR = 'bin'
class GlanceUninstaller(comp.PythonUninstallComponent):
def __init__(self, *args, **kargs):
comp.PythonUninstallComponent.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
def known_subsystems(self):
return SUB_TO_APP.keys()
class GlanceInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
comp.PythonInstallComponent.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
def _get_download_locations(self):
places = list()
@ -97,6 +97,9 @@ class GlanceInstaller(comp.PythonInstallComponent):
})
return places
def known_subsystems(self):
return SUB_TO_APP.keys()
def _get_config_files(self):
return list(CONFIGS)
@ -111,22 +114,22 @@ class GlanceInstaller(comp.PythonInstallComponent):
def _get_source_config(self, config_fn):
if config_fn == POLICY_JSON:
fn = sh.joinpths(self.cfgdir, POLICY_JSON)
fn = sh.joinpths(self.cfg_dir, POLICY_JSON)
contents = sh.load_file(fn)
return (fn, contents)
elif config_fn == LOGGING_CONF:
fn = sh.joinpths(self.cfgdir, LOGGING_SOURCE_FN)
fn = sh.joinpths(self.cfg_dir, LOGGING_SOURCE_FN)
contents = sh.load_file(fn)
return (fn, contents)
return comp.PythonInstallComponent._get_source_config(self, config_fn)
def _config_adjust(self, contents, name):
#even bother opening??
# Even bother opening??
if name not in READ_CONFIGS:
return contents
#use config parser and
#then extract known configs that
#will need locations/directories/files made (or touched)...
# Use config parser and
# then extract known configs that
# will need locations/directories/files made (or touched)...
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
@ -135,15 +138,15 @@ class GlanceInstaller(comp.PythonInstallComponent):
if cache_dir:
LOG.info("Ensuring image cache data directory %s exists "\
"(and is empty)" % (cache_dir))
#destroy then recreate the image cache directory
# Destroy then recreate the image cache directory
sh.deldir(cache_dir)
self.tracewriter.dirs_made(*sh.mkdirslist(cache_dir))
if config.get('default', 'default_store') == 'file':
file_dir = config.get('default', 'filesystem_store_datadir')
if file_dir:
LOG.info("Ensuring file system store directory %s exists and is empty." % (file_dir))
#delete existing images
#and recreate the image directory
# Delete existing images
# and recreate the image directory
sh.deldir(file_dir)
self.tracewriter.dirs_made(*sh.mkdirslist(file_dir))
log_filename = config.get('default', 'log_file')
@ -153,24 +156,24 @@ class GlanceInstaller(comp.PythonInstallComponent):
if log_dir:
LOG.info("Ensuring log directory %s exists." % (log_dir))
self.tracewriter.dirs_made(*sh.mkdirslist(log_dir))
#destroy then recreate it (the log file)
# Destroy then recreate it (the log file)
sh.unlink(log_filename)
self.tracewriter.file_touched(sh.touch_file(log_filename))
if config.getboolean('default', 'delayed_delete'):
data_dir = config.get('default', 'scrubber_datadir')
if data_dir:
LOG.info("Ensuring scrubber data dir %s exists and is empty." % (data_dir))
#destroy then recreate the scrubber data directory
# Destroy then recreate the scrubber data directory
sh.deldir(data_dir)
self.tracewriter.dirs_made(*sh.mkdirslist(data_dir))
#nothing modified so just return the original
# Nothing modified so just return the original
return contents
def _get_param_map(self, config_fn):
#this dict will be used to fill in the configuration
#params with actual values
# This dict will be used to fill in the configuration
# params with actual values
mp = dict()
mp['DEST'] = self.appdir
mp['DEST'] = self.app_dir
mp['SYSLOG'] = self.cfg.getboolean("default", "syslog")
mp['SQL_CONN'] = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
@ -182,14 +185,19 @@ class GlanceInstaller(comp.PythonInstallComponent):
class GlanceRuntime(comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
def known_subsystems(self):
return SUB_TO_APP.keys()
def _get_apps_to_start(self):
apps = [{'name': app_name,
'path': sh.joinpths(self.appdir, BIN_DIR, app_name),
}
for app_name in APP_OPTIONS.keys()
]
apps = list()
for subsys in self.desired_subsystems:
app = dict()
app['name'] = SUB_TO_APP[subsys]
app['path'] = sh.joinpths(self.bin_dir, app['name'])
apps.append(app)
return apps
def _get_app_options(self, app):
@ -197,7 +205,7 @@ class GlanceRuntime(comp.PythonRuntime):
def post_start(self):
comp.PythonRuntime.post_start(self)
#install any images that need activating...
# Install any images that need activating...
# TODO: make this less cheesy - need to wait till glance goes online
LOG.info("Waiting %s seconds so that glance can start up before image install." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)

View File

@ -21,50 +21,37 @@ from devstack import settings
from devstack import shell as sh
from devstack import utils
#id
TYPE = settings.HORIZON
LOG = logging.getLogger("devstack.components.horizon")
#actual dir names
# Actual dir names
ROOT_HORIZON = 'horizon'
ROOT_DASH = 'openstack_dashboard'
#name used for python install trace
# Name used for python install trace
HORIZON_NAME = ROOT_HORIZON
DASH_NAME = 'dashboard'
#config files messed with
# Config files messed with
HORIZON_PY_CONF = "horizon_settings.py"
HORIZON_PY_CONF_TGT = ['local', 'local_settings.py']
HORIZON_APACHE_CONF = '000-default'
CONFIGS = [HORIZON_PY_CONF, HORIZON_APACHE_CONF]
#http://wiki.apache.org/httpd/DistrosDefaultLayout
#db sync that needs to happen for horizon
# DB sync that needs to happen for horizon
DB_SYNC_CMD = ['python', 'manage.py', 'syncdb']
#special apache directory (TODO describe more about this)
# Special apache directory (TODO describe more about this)
BLACKHOLE_DIR = '.blackhole'
# Other apache settings
APACHE_ERROR_LOG_FN = "error.log"
APACHE_ACCESS_LOG_FN = "access.log"
APACHE_DEF_PORT = 80
#TODO: maybe this should be a subclass that handles these differences
APACHE_FIXUPS = {
'SOCKET_CONF': "/etc/httpd/conf.d/wsgi-socket-prefix.conf",
'HTTPD_CONF': '/etc/httpd/conf/httpd.conf',
}
APACHE_FIXUPS_DISTROS = [settings.RHEL6, settings.FEDORA16]
#for when quantum client is not need we need some fake files so python doesn't croak
FAKE_QUANTUM_FILES = ['__init__.py', 'client.py']
#users which apache may not like starting as
# Users which apache may not like starting as..
BAD_APACHE_USERS = ['root']
#apache logs will go here
# Apache logs will go here
LOGS_DIR = "logs"
@ -76,9 +63,9 @@ class HorizonUninstaller(comp.PythonUninstallComponent):
class HorizonInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
comp.PythonInstallComponent.__init__(self, *args, **kargs)
self.horizon_dir = sh.joinpths(self.appdir, ROOT_HORIZON)
self.dash_dir = sh.joinpths(self.appdir, ROOT_DASH)
self.log_dir = sh.joinpths(self.component_root, LOGS_DIR)
self.horizon_dir = sh.joinpths(self.app_dir, ROOT_HORIZON)
self.dash_dir = sh.joinpths(self.app_dir, ROOT_DASH)
self.log_dir = sh.joinpths(self.component_dir, LOGS_DIR)
def _get_download_locations(self):
places = list()
@ -89,16 +76,17 @@ class HorizonInstaller(comp.PythonInstallComponent):
return places
def verify(self):
comp.PythonInstallComponent.verify(self)
self._check_ug()
def _get_symlinks(self):
links = comp.PythonInstallComponent._get_symlinks(self)
src = self._get_target_config_name(HORIZON_APACHE_CONF)
links[src] = self.distro.commands['apache']['settings']['conf-link-target']
links[src] = self.distro.get_command('apache', 'settings', 'conf-link-target')
if utils.service_enabled(settings.QUANTUM_CLIENT, self.instances, False):
#TODO remove this junk, blah, puke that we have to do this
# TODO remove this junk, blah, puke that we have to do this
qc = self.instances[settings.QUANTUM_CLIENT]
src_pth = sh.joinpths(qc.appdir, 'quantum')
src_pth = sh.joinpths(qc.app_dir, 'quantum')
tgt_dir = sh.joinpths(self.dash_dir, 'quantum')
links[src_pth] = tgt_dir
return links
@ -125,17 +113,18 @@ class HorizonInstaller(comp.PythonInstallComponent):
return list(CONFIGS)
def _setup_blackhole(self):
#create an empty directory that apache uses as docroot
self.tracewriter.dirs_made(*sh.mkdirslist(sh.joinpths(self.appdir, BLACKHOLE_DIR)))
# Create an empty directory that apache uses as docroot
self.tracewriter.dirs_made(*sh.mkdirslist(sh.joinpths(self.app_dir, BLACKHOLE_DIR)))
def _sync_db(self):
#Initialize the horizon database (it stores sessions and notices shown to users).
#The user system is external (keystone).
# Initialize the horizon database (it stores sessions and notices shown to users).
# The user system is external (keystone).
LOG.info("Initializing the horizon database.")
sh.execute(*DB_SYNC_CMD, cwd=self.appdir)
sh.execute(*DB_SYNC_CMD, cwd=self.app_dir)
def _ensure_db_access(self):
# ../openstack-dashboard/local needs to be writeable by the runtime user
# Need db access:
# openstack-dashboard/local needs to be writeable by the runtime user
# since currently its storing the sql-lite databases there (TODO fix that)
path = sh.joinpths(self.dash_dir, 'local')
if sh.isdir(path):
@ -151,44 +140,10 @@ class HorizonInstaller(comp.PythonInstallComponent):
self.tracewriter.dirs_made(*sh.mkdirslist(self.log_dir))
def _config_fixups(self):
#currently just handling rhel fixups
#TODO: maybe this should be a subclass that handles these differences
if not (self.distro in APACHE_FIXUPS_DISTROS):
return
#it seems like to get this to work
#we need to do some conf.d/conf work which sort of sucks
(user, group) = self._get_apache_user_group()
socket_fn = APACHE_FIXUPS.get("SOCKET_CONF")
self.tracewriter.file_touched(socket_fn)
#not recorded since we aren't really creating this
httpd_fn = APACHE_FIXUPS.get("HTTPD_CONF")
with sh.Rooted(True):
#fix the socket prefix to someplace we can use
fc = "WSGISocketPrefix %s" % (sh.joinpths(self.log_dir, "wsgi-socket"))
sh.write_file(socket_fn, fc)
#now adjust the run user and group (of httpd.conf)
new_lines = list()
for line in sh.load_file(httpd_fn).splitlines():
if line.startswith("User "):
line = "User %s" % (user)
if line.startswith("Group "):
line = "Group %s" % (group)
new_lines.append(line)
sh.write_file(httpd_fn, utils.joinlinesep(*new_lines))
def _fix_quantum(self):
if not (utils.service_enabled(settings.QUANTUM_CLIENT, self.instances, False)):
#make the fake quantum (apparently needed so imports don't fail???)
#TODO remove this...
quantum_dir = sh.joinpths(self.dash_dir, 'quantum')
if not sh.isdir(quantum_dir):
self.tracewriter.dirs_made(*sh.mkdirslist(quantum_dir))
for fn in FAKE_QUANTUM_FILES:
self.tracewriter.file_touched(sh.touch_file(sh.joinpths(quantum_dir, fn)))
pass
def post_install(self):
comp.PythonInstallComponent.post_install(self)
self._fix_quantum()
self._sync_db()
self._setup_blackhole()
self._ensure_db_access()
@ -200,18 +155,18 @@ class HorizonInstaller(comp.PythonInstallComponent):
return (user, group)
def _get_param_map(self, config_fn):
#this dict will be used to fill in the configuration
#params with actual values
# This dict will be used to fill in the configuration
# params with actual values
mp = dict()
if config_fn == HORIZON_APACHE_CONF:
(user, group) = self._get_apache_user_group()
mp['GROUP'] = group
mp['USER'] = user
mp['ACCESS_LOG'] = sh.joinpths(self.log_dir, APACHE_ACCESS_LOG_FN)
mp['ERROR_LOG'] = sh.joinpths(self.log_dir, APACHE_ERROR_LOG_FN)
mp['GROUP'] = group
mp['HORIZON_DIR'] = self.appdir
mp['HORIZON_DIR'] = self.app_dir
mp['HORIZON_PORT'] = self.cfg.getdefaulted('horizon', 'port', APACHE_DEF_PORT)
mp['USER'] = user
mp['VPN_DIR'] = sh.joinpths(self.appdir, "vpn")
mp['VPN_DIR'] = sh.joinpths(self.app_dir, "vpn")
else:
mp['OPENSTACK_HOST'] = self.cfg.get('host', 'ip')
return mp
@ -227,7 +182,7 @@ class HorizonRuntime(comp.EmptyRuntime):
return self.restart()
else:
cmds = [{
'cmd': self.distro.commands['apache']['start'],
'cmd': self.distro.get_command('apache', 'start'),
'run_as_root': True,
}]
utils.execute_template(*cmds,
@ -237,7 +192,7 @@ class HorizonRuntime(comp.EmptyRuntime):
def restart(self):
cmds = [{
'cmd': self.distro.commands['apache']['restart'],
'cmd': self.distro.get_command('apache', 'restart'),
'run_as_root': True,
}]
utils.execute_template(*cmds,
@ -249,7 +204,7 @@ class HorizonRuntime(comp.EmptyRuntime):
curr_status = self.status()
if curr_status != comp.STATUS_STOPPED:
cmds = [{
'cmd': self.distro.commands['apache']['stop'],
'cmd': self.distro.get_command('apache', 'stop'),
'run_as_root': True,
}]
utils.execute_template(*cmds,
@ -260,7 +215,7 @@ class HorizonRuntime(comp.EmptyRuntime):
def status(self):
cmds = [{
'cmd': self.distro.commands['apache']['status'],
'cmd': self.distro.get_command('apache', 'status'),
'run_as_root': True,
}]
run_result = utils.execute_template(*cmds,

View File

@ -27,36 +27,34 @@ from devstack import utils
from devstack.components import db
#id
TYPE = settings.KEYSTONE
LOG = logging.getLogger("devstack.components.keystone")
#this db will be dropped then created
# This db will be dropped then created
DB_NAME = "keystone"
#subdirs of the git checkout
# Subdirs of the git checkout
BIN_DIR = "bin"
CONFIG_DIR = "etc"
#simple confs
# Simple confs
ROOT_CONF = "keystone.conf"
CATALOG_CONF = 'default_catalog.templates'
LOGGING_CONF = "logging.conf"
LOGGING_SOURCE_FN = 'logging.conf.sample'
CONFIGS = [ROOT_CONF, CATALOG_CONF, LOGGING_CONF]
#this is a special conf
# This is a special conf/init script
MANAGE_DATA_CONF = 'keystone_init.sh'
MANAGE_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
MANAGE_ADMIN_USER = 'admin'
MANAGE_DEMO_USER = 'demo'
MANAGE_INVIS_USER = 'invisible_to_admin'
#sync db command
# Sync db command
MANAGE_APP_NAME = 'keystone-manage'
SYNC_DB_CMD = [sh.joinpths('%BINDIR%', MANAGE_APP_NAME), 'db_sync']
#what to start
# What to start
APP_NAME = 'keystone-all'
APP_OPTIONS = {
APP_NAME: ['--config-file', sh.joinpths('%ROOT%', CONFIG_DIR, ROOT_CONF),
@ -65,17 +63,19 @@ APP_OPTIONS = {
}
#used to wait until started before we can run the data setup script
# Used to wait until started before we can run the data setup script
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#swift template additions
# Swift template additions
# TODO: get rid of these
SWIFT_TEMPL_ADDS = ['catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
'catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
'catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/',
'catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_$(tenant_id)s',
"catalog.RegionOne.object_store.name = 'Swift Service'"]
#quantum template additions
# Quantum template additions
# TODO: get rid of these
QUANTUM_TEMPL_ADDS = ['catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/',
'catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/',
'catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/',
@ -85,15 +85,15 @@ QUANTUM_TEMPL_ADDS = ['catalog.RegionOne.network.publicURL = http://%SERVICE_HOS
class KeystoneUninstaller(comp.PythonUninstallComponent):
def __init__(self, *args, **kargs):
comp.PythonUninstallComponent.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
class KeystoneInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
comp.PythonInstallComponent.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
def _get_download_locations(self):
places = list()
@ -112,9 +112,9 @@ class KeystoneInstaller(comp.PythonInstallComponent):
def _sync_db(self):
LOG.info("Syncing keystone to database named %s.", DB_NAME)
params = dict()
params['BINDIR'] = self.bindir
params['BINDIR'] = self.bin_dir
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bindir, params=params)
utils.execute_template(*cmds, cwd=self.bin_dir, params=params)
def _get_config_files(self):
return list(CONFIGS)
@ -129,16 +129,16 @@ class KeystoneInstaller(comp.PythonInstallComponent):
(_, contents) = utils.load_template(self.component_name, MANAGE_DATA_CONF)
params = self._get_param_map(MANAGE_DATA_CONF)
contents = utils.param_replace(contents, params, True)
tgt_fn = sh.joinpths(self.bindir, MANAGE_DATA_CONF)
tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
sh.write_file(tgt_fn, contents)
sh.chmod(tgt_fn, 0755)
self.tracewriter.file_touched(tgt_fn)
def _config_adjust(self, contents, name):
if name == ROOT_CONF:
#use config parser and
#then extract known configs that
#will need locations/directories/files made (or touched)...
# Use config parser and
# then extract known configs that
# ill need locations/directories/files made (or touched)...
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
config.readfp(stream)
@ -149,7 +149,7 @@ class KeystoneInstaller(comp.PythonInstallComponent):
if log_dir:
LOG.info("Ensuring log directory %s exists." % (log_dir))
self.tracewriter.dirs_made(*sh.mkdirslist(log_dir))
#destroy then recreate it (the log file)
# Destroy then recreate it (the log file)
sh.unlink(log_filename)
self.tracewriter.file_touched(sh.touch_file(log_filename))
elif name == CATALOG_CONF:
@ -174,7 +174,7 @@ class KeystoneInstaller(comp.PythonInstallComponent):
def _get_source_config(self, config_fn):
if config_fn == LOGGING_CONF:
fn = sh.joinpths(self.cfgdir, LOGGING_SOURCE_FN)
fn = sh.joinpths(self.cfg_dir, LOGGING_SOURCE_FN)
contents = sh.load_file(fn)
return (fn, contents)
return comp.PythonInstallComponent._get_source_config(self, config_fn)
@ -183,16 +183,16 @@ class KeystoneInstaller(comp.PythonInstallComponent):
get_shared_params(self.cfg, self.pw_gen)
def _get_param_map(self, config_fn):
#these be used to fill in the configuration/cmds +
#params with actual values
# These be used to fill in the configuration/cmds +
# params with actual values
mp = dict()
mp['SERVICE_HOST'] = self.cfg.get('host', 'ip')
mp['DEST'] = self.appdir
mp['BIN_DIR'] = self.bindir
mp['CONFIG_FILE'] = sh.joinpths(self.cfgdir, ROOT_CONF)
mp['DEST'] = self.app_dir
mp['BIN_DIR'] = self.bin_dir
mp['CONFIG_FILE'] = sh.joinpths(self.cfg_dir, ROOT_CONF)
if config_fn == ROOT_CONF:
mp['SQL_CONN'] = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
mp['KEYSTONE_DIR'] = self.appdir
mp['KEYSTONE_DIR'] = self.app_dir
mp.update(get_shared_params(self.cfg, self.pw_gen))
elif config_fn == MANAGE_DATA_CONF:
mp.update(get_shared_params(self.cfg, self.pw_gen))
@ -202,20 +202,20 @@ class KeystoneInstaller(comp.PythonInstallComponent):
class KeystoneRuntime(comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
def post_start(self):
tgt_fn = sh.joinpths(self.bindir, MANAGE_DATA_CONF)
tgt_fn = sh.joinpths(self.bin_dir, MANAGE_DATA_CONF)
if sh.isfile(tgt_fn):
#still there, run it
#these environment additions are important
#in that they eventually affect how this script runs
# If its still there, run it
# these environment additions are important
# in that they eventually affect how this script runs
LOG.info("Waiting %s seconds so that keystone can start up before running first time init." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
env = dict()
env['ENABLED_SERVICES'] = ",".join(self.instances.keys())
env['BIN_DIR'] = self.bindir
env['BIN_DIR'] = self.bin_dir
setup_cmd = MANAGE_CMD_ROOT + [tgt_fn]
LOG.info("Running (%s) command to initialize keystone." % (" ".join(setup_cmd)))
sh.execute(*setup_cmd, env_overrides=env, run_as_root=False)
@ -227,7 +227,7 @@ class KeystoneRuntime(comp.PythonRuntime):
for app_name in APP_OPTIONS.keys():
apps.append({
'name': app_name,
'path': sh.joinpths(self.bindir, app_name),
'path': sh.joinpths(self.bin_dir, app_name),
})
return apps
@ -239,7 +239,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
mp = dict()
host_ip = config.get('host', 'ip')
#these match what is in keystone_init.sh
# These match what is in keystone_init.sh
mp['SERVICE_TENANT_NAME'] = 'service'
if service_user_name:
mp['SERVICE_USERNAME'] = str(service_user_name)
@ -248,7 +248,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
mp['ADMIN_TENANT_NAME'] = mp['ADMIN_USER_NAME']
mp['DEMO_TENANT_NAME'] = mp['DEMO_USER_NAME']
#tokens and passwords
# Tokens and passwords
mp['SERVICE_TOKEN'] = pw_gen.get_password(
"service_token",
'the service admin token',
@ -263,7 +263,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
'service authentication',
)
#components of the auth endpoint
# Components of the auth endpoint
keystone_auth_host = config.getdefaulted('keystone', 'keystone_auth_host', host_ip)
mp['KEYSTONE_AUTH_HOST'] = keystone_auth_host
keystone_auth_port = config.getdefaulted('keystone', 'keystone_auth_port', '35357')
@ -271,7 +271,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
keystone_auth_proto = config.getdefaulted('keystone', 'keystone_auth_protocol', 'http')
mp['KEYSTONE_AUTH_PROTOCOL'] = keystone_auth_proto
#components of the service endpoint
# Components of the service endpoint
keystone_service_host = config.getdefaulted('keystone', 'keystone_service_host', host_ip)
mp['KEYSTONE_SERVICE_HOST'] = keystone_service_host
keystone_service_port = config.getdefaulted('keystone', 'keystone_service_port', '5000')
@ -279,7 +279,7 @@ def get_shared_params(config, pw_gen, service_user_name=None):
keystone_service_proto = config.getdefaulted('keystone', 'keystone_service_protocol', 'http')
mp['KEYSTONE_SERVICE_PROTOCOL'] = keystone_service_proto
#http/https endpoints
# Uri's of the http/https endpoints
mp['AUTH_ENDPOINT'] = urlunparse((keystone_auth_proto,
"%s:%s" % (keystone_auth_host, keystone_auth_port),
"v2.0", "", "", ""))

View File

@ -16,10 +16,7 @@
from devstack import component as comp
from devstack import log as logging
from devstack import settings
#id
TYPE = settings.KEYSTONE_CLIENT
LOG = logging.getLogger("devstack.components.keystone_client")

View File

@ -25,41 +25,39 @@ from devstack import utils
from devstack.components import db
#id
TYPE = settings.MELANGE
LOG = logging.getLogger("devstack.components.melange")
#this db will be dropped then created
# This db will be dropped then created
DB_NAME = 'melange'
#subdirs of the checkout/download
# Subdirs of the checkout/download
BIN_DIR = 'bin'
#configs
# Basic configs
ROOT_CONF = 'melange.conf.sample'
ROOT_CONF_REAL_NAME = 'melange.conf'
CONFIGS = [ROOT_CONF]
CFG_LOC = ['etc', 'melange']
#sensible defaults
# Sensible defaults
DEF_CIDR_RANGE = 'FE-EE-DD-00-00-00/24'
#how we sync melange with the db
# How we sync melange with the db
DB_SYNC_CMD = [
{'cmd': ['%BINDIR%/melange-manage', '--config-file=%CFG_FILE%', 'db_sync']},
{'cmd': ['%BIN_DIR%/melange-manage', '--config-file=%CFG_FILE%', 'db_sync']},
]
#???
# TODO: ???
CIDR_CREATE_CMD = [
{'cmd': ['melange', 'mac_address_range', 'create', 'cidr', '%CIDR_RANGE%']},
]
#what to start
# What to start
APP_OPTIONS = {
'melange-server': ['--config-file', '%CFG_FILE%'],
}
#subcomponent that specifies we should make the network cidr using melange
# Special option that specifies we should make the network cidr using melange
CREATE_CIDR = "create-cidr"
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
@ -72,8 +70,8 @@ class MelangeUninstaller(comp.PythonUninstallComponent):
class MelangeInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
comp.PythonInstallComponent.__init__(self, *args, **kargs)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfgdir = sh.joinpths(self.appdir, *CFG_LOC)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, *CFG_LOC)
def _get_download_locations(self):
places = list()
@ -96,8 +94,8 @@ class MelangeInstaller(comp.PythonInstallComponent):
def _sync_db(self):
LOG.info("Syncing the database with melange.")
mp = dict()
mp['BINDIR'] = self.bindir
mp['CFG_FILE'] = sh.joinpths(self.cfgdir, ROOT_CONF_REAL_NAME)
mp['BIN_DIR'] = self.bin_dir
mp['CFG_FILE'] = sh.joinpths(self.cfg_dir, ROOT_CONF_REAL_NAME)
utils.execute_template(*DB_SYNC_CMD, params=mp)
def _get_config_files(self):
@ -122,7 +120,7 @@ class MelangeInstaller(comp.PythonInstallComponent):
def _get_source_config(self, config_fn):
if config_fn == ROOT_CONF:
srcfn = sh.joinpths(self.cfgdir, config_fn)
srcfn = sh.joinpths(self.cfg_dir, config_fn)
contents = sh.load_file(srcfn)
return (srcfn, contents)
else:
@ -130,7 +128,7 @@ class MelangeInstaller(comp.PythonInstallComponent):
def _get_target_config_name(self, config_fn):
if config_fn == ROOT_CONF:
return sh.joinpths(self.cfgdir, ROOT_CONF_REAL_NAME)
return sh.joinpths(self.cfg_dir, ROOT_CONF_REAL_NAME)
else:
return comp.PythonInstallComponent._get_target_config_name(self, config_fn)
@ -138,15 +136,15 @@ class MelangeInstaller(comp.PythonInstallComponent):
class MelangeRuntime(comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfgdir = sh.joinpths(self.appdir, *CFG_LOC)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, *CFG_LOC)
def _get_apps_to_start(self):
apps = list()
for app_name in APP_OPTIONS.keys():
apps.append({
'name': app_name,
'path': sh.joinpths(self.bindir, app_name),
'path': sh.joinpths(self.bin_dir, app_name),
})
return apps
@ -155,14 +153,14 @@ class MelangeRuntime(comp.PythonRuntime):
def _get_param_map(self, app_name):
pmap = comp.PythonRuntime._get_param_map(self, app_name)
pmap['CFG_FILE'] = sh.joinpths(self.cfgdir, ROOT_CONF_REAL_NAME)
pmap['CFG_FILE'] = sh.joinpths(self.cfg_dir, ROOT_CONF_REAL_NAME)
return pmap
def post_start(self):
comp.PythonRuntime.post_start(self)
# FIXME: This is a bit of a hack. How do we document "flags" like this?
flags = self.component_opts.get('flags', [])
if CREATE_CIDR in flags or not flags:
flags = []
if CREATE_CIDR in flags:
LOG.info("Waiting %s seconds so that the melange server can start up before cidr range creation." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
mp = dict()

View File

@ -16,10 +16,7 @@
from devstack import component as comp
from devstack import log as logging
from devstack import settings
#id
TYPE = settings.MELANGE_CLIENT
LOG = logging.getLogger("devstack.components.melange_client")

View File

@ -28,17 +28,15 @@ from devstack import utils
from devstack.components import db
from devstack.components import keystone
#id
TYPE = settings.NOVA
LOG = logging.getLogger('devstack.components.nova')
#special generated conf
# Special generated conf
API_CONF = 'nova.conf'
#how we reference some config files (in applications)
# How we reference some config files (in applications)
CFG_FILE_OPT = '--config-file'
#normal conf
# Normal conf
PASTE_CONF = 'nova-api-paste.ini'
PASTE_SOURCE_FN = 'api-paste.ini'
POLICY_CONF = 'policy.json'
@ -47,19 +45,19 @@ LOGGING_CONF = "logging.conf"
CONFIGS = [PASTE_CONF, POLICY_CONF, LOGGING_CONF]
ADJUST_CONFIGS = [PASTE_CONF]
#this is a special conf
# This is a special conf
NET_INIT_CONF = 'nova-network-init.sh'
NET_INIT_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
#this db will be dropped then created
# This db will be dropped then created
DB_NAME = 'nova'
#this makes the database be in sync with nova
# This makes the database be in sync with nova
DB_SYNC_CMD = [
{'cmd': ['%BINDIR%/nova-manage', CFG_FILE_OPT, '%CFGFILE%', 'db', 'sync']},
{'cmd': ['%BIN_DIR%/nova-manage', CFG_FILE_OPT, '%CFGFILE%', 'db', 'sync']},
]
#these are used for nova volumens
# These are used for nova volumes
VG_CHECK_CMD = [
{'cmd': ['vgs', '%VOLUME_GROUP%'],
'run_as_root': True}
@ -81,23 +79,8 @@ VG_LVREMOVE_CMD = [
'run_as_root': True}
]
# iscsi restart commands
RESTART_TGT_CMD = {
settings.UBUNTU11: [
{'cmd': ['stop', 'tgt'], 'run_as_root': True},
{'cmd': ['start', 'tgt'], 'run_as_root': True}
],
settings.RHEL6: [
{'cmd': ['service', 'tgtd', 'stop'], 'run_as_root': True},
{'cmd': ['service', 'tgtd', 'start'], 'run_as_root': True}
],
settings.FEDORA16: [
{'cmd': ['service', 'tgtd', 'stop'], 'run_as_root': True},
{'cmd': ['service', 'tgtd', 'start'], 'run_as_root': True}
],
}
# NCPU, NVOL, NAPI ... are here as possible subcomponents of nova
# NCPU, NVOL, NAPI ... are here as possible subsystems of nova
NCPU = "cpu"
NVOL = "vol"
NAPI = "api"
@ -107,7 +90,7 @@ NCERT = "cert"
NSCHED = "sched"
NCAUTH = "cauth"
NXVNC = "xvnc"
SUBCOMPONENTS = [NCPU, NVOL, NAPI,
SUBSYSTEMS = [NCPU, NVOL, NAPI,
NOBJ, NNET, NCERT, NSCHED, NCAUTH, NXVNC]
# What to start
@ -137,17 +120,17 @@ SUB_COMPONENT_NAME_MAP = {
NXVNC: 'nova-xvpvncproxy',
}
#subdirs of the checkout/download
# Subdirs of the checkout/download
BIN_DIR = 'bin'
CONFIG_DIR = "etc"
#network class/driver/manager templs
# Network class/driver/manager templs
QUANTUM_MANAGER = 'nova.network.quantum.manager.QuantumManager'
QUANTUM_IPAM_LIB = 'nova.network.quantum.melange_ipam_lib'
NET_MANAGER_TEMPLATE = 'nova.network.manager.%s'
FIRE_MANAGER_TEMPLATE = 'nova.virt.libvirt.firewall.%s'
#sensible defaults
# Sensible defaults
DEF_IMAGE_SERVICE = 'nova.image.glance.GlanceImageService'
DEF_SCHEDULER = 'nova.scheduler.simple.SimpleScheduler'
DEF_GLANCE_PORT = 9292
@ -160,11 +143,11 @@ DEF_NET_MANAGER = 'FlatDHCPManager'
DEF_VOL_PREFIX = 'volume-'
DEF_VOL_TEMPL = DEF_VOL_PREFIX + '%08x'
#default virt types
# Default virt types
DEF_VIRT_DRIVER = 'libvirt'
DEF_VIRT_TYPE = 'qemu'
#virt drivers to there connection name
# Virt drivers map -> to there connection name
VIRT_DRIVER_CON_MAP = {
'libvirt': 'libvirt',
'xenserver': 'xenapi',
@ -172,7 +155,7 @@ VIRT_DRIVER_CON_MAP = {
'baremetal': 'baremetal',
}
#only turned on if vswitch enabled
# Only turned on if openvswitch enabled
QUANTUM_OPENSWITCH_OPS = {
'libvirt_vif_type': 'ethernet',
'libvirt_vif_driver': 'nova.virt.libvirt.vif.LibvirtOpenVswitchDriver',
@ -180,24 +163,24 @@ QUANTUM_OPENSWITCH_OPS = {
'quantum_use_dhcp': True,
}
#this is a special conf
# This is a special conf
CLEANER_DATA_CONF = 'nova-clean.sh'
CLEANER_CMD_ROOT = [sh.joinpths("/", "bin", 'bash')]
# FIXME:
#rhel6/fedora libvirt policy
#http://wiki.libvirt.org/page/SSHPolicyKitSetup
LIBVIRT_POLICY_FN = "/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla"
LIBVIRT_POLICY_CONTENTS = """
[libvirt Management Access]
Identity=unix-group:libvirtd
Action=org.libvirt.unix.manage
ResultAny=yes
ResultInactive=yes
ResultActive=yes
"""
POLICY_DISTROS = [settings.RHEL6, settings.FEDORA16]
#LIBVIRT_POLICY_FN = "/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla"
#LIBVIRT_POLICY_CONTENTS = """
#[libvirt Management Access]
#Identity=unix-group:libvirtd
#Action=org.libvirt.unix.manage
#ResultAny=yes
#ResultInactive=yes
#ResultActive=yes
#"""
#xenserver specific defaults
# Xenserver specific defaults
XS_DEF_INTERFACE = 'eth1'
XA_CONNECTION_ADDR = '169.254.0.1'
XS_VNC_ADDR = XA_CONNECTION_ADDR
@ -206,19 +189,19 @@ XA_CONNECTION_PORT = 80
XA_DEF_USER = 'root'
XA_DEF_CONNECTION_URL = urlunparse(('http', "%s:%s" % (XA_CONNECTION_ADDR, XA_CONNECTION_PORT), "", '', '', ''))
#vnc specific defaults
# Vnc specific defaults
VNC_DEF_ADDR = '127.0.0.1'
#std compute extensions
# Nova std compute extensions
STD_COMPUTE_EXTS = 'nova.api.openstack.compute.contrib.standard_extensions'
#config keys we warm up so u won't be prompted later
# Config keys we warm up so u won't be prompted later
WARMUP_PWS = ['rabbit']
#used to wait until started before we can run the data setup script
# Used to wait until started before we can run the data setup script
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#nova conf default section
# Nova conf default section
NV_CONF_DEF_SECTION = "[DEFAULT]"
@ -244,21 +227,24 @@ def _canon_libvirt_type(virt_type):
class NovaUninstaller(comp.PythonUninstallComponent):
def __init__(self, *args, **kargs):
comp.PythonUninstallComponent.__init__(self, *args, **kargs)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
def known_subsystems(self):
return SUBSYSTEMS
def pre_uninstall(self):
self._clear_libvirt_domains()
self._clean_it()
def _clean_it(self):
#these environment additions are important
#in that they eventually affect how this script runs
# These environment additions are important
# in that they eventually affect how this script runs
env = dict()
env['ENABLED_SERVICES'] = ",".join(SUBCOMPONENTS)
env['BIN_DIR'] = self.bindir
env['ENABLED_SERVICES'] = ",".join(self.desired_subsystems)
env['BIN_DIR'] = self.bin_dir
env['VOLUME_NAME_PREFIX'] = self.cfg.getdefaulted('nova', 'volume_name_prefix', DEF_VOL_PREFIX)
cleaner_fn = sh.joinpths(self.bindir, CLEANER_DATA_CONF)
cleaner_fn = sh.joinpths(self.bin_dir, CLEANER_DATA_CONF)
if sh.isfile(cleaner_fn):
LOG.info("Cleaning up your system by running nova cleaner script [%s]." % (cleaner_fn))
cmd = CLEANER_CMD_ROOT + [cleaner_fn]
@ -269,28 +255,28 @@ class NovaUninstaller(comp.PythonUninstallComponent):
if virt_driver == 'libvirt':
inst_prefix = self.cfg.getdefaulted('nova', 'instance_name_prefix', DEF_INSTANCE_PREFIX)
libvirt_type = _canon_libvirt_type(self.cfg.get('nova', 'libvirt_type'))
virsh.clear_libvirt_domains(self.distro.name, libvirt_type, inst_prefix)
virsh.clear_libvirt_domains(self.distro, libvirt_type, inst_prefix)
class NovaInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
comp.PythonInstallComponent.__init__(self, *args, **kargs)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.paste_conf_fn = self._get_target_config_name(PASTE_CONF)
self.volumes_enabled = False
package_names = [p['name']
for p in self.component_opts.get('packages', [])
]
if NVOL in package_names:
if NVOL in self.desired_subsystems:
self.volumes_enabled = True
self.xvnc_enabled = False
if NXVNC in package_names:
if NXVNC in self.desired_subsystems:
self.xvnc_enabled = True
def known_subsystems(self):
return SUBSYSTEMS
def _get_symlinks(self):
links = comp.PythonInstallComponent._get_symlinks(self)
source_fn = sh.joinpths(self.cfgdir, API_CONF)
source_fn = sh.joinpths(self.cfg_dir, API_CONF)
links[source_fn] = sh.joinpths(self._get_link_dir(), API_CONF)
return links
@ -303,11 +289,12 @@ class NovaInstaller(comp.PythonInstallComponent):
return places
def warm_configs(self):
for pw_key in WARMUP_PWS:
self.cfg.get("passwords", pw_key)
warm_pws = list(WARMUP_PWS)
driver_canon = _canon_virt_driver(self.cfg.get('nova', 'virt_driver'))
if driver_canon == 'xenserver':
self.cfg.get("passwords", "xenapi_connection")
warm_pws.append('xenapi_connection')
for pw_key in warm_pws:
self.pw_gen.get_password(pw_key)
def _get_config_files(self):
return list(CONFIGS)
@ -317,7 +304,7 @@ class NovaInstaller(comp.PythonInstallComponent):
(_, contents) = utils.load_template(self.component_name, NET_INIT_CONF)
params = self._get_param_map(NET_INIT_CONF)
contents = utils.param_replace(contents, params, True)
tgt_fn = sh.joinpths(self.bindir, NET_INIT_CONF)
tgt_fn = sh.joinpths(self.bin_dir, NET_INIT_CONF)
sh.write_file(tgt_fn, contents)
sh.chmod(tgt_fn, 0755)
self.tracewriter.file_touched(tgt_fn)
@ -325,18 +312,18 @@ class NovaInstaller(comp.PythonInstallComponent):
def _sync_db(self):
LOG.info("Syncing the database with nova.")
mp = dict()
mp['BINDIR'] = self.bindir
mp['CFGFILE'] = sh.joinpths(self.cfgdir, API_CONF)
mp['BIN_DIR'] = self.bin_dir
mp['CFGFILE'] = sh.joinpths(self.cfg_dir, API_CONF)
utils.execute_template(*DB_SYNC_CMD, params=mp)
def post_install(self):
comp.PythonInstallComponent.post_install(self)
#extra actions to do nova setup
# Extra actions to do nova setup
self._setup_db()
self._sync_db()
self._setup_cleaner()
self._setup_network_initer()
#check if we need to do the vol subcomponent
# Check if we need to do the vol subsystem
if self.volumes_enabled:
vol_maker = NovaVolumeConfigurator(self)
vol_maker.setup_volumes()
@ -344,7 +331,7 @@ class NovaInstaller(comp.PythonInstallComponent):
def _setup_cleaner(self):
LOG.info("Configuring cleaner template %s.", CLEANER_DATA_CONF)
(_, contents) = utils.load_template(self.component_name, CLEANER_DATA_CONF)
tgt_fn = sh.joinpths(self.bindir, CLEANER_DATA_CONF)
tgt_fn = sh.joinpths(self.bin_dir, CLEANER_DATA_CONF)
sh.write_file(tgt_fn, contents)
sh.chmod(tgt_fn, 0755)
self.tracewriter.file_touched(tgt_fn)
@ -371,15 +358,15 @@ class NovaInstaller(comp.PythonInstallComponent):
return comp.PythonInstallComponent._get_source_config(self, PASTE_SOURCE_FN)
elif config_fn == LOGGING_CONF:
name = LOGGING_SOURCE_FN
srcfn = sh.joinpths(self.cfgdir, "nova", name)
srcfn = sh.joinpths(self.cfg_dir, "nova", name)
contents = sh.load_file(srcfn)
return (srcfn, contents)
def _get_param_map(self, config_fn):
mp = dict()
if config_fn == NET_INIT_CONF:
mp['NOVA_DIR'] = self.appdir
mp['CFG_FILE'] = sh.joinpths(self.cfgdir, API_CONF)
mp['NOVA_DIR'] = self.app_dir
mp['CFG_FILE'] = sh.joinpths(self.cfg_dir, API_CONF)
mp['FLOATING_RANGE'] = self.cfg.getdefaulted('nova', 'floating_range', '172.24.4.224/28')
mp['TEST_FLOATING_RANGE'] = self.cfg.getdefaulted('nova', 'test_floating_range', '192.168.253.0/29')
mp['TEST_FLOATING_POOL'] = self.cfg.getdefaulted('nova', 'test_floating_pool', 'test')
@ -393,15 +380,16 @@ class NovaInstaller(comp.PythonInstallComponent):
configs_made = comp.PythonInstallComponent.configure(self)
self._generate_nova_conf()
configs_made += 1
# TODO: maybe this should be a subclass that handles these differences
driver_canon = _canon_virt_driver(self.cfg.get('nova', 'virt_driver'))
if (self.distro.name in POLICY_DISTROS) and driver_canon == 'libvirt':
# TODO maybe move this??
if driver_canon == 'libvirt' and self.distro.get_command('virt-policy', quiet=True):
(fn, contents) = self.distro.get_command('virt-policy')
dirs_made = list()
with sh.Rooted(True):
dirs_made = sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN))
sh.write_file(LIBVIRT_POLICY_FN, LIBVIRT_POLICY_CONTENTS)
dirs_made = sh.mkdirslist(sh.dirname(fn))
sh.write_file(fn, contents)
self.tracewriter.dirs_made(*dirs_made)
self.tracewriter.cfg_file_written(LIBVIRT_POLICY_FN)
self.tracewriter.cfg_file_written(fn)
configs_made += 1
return configs_made
@ -409,16 +397,16 @@ class NovaInstaller(comp.PythonInstallComponent):
class NovaRuntime(comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
def _setup_network_init(self):
tgt_fn = sh.joinpths(self.bindir, NET_INIT_CONF)
tgt_fn = sh.joinpths(self.bin_dir, NET_INIT_CONF)
if sh.isfile(tgt_fn):
LOG.info("Creating your nova network to be used with instances.")
#still there, run it
#these environment additions are important
#in that they eventually affect how this script runs
# If still there, run it
# these environment additions are important
# in that they eventually affect how this script runs
if utils.service_enabled(settings.QUANTUM, self.instances, False):
LOG.info("Waiting %s seconds so that quantum can start up before running first time init." % (WAIT_ONLINE_TO))
sh.sleep(WAIT_ONLINE_TO)
@ -433,20 +421,17 @@ class NovaRuntime(comp.PythonRuntime):
def post_start(self):
self._setup_network_init()
def get_dependencies(self):
deps = comp.PythonRuntime.get_dependencies(self)
# FIXME: This should come from a persona.
if utils.service_enabled(settings.QUANTUM, self.instances, False):
deps.append(settings.QUANTUM)
return deps
def known_subsystems(self):
return SUBSYSTEMS
def _get_apps_to_start(self):
result = [{'name': app_name,
'path': sh.joinpths(self.bindir, app_name),
}
for app_name in sorted(APP_OPTIONS.keys())
]
return result
apps = list()
for subsys in self.desired_subsystems:
app = dict()
app['name'] = SUB_COMPONENT_NAME_MAP[subsys]
app['path'] = sh.joinpths(self.bin_dir, app['name'])
apps.append(app)
return apps
def pre_start(self):
# Let the parent class do its thing
@ -455,27 +440,27 @@ class NovaRuntime(comp.PythonRuntime):
if virt_driver == 'libvirt':
virt_type = _canon_libvirt_type(self.cfg.get('nova', 'libvirt_type'))
LOG.info("Checking that your selected libvirt virtualization type [%s] is working and running." % (virt_type))
if not virsh.virt_ok(virt_type, self.distro.name):
msg = ("Libvirt type %s for distro %s does not seem to be active or configured correctly, "
"perhaps you should be using %s instead." % (virt_type, self.distro.name, DEF_VIRT_TYPE))
if not virsh.virt_ok(virt_type, self.distro):
msg = ("Libvirt type %s does not seem to be active or configured correctly, "
"perhaps you should be using %s instead." % (virt_type, DEF_VIRT_TYPE))
raise exceptions.StartException(msg)
virsh.restart(self.distro.name)
virsh.restart(self.distro)
def _get_param_map(self, app_name):
params = comp.PythonRuntime._get_param_map(self, app_name)
params['CFGFILE'] = sh.joinpths(self.cfgdir, API_CONF)
params['CFGFILE'] = sh.joinpths(self.cfg_dir, API_CONF)
return params
def _get_app_options(self, app):
return APP_OPTIONS.get(app)
#this will configure nova volumes which in a developer box
#is a volume group (lvm) that are backed by a loopback file
# This will configure nova volumes which in a developer box
# is a volume group (lvm) that are backed by a loopback file
class NovaVolumeConfigurator(object):
def __init__(self, ni):
self.cfg = ni.cfg
self.appdir = ni.appdir
self.app_dir = ni.app_dir
self.distro = ni.distro
def setup_volumes(self):
@ -484,7 +469,7 @@ class NovaVolumeConfigurator(object):
def _setup_vol_groups(self):
LOG.info("Attempting to setup volume groups for nova volume management.")
mp = dict()
backing_file = self.cfg.getdefaulted('nova', 'volume_backing_file', sh.joinpths(self.appdir, 'nova-volumes-backing-file'))
backing_file = self.cfg.getdefaulted('nova', 'volume_backing_file', sh.joinpths(self.app_dir, 'nova-volumes-backing-file'))
vol_group = self.cfg.getdefaulted('nova', 'volume_group', 'nova-volumes')
backing_file_size = utils.to_bytes(self.cfg.getdefaulted('nova', 'volume_backing_file_size', '2052M'))
mp['VOLUME_GROUP'] = vol_group
@ -511,7 +496,9 @@ class NovaVolumeConfigurator(object):
# logical volumes
self._process_lvs(mp)
# Finish off by restarting tgt, and ignore any errors
utils.execute_template(*RESTART_TGT_CMD[self.distro.name], check_exit_code=False)
cmdrestart = self.distro.get_command('iscsi', 'restart', quiet=True)
if cmdrestart:
sh.execute(*cmdrestart, run_as_root=True, check_exit_code=False)
def _process_lvs(self, mp):
LOG.info("Attempting to setup logical volumes for nova volume management.")
@ -547,12 +534,12 @@ class NovaConfConfigurator(object):
self.cfg = ni.cfg
self.pw_gen = ni.pw_gen
self.instances = ni.instances
self.component_root = ni.component_root
self.appdir = ni.appdir
self.component_dir = ni.component_dir
self.app_dir = ni.app_dir
self.tracewriter = ni.tracewriter
self.paste_conf_fn = ni.paste_conf_fn
self.distro = ni.distro
self.cfgdir = ni.cfgdir
self.cfg_dir = ni.cfg_dir
self.xvnc_enabled = ni.xvnc_enabled
self.volumes_enabled = ni.volumes_enabled
self.novnc_enabled = utils.service_enabled(settings.NOVNC, self.instances)
@ -564,13 +551,12 @@ class NovaConfConfigurator(object):
return self.cfg.getdefaulted('nova', name, default)
def configure(self):
#everything built goes in here
# Everything built goes in here
nova_conf = NovaConf()
#used more than once
# Used more than once so we calculate it ahead of time
hostip = self.cfg.get('host', 'ip')
#verbose on?
if self._getbool('verbose'):
nova_conf.add('verbose', True)
@ -588,79 +574,79 @@ class NovaConfConfigurator(object):
sh.mkdir(full_logdir)
sh.chmod(full_logdir, 0777)
#allow the admin api?
# Allow the admin api?
if self._getbool('allow_admin_api'):
nova_conf.add('allow_admin_api', True)
#??
# FIXME: ??
nova_conf.add('allow_resize_to_same_host', True)
#which scheduler do u want?
# Which scheduler do u want?
nova_conf.add('compute_scheduler_driver', self._getstr('scheduler', DEF_SCHEDULER))
#setup network settings
# Setup any network settings
self._configure_network_settings(nova_conf)
#setup nova volume settings
# Setup nova volume settings
if self.volumes_enabled:
self._configure_vols(nova_conf)
#where we are running
# The ip of where we are running
nova_conf.add('my_ip', hostip)
#setup your sql connection
# Setup your sql connection
db_dsn = db.fetch_dbdsn(self.cfg, self.pw_gen, DB_NAME)
nova_conf.add('sql_connection', db_dsn)
#configure anything libvirt releated?
# Configure anything libvirt related?
virt_driver = _canon_virt_driver(self._getstr('virt_driver'))
if virt_driver == 'libvirt':
libvirt_type = _canon_libvirt_type(self._getstr('libvirt_type'))
self._configure_libvirt(libvirt_type, nova_conf)
#how instances will be presented
# How instances will be presented
instance_template = self._getstr('instance_name_prefix') + self._getstr('instance_name_postfix')
if not instance_template:
instance_template = DEF_INSTANCE_TEMPL
nova_conf.add('instance_name_template', instance_template)
#enable the standard extensions
# Enable the standard extensions
nova_conf.add('osapi_compute_extension', STD_COMPUTE_EXTS)
#auth will be using keystone
# Auth will be using keystone
nova_conf.add('auth_strategy', 'keystone')
#vnc settings setup
# Vnc settings setup
self._configure_vnc(nova_conf)
#where our paste config is
# Where our paste config is
nova_conf.add('api_paste_config', self.paste_conf_fn)
#what our imaging service will be
# What our imaging service will be
self._configure_image_service(nova_conf, hostip)
#ec2 / s3 stuff
# Configs for ec2 / s3 stuff
nova_conf.add('ec2_dmz_host', self._getstr('ec2_dmz_host', hostip))
nova_conf.add('s3_host', hostip)
#how is your rabbit setup?
# How is your rabbit setup?
nova_conf.add('rabbit_host', self.cfg.getdefaulted('default', 'rabbit_host', hostip))
nova_conf.add('rabbit_password', self.cfg.get("passwords", "rabbit"))
#where instances will be stored
instances_path = self._getstr('instances_path', sh.joinpths(self.component_root, 'instances'))
# Where instances will be stored
instances_path = self._getstr('instances_path', sh.joinpths(self.component_dir, 'instances'))
self._configure_instances_path(instances_path, nova_conf)
#is this a multihost setup?
# Is this a multihost setup?
self._configure_multihost(nova_conf)
#enable syslog??
# Enable syslog??
self._configure_syslog(nova_conf)
#handle any virt driver specifics
# Handle any virt driver specifics
self._configure_virt_driver(nova_conf)
#and extract to finish
# Annnnnd extract to finish
return self._get_content(nova_conf)
def _get_extra(self, key):
@ -707,11 +693,11 @@ class NovaConfConfigurator(object):
return generated_content
def _configure_image_service(self, nova_conf, hostip):
#what image service we will use
# What image service we will u be using sir?
img_service = self._getstr('img_service', DEF_IMAGE_SERVICE)
nova_conf.add('image_service', img_service)
#where is glance located?
# If glance then where is it?
if img_service.lower().find("glance") != -1:
glance_api_server = self._getstr('glance_server', (DEF_GLANCE_SERVER % (hostip)))
nova_conf.add('glance_api_servers', glance_api_server)
@ -747,7 +733,7 @@ class NovaConfConfigurator(object):
nova_conf.add('iscsi_helper', 'tgtadm')
def _configure_network_settings(self, nova_conf):
#TODO this might not be right....
# TODO this might not be right....
if utils.service_enabled(settings.QUANTUM, self.instances, False):
nova_conf.add('network_manager', QUANTUM_MANAGER)
hostip = self.cfg.get('host', 'ip')
@ -764,10 +750,11 @@ class NovaConfConfigurator(object):
else:
nova_conf.add('network_manager', NET_MANAGER_TEMPLATE % (self._getstr('network_manager', DEF_NET_MANAGER)))
#dhcp bridge stuff???
nova_conf.add('dhcpbridge_flagfile', sh.joinpths(self.cfgdir, API_CONF))
# Configs dhcp bridge stuff???
# TODO: why is this the same as the nova.conf?
nova_conf.add('dhcpbridge_flagfile', sh.joinpths(self.cfg_dir, API_CONF))
#Network prefix for the IP network that all the projects for future VM guests reside on. Example: 192.168.0.0/12
# Network prefix for the IP network that all the projects for future VM guests reside on. Example: 192.168.0.0/12
nova_conf.add('fixed_range', self._getstr('fixed_range'))
# The value for vlan_interface may default to the the current value
@ -775,7 +762,7 @@ class NovaConfConfigurator(object):
public_interface = self._getstr('public_interface')
vlan_interface = self._getstr('vlan_interface', public_interface)
#do a little check to make sure actually have that interface set...
# Do a little check to make sure actually have that interface/s
if not utils.is_interface(public_interface):
msg = "Public interface %s is not a known interface" % (public_interface)
raise exceptions.ConfigException(msg)
@ -787,7 +774,7 @@ class NovaConfConfigurator(object):
nova_conf.add('public_interface', public_interface)
nova_conf.add('vlan_interface', vlan_interface)
#This forces dnsmasq to update its leases table when an instance is terminated.
# This forces dnsmasq to update its leases table when an instance is terminated.
nova_conf.add('force_dhcp_release', True)
def _configure_syslog(self, nova_conf):
@ -812,11 +799,11 @@ class NovaConfConfigurator(object):
pass
nova_conf.add('libvirt_type', virt_type)
#configures any virt driver settings
# Configures any virt driver settings
def _configure_virt_driver(self, nova_conf):
drive_canon = _canon_virt_driver(self._getstr('virt_driver'))
nova_conf.add('connection_type', VIRT_DRIVER_CON_MAP.get(drive_canon, drive_canon))
#special driver settings
# Special driver settings
if drive_canon == 'xenserver':
nova_conf.add('xenapi_connection_url', self._getstr('xa_connection_url', XA_DEF_CONNECTION_URL))
nova_conf.add('xenapi_connection_username', self._getstr('xa_connection_username', XA_DEF_USER))
@ -840,7 +827,7 @@ class NovaConfConfigurator(object):
nova_conf.add('flat_interface', flat_interface)
# This class represents the data in the nova config file
# This class represents the data/format of the nova config file
class NovaConf(object):
def __init__(self):
self.lines = list()

View File

@ -16,10 +16,7 @@
from devstack import component as comp
from devstack import log as logging
from devstack import settings
#id
TYPE = settings.NOVA_CLIENT
LOG = logging.getLogger("devstack.components.nova_client")

View File

@ -22,17 +22,15 @@ from devstack import utils
from devstack.components import nova
#id
TYPE = settings.NOVNC
LOG = logging.getLogger("devstack.components.novnc")
#where the application is really
# Where the application is really
UTIL_DIR = 'utils'
VNC_PROXY_APP = 'nova-novncproxy'
APP_OPTIONS = {
#this reaches into the nova configuration file
#TODO can we stop that?
# This reaches into the nova configuration file
# TODO can we stop that?
VNC_PROXY_APP: ['--flagfile', '%NOVA_CONF%', '--web', '.'],
}
@ -67,16 +65,16 @@ class NoVNCRuntime(comp.ProgramRuntime):
for app_name in APP_OPTIONS.keys():
apps.append({
'name': app_name,
'path': sh.joinpths(self.appdir, UTIL_DIR, app_name),
'path': sh.joinpths(self.app_dir, UTIL_DIR, app_name),
})
return apps
def _get_param_map(self, app_name):
root_params = comp.ProgramRuntime._get_param_map(self, app_name)
if app_name == VNC_PROXY_APP and utils.service_enabled(settings.NOVA, self.instances, False):
#have to reach into the nova conf (puke)
# FIXME: Have to reach into the nova conf (puke)
nova_runtime = self.instances[settings.NOVA]
root_params['NOVA_CONF'] = sh.joinpths(nova_runtime.cfgdir, nova.API_CONF)
root_params['NOVA_CONF'] = sh.joinpths(nova_runtime.cfg_dir, nova.API_CONF)
return root_params
def _get_app_options(self, app):

View File

@ -25,16 +25,13 @@ from devstack import utils
from devstack.components import db
#id
TYPE = settings.QUANTUM
LOG = logging.getLogger("devstack.components.quantum")
#vswitch pkgs
# Openvswitch special settings
VSWITCH_PLUGIN = 'openvswitch'
PKG_VSWITCH = "quantum-openvswitch.json"
V_PROVIDER = "quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
#config files (some only modified if running as openvswitch)
# Config files (some only modified if running as openvswitch)
PLUGIN_CONF = "plugins.ini"
QUANTUM_CONF = 'quantum.conf'
PLUGIN_LOC = ['etc']
@ -43,24 +40,20 @@ AGENT_LOC = ["etc", "quantum", "plugins", "openvswitch"]
AGENT_BIN_LOC = ["quantum", "plugins", "openvswitch", 'agent']
CONFIG_FILES = [PLUGIN_CONF, AGENT_CONF]
#this db will be dropped and created
# This db will be dropped and created
DB_NAME = 'ovs_quantum'
#opensvswitch bridge setup/teardown/name commands
# Opensvswitch bridge setup/teardown/name commands
OVS_BRIDGE_DEL = ['ovs-vsctl', '--no-wait', '--', '--if-exists', 'del-br', '%OVS_BRIDGE%']
OVS_BRIDGE_ADD = ['ovs-vsctl', '--no-wait', 'add-br', '%OVS_BRIDGE%']
OVS_BRIDGE_EXTERN_ID = ['ovs-vsctl', '--no-wait', 'br-set-external-id', '%OVS_BRIDGE%', 'bridge-id', '%OVS_EXTERNAL_ID%']
OVS_BRIDGE_CMDS = [OVS_BRIDGE_DEL, OVS_BRIDGE_ADD, OVS_BRIDGE_EXTERN_ID]
#special component options
QUANTUM_SERVICE = 'q-svc'
QUANTUM_AGENT = 'q-agt'
#subdirs of the downloaded
# Subdirs of the downloaded
CONFIG_DIR = 'etc'
BIN_DIR = 'bin'
#what to start (only if openvswitch enabled)
# What to start (only if openvswitch enabled)
APP_Q_SERVER = 'quantum-server'
APP_Q_AGENT = 'ovs_quantum_agent.py'
APP_OPTIONS = {
@ -81,15 +74,8 @@ class QuantumInstaller(comp.PkgInstallComponent):
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
#default to on if not specified
self.q_vswitch_agent = True
self.q_vswitch_service = True
# else:
# #only turn on if requested
# if QUANTUM_SERVICE in self.component_opts:
# self.q_vswitch_service = True
# if QUANTUM_AGENT in self.component_opts:
# self.q_vswitch_agent = True
def _get_download_locations(self):
places = list()
@ -99,34 +85,22 @@ class QuantumInstaller(comp.PkgInstallComponent):
})
return places
def get_dependencies(self):
common_deps = comp.PkgInstallComponent.get_dependencies(self)
add_deps = list()
# FIXME: This should come from a persona.
if self.q_vswitch_agent:
#just need the client
add_deps.append(settings.QUANTUM_CLIENT)
if self.q_vswitch_service:
#in this case we need the db
add_deps.append(settings.DB)
return common_deps + add_deps
def _get_config_files(self):
return list(CONFIG_FILES)
def _get_target_config_name(self, config_fn):
if config_fn == PLUGIN_CONF:
tgt_loc = [self.appdir] + PLUGIN_LOC + [config_fn]
tgt_loc = [self.app_dir] + PLUGIN_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
elif config_fn == AGENT_CONF:
tgt_loc = [self.appdir] + AGENT_LOC + [config_fn]
tgt_loc = [self.app_dir] + AGENT_LOC + [config_fn]
return sh.joinpths(*tgt_loc)
else:
return comp.PkgInstallComponent._get_target_config_name(self, config_fn)
def _config_adjust(self, contents, config_fn):
if config_fn == PLUGIN_CONF and self.q_vswitch_service:
#need to fix the "Quantum plugin provider module"
# Need to fix the "Quantum plugin provider module"
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
@ -140,7 +114,7 @@ class QuantumInstaller(comp.PkgInstallComponent):
newcontents = cfg.add_header(config_fn, outputstream.getvalue())
return newcontents
elif config_fn == AGENT_CONF and self.q_vswitch_agent:
#Need to adjust the sql connection
# Need to adjust the sql connection
newcontents = contents
with io.BytesIO(contents) as stream:
config = cfg.IgnoreMissingConfigParser()
@ -189,12 +163,12 @@ class QuantumInstaller(comp.PkgInstallComponent):
def _get_source_config(self, config_fn):
if config_fn == PLUGIN_CONF:
srcloc = [self.appdir] + PLUGIN_LOC + [config_fn]
srcloc = [self.app_dir] + PLUGIN_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
elif config_fn == AGENT_CONF:
srcloc = [self.appdir] + AGENT_LOC + [config_fn]
srcloc = [self.app_dir] + AGENT_LOC + [config_fn]
srcfn = sh.joinpths(*srcloc)
contents = sh.load_file(srcfn)
return (srcfn, contents)
@ -209,25 +183,19 @@ class QuantumRuntime(comp.ProgramRuntime):
self.q_vswitch_service = False
plugin = self.cfg.getdefaulted("quantum", "q_plugin", VSWITCH_PLUGIN)
if plugin == VSWITCH_PLUGIN:
#default to on if not specified
# Default to on if not specified
self.q_vswitch_agent = True
self.q_vswitch_service = True
# else:
# #only turn on if requested
# if QUANTUM_SERVICE in self.component_opts:
# self.q_vswitch_service = True
# if QUANTUM_AGENT in self.component_opts:
# self.q_vswitch_agent = True
def _get_apps_to_start(self):
app_list = comp.ProgramRuntime._get_apps_to_start(self)
if self.q_vswitch_service:
app_list.append({
'name': APP_Q_SERVER,
'path': sh.joinpths(self.appdir, BIN_DIR, APP_Q_SERVER),
'path': sh.joinpths(self.app_dir, BIN_DIR, APP_Q_SERVER),
})
if self.q_vswitch_agent:
full_pth = [self.appdir] + AGENT_BIN_LOC + [APP_Q_AGENT]
full_pth = [self.app_dir] + AGENT_BIN_LOC + [APP_Q_AGENT]
app_list.append({
'name': APP_Q_AGENT,
'path': sh.joinpths(*full_pth)
@ -240,8 +208,8 @@ class QuantumRuntime(comp.ProgramRuntime):
def _get_param_map(self, app_name):
param_dict = comp.ProgramRuntime._get_param_map(self, app_name)
if app_name == APP_Q_AGENT:
tgt_loc = [self.appdir] + AGENT_LOC + [AGENT_CONF]
tgt_loc = [self.app_dir] + AGENT_LOC + [AGENT_CONF]
param_dict['OVS_CONFIG_FILE'] = sh.joinpths(*tgt_loc)
elif app_name == APP_Q_SERVER:
param_dict['QUANTUM_CONFIG_FILE'] = sh.joinpths(self.appdir, CONFIG_DIR, QUANTUM_CONF)
param_dict['QUANTUM_CONFIG_FILE'] = sh.joinpths(self.app_dir, CONFIG_DIR, QUANTUM_CONF)
return param_dict

View File

@ -16,10 +16,7 @@
from devstack import component as comp
from devstack import log as logging
from devstack import settings
#id
TYPE = settings.QUANTUM_CLIENT
LOG = logging.getLogger("devstack.components.quantum_client")

View File

@ -21,27 +21,25 @@ from devstack import log as logging
from devstack import settings
from devstack import shell as sh
#id
TYPE = settings.RABBIT
LOG = logging.getLogger("devstack.components.rabbit")
#hopefully these are distro independent..
# So far these are distro independent..
START_CMD = ['service', "rabbitmq-server", "start"]
STOP_CMD = ['service', "rabbitmq-server", "stop"]
STATUS_CMD = ['service', "rabbitmq-server", "status"]
RESTART_CMD = ['service', "rabbitmq-server", "restart"]
PWD_CMD = ['rabbitmqctl', 'change_password', 'guest']
#default password
# Default password (guest)
RESET_BASE_PW = ''
#how long we wait for rabbitmq to start up before doing commands on it
# How long we wait for rabbitmq to start up before doing commands on it
WAIT_ON_TIME = settings.WAIT_ALIVE_SECS
#config keys we warm up so u won't be prompted later
# Config keys we warm up so u won't be prompted later
WARMUP_PWS = ['rabbit']
#partial of rabbit user prompt
# Partial of rabbit user prompt
PW_USER_PROMPT = 'the rabbit user'
@ -96,8 +94,9 @@ class RabbitRuntime(comp.EmptyRuntime):
return 0
def status(self):
#this has got to be the worst status output
#i have ever seen (its like a weird mix json+crap)
# This has got to be the worst status output.
#
# I have ever seen (its like a weird mix json+crap)
run_result = sh.execute(*STATUS_CMD,
check_exit_code=False,
run_as_root=True)
@ -114,12 +113,15 @@ class RabbitRuntime(comp.EmptyRuntime):
return comp.STATUS_UNKNOWN
def _run_cmd(self, cmd, check_exit=True):
#this seems to fix one of the bugs with rabbit mq starting and stopping
#not cool, possibly connected to the following bugs:
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
# This seems to fix one of the bugs with rabbit mq starting and stopping
# not cool, possibly connected to the following bugs:
#
#rhel seems to have this bug also...
# See: https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
# See: https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
#
# RHEL seems to have this bug also...
#
# TODO: Move to distro dir...
with TemporaryFile() as f:
return sh.execute(*cmd, run_as_root=True,
stdout_fh=f, stderr_fh=f,

View File

@ -18,15 +18,12 @@ import re
from devstack import component as comp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils
#id
TYPE = settings.SWIFT
LOG = logging.getLogger("devstack.components.swift")
#swift has alot of config files!
# Swift has alot of config files!
SWIFT_CONF = 'swift.conf'
PROXY_SERVER_CONF = 'proxy-server.conf'
ACCOUNT_SERVER_CONF = 'account-server.conf'
@ -45,35 +42,35 @@ CONFIGS = [SWIFT_CONF, PROXY_SERVER_CONF, ACCOUNT_SERVER_CONF,
SWIFT_RSYNC_LOC = '/etc/rsyslog.d/10-swift.conf'
DEF_LOOP_SIZE = 1000000
#adjustments to rsync/rsyslog
# Adjustments to rsync/rsyslog
RSYNC_CONF_LOC = '/etc/default/rsync'
RSYNCD_CONF_LOC = '/etc/rsyncd.conf'
RSYNC_SERVICE_RESTART = ['service', 'rsync', 'restart']
RSYSLOG_SERVICE_RESTART = ['service', 'rsyslog', 'restart']
RSYNC_ON_OFF_RE = re.compile(r'^\s*RSYNC_ENABLE\s*=\s*(.*)$', re.I)
#defines our auth service type
# Defines our auth service type
AUTH_SERVICE = 'keystone'
#defines what type of loopback filesystem we will make
#xfs is preferred due to its extended attributes
# Defines what type of loopback filesystem we will make
# xfs is preferred due to its extended attributes
FS_TYPE = "xfs"
#subdirs of the git checkout
# Subdirs of the git checkout
BIN_DIR = 'bin'
CONFIG_DIR = 'etc'
LOG_DIR = 'logs'
#config keys we warm up so u won't be prompted later
# Config keys we warm up so u won't be prompted later
WARMUP_PWS = ['service_token', 'swift_hash']
class SwiftUninstaller(comp.PythonUninstallComponent):
def __init__(self, *args, **kargs):
comp.PythonUninstallComponent.__init__(self, *args, **kargs)
self.datadir = sh.joinpths(self.appdir, self.cfg.getdefaulted('swift', 'data_location', 'data'))
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.datadir = sh.joinpths(self.app_dir, self.cfg.getdefaulted('swift', 'data_location', 'data'))
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.logdir = sh.joinpths(self.datadir, LOG_DIR)
def pre_uninstall(self):
@ -88,12 +85,12 @@ class SwiftUninstaller(comp.PythonUninstallComponent):
class SwiftInstaller(comp.PythonInstallComponent):
def __init__(self, *args, **kargs):
comp.PythonInstallComponent.__init__(self, *args, **kargs)
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.datadir = sh.joinpths(self.appdir, self.cfg.getdefaulted('swift', 'data_location', 'data'))
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.datadir = sh.joinpths(self.app_dir, self.cfg.getdefaulted('swift', 'data_location', 'data'))
self.logdir = sh.joinpths(self.datadir, LOG_DIR)
self.startmain_file = sh.joinpths(self.bindir, SWIFT_STARTMAIN)
self.makerings_file = sh.joinpths(self.bindir, SWIFT_MAKERINGS)
self.startmain_file = sh.joinpths(self.bin_dir, SWIFT_STARTMAIN)
self.makerings_file = sh.joinpths(self.bin_dir, SWIFT_MAKERINGS)
self.fs_dev = sh.joinpths(self.datadir, DEVICE_PATH)
self.fs_image = sh.joinpths(self.datadir, SWIFT_IMG)
self.auth_server = AUTH_SERVICE
@ -111,20 +108,20 @@ class SwiftInstaller(comp.PythonInstallComponent):
def warm_configs(self):
for pw_key in WARMUP_PWS:
self.cfg.get("passwords", pw_key)
self.pw_gen.get_password(pw_key)
def _get_param_map(self, config_fn):
return {
'USER': self.cfg.getdefaulted('swift', 'swift_user', sh.getuser()),
'GROUP': self.cfg.getdefaulted('swift', 'swift_group', sh.getgroupname()),
'SWIFT_DATA_LOCATION': self.datadir,
'SWIFT_CONFIG_LOCATION': self.cfgdir,
'SWIFT_CONFIG_LOCATION': self.cfg_dir,
'SERVICE_TOKEN': self.cfg.get('passwords', 'service_token'),
'AUTH_SERVER': self.auth_server,
'SWIFT_HASH': self.cfg.get('passwords', 'swift_hash'),
'SWIFT_LOGDIR': self.logdir,
'SWIFT_PARTITION_POWER_SIZE': self.cfg.getdefaulted('swift', 'partition_power_size', '9'),
#leave these alone, will be adjusted later
# Note: leave these alone, will be adjusted later
'NODE_PATH': '%NODE_PATH%',
'BIND_PORT': '%BIND_PORT%',
'LOG_FACILITY': '%LOG_FACILITY%',
@ -145,8 +142,8 @@ class SwiftInstaller(comp.PythonInstallComponent):
def _create_node_config(self, node_number, port):
for t in ['object', 'container', 'account']:
src_fn = sh.joinpths(self.cfgdir, '%s-server.conf' % t)
tgt_fn = sh.joinpths(self.cfgdir, '%s-server/%d.conf' % (t, node_number))
src_fn = sh.joinpths(self.cfg_dir, '%s-server.conf' % t)
tgt_fn = sh.joinpths(self.cfg_dir, '%s-server/%d.conf' % (t, node_number))
adjustments = {
'%NODE_PATH%': sh.joinpths(self.datadir, str(node_number)),
'%BIND_PORT%': str(port),
@ -157,7 +154,7 @@ class SwiftInstaller(comp.PythonInstallComponent):
def _delete_templates(self):
for t in ['object', 'container', 'account']:
sh.unlink(sh.joinpths(self.cfgdir, '%s-server.conf' % t))
sh.unlink(sh.joinpths(self.cfg_dir, '%s-server.conf' % t))
def _create_nodes(self):
for i in range(1, 5):
@ -170,20 +167,20 @@ class SwiftInstaller(comp.PythonInstallComponent):
self._delete_templates()
def _turn_on_rsync(self):
sh.symlink(sh.joinpths(self.cfgdir, RSYNC_CONF), RSYNCD_CONF_LOC)
sh.symlink(sh.joinpths(self.cfg_dir, RSYNC_CONF), RSYNCD_CONF_LOC)
self.tracewriter.symlink_made(RSYNCD_CONF_LOC)
sh.replace_in(RSYNC_CONF_LOC, RSYNC_ON_OFF_RE, 'RSYNC_ENABLE=true', True)
def _create_log_dirs(self):
self.tracewriter.dirs_made(*sh.mkdirslist(sh.joinpths(self.logdir, 'hourly')))
sh.symlink(sh.joinpths(self.cfgdir, SYSLOG_CONF), SWIFT_RSYNC_LOC)
sh.symlink(sh.joinpths(self.cfg_dir, SYSLOG_CONF), SWIFT_RSYNC_LOC)
self.tracewriter.symlink_made(SWIFT_RSYNC_LOC)
def _setup_binaries(self):
sh.move(sh.joinpths(self.cfgdir, SWIFT_MAKERINGS), self.makerings_file)
sh.move(sh.joinpths(self.cfg_dir, SWIFT_MAKERINGS), self.makerings_file)
sh.chmod(self.makerings_file, 0777)
self.tracewriter.file_touched(self.makerings_file)
sh.move(sh.joinpths(self.cfgdir, SWIFT_STARTMAIN), self.startmain_file)
sh.move(sh.joinpths(self.cfg_dir, SWIFT_STARTMAIN), self.startmain_file)
sh.chmod(self.startmain_file, 0777)
self.tracewriter.file_touched(self.startmain_file)
@ -202,21 +199,21 @@ class SwiftInstaller(comp.PythonInstallComponent):
class SwiftRuntime(comp.PythonRuntime):
def __init__(self, *args, **kargs):
comp.PythonRuntime.__init__(self, *args, **kargs)
self.datadir = sh.joinpths(self.appdir, self.cfg.getdefaulted('swift', 'data_location', 'data'))
self.cfgdir = sh.joinpths(self.appdir, CONFIG_DIR)
self.bindir = sh.joinpths(self.appdir, BIN_DIR)
self.datadir = sh.joinpths(self.app_dir, self.cfg.getdefaulted('swift', 'data_location', 'data'))
self.cfg_dir = sh.joinpths(self.app_dir, CONFIG_DIR)
self.bin_dir = sh.joinpths(self.app_dir, BIN_DIR)
self.logdir = sh.joinpths(self.datadir, LOG_DIR)
def start(self):
sh.execute(*RSYSLOG_SERVICE_RESTART, run_as_root=True)
sh.execute(*RSYNC_SERVICE_RESTART, run_as_root=True)
swift_start_cmd = [sh.joinpths(self.bindir, SWIFT_INIT)] + ['all', 'start']
swift_start_cmd = [sh.joinpths(self.bin_dir, SWIFT_INIT)] + ['all', 'start']
sh.execute(*swift_start_cmd, run_as_root=True)
def stop(self):
swift_stop_cmd = [sh.joinpths(self.bindir, SWIFT_INIT)] + ['all', 'stop']
swift_stop_cmd = [sh.joinpths(self.bin_dir, SWIFT_INIT)] + ['all', 'stop']
sh.execute(*swift_stop_cmd, run_as_root=True)
def restart(self):
swift_restart_cmd = [sh.joinpths(self.bindir, SWIFT_INIT)] + ['all', 'restart']
swift_restart_cmd = [sh.joinpths(self.bin_dir, SWIFT_INIT)] + ['all', 'restart']
sh.execute(*swift_restart_cmd, run_as_root=True)

View File

@ -16,7 +16,6 @@
# under the License.
import glob
import os
import platform
import re
@ -25,31 +24,28 @@ import yaml
from devstack import importer
from devstack import log as logging
from devstack import settings
from devstack import utils
from devstack import shell as sh
LOG = logging.getLogger('devstack.distro')
DISTRO_CONF_DIR = os.path.join(settings.STACK_CONFIG_DIR, 'distros')
class Distro(object):
@classmethod
def load_all(cls, path=DISTRO_CONF_DIR):
def load_all(cls, path=settings.STACK_DISTRO_DIR):
"""Returns a list of the known distros."""
results = []
input_files = glob.glob(os.path.join(DISTRO_CONF_DIR, '*.yaml'))
input_files = glob.glob(sh.joinpths(path, '*.yaml'))
if not input_files:
raise RuntimeError(
'Did not find any distro definition files in %s' %
DISTRO_CONF_DIR)
path)
for filename in input_files:
try:
with open(filename, 'r') as f:
data = yaml.load(f)
results.append(cls(**data))
except Exception as err:
except (IOError, yaml.YAMLError) as err:
LOG.warning('Could not load distro definition from %s: %s',
filename, err)
return results
@ -64,7 +60,7 @@ class Distro(object):
LOG.debug('Looking for distro data for %s (%s)', plt, distname)
for p in cls.load_all():
if p.supports_distro(plt):
LOG.info('Using distro "%s" for "%s"', p.name, plt)
LOG.info('Using distro "%s" for platform "%s"', p.name, plt)
return p
else:
raise RuntimeError(
@ -73,51 +69,60 @@ class Distro(object):
def __init__(self, name, distro_pattern, packager_name, commands, components):
self.name = name
self.distro_pattern = re.compile(distro_pattern, re.IGNORECASE)
self.packager_name = packager_name
self.commands = commands
self.components = components
self._distro_pattern = re.compile(distro_pattern, re.IGNORECASE)
self._packager_name = packager_name
self._commands = commands
self._components = components
    def __repr__(self):
        """Return a short description naming the distro and its packager entry point."""
        return "\"%s\" using packager \"%s\"" % (self.name, self._packager_name)
def get_command(self, key, *more_keys, **kargs):
""" Gets a end object for a given set of keys """
root = self._commands
acutal_keys = [key] + list(more_keys)
run_over_keys = acutal_keys[0:-1]
end_key = acutal_keys[-1]
quiet = kargs.get('quiet', False)
for k in run_over_keys:
if quiet:
root = root.get(k)
if root is None:
return None
else:
root = root[k]
if not quiet:
return root[end_key]
else:
return root.get(end_key)
def known_component(self, name):
return name in self._components
def supports_distro(self, distro_name):
"""Does this distro support the named Linux distro?
:param distro_name: Return value from platform.linux_distribution().
"""
return bool(self.distro_pattern.search(distro_name))
return bool(self._distro_pattern.search(distro_name))
def get_packager_factory(self):
"""Return a factory for a package manager."""
return importer.import_entry_point(self.packager_name)
return importer.import_entry_point(self._packager_name)
def get_component_action_class(self, name, action):
"""Return the class to use for doing the action w/the component."""
def extract_component(self, name, action):
"""Return the class + component info to use for doing the action w/the component."""
try:
entry_point = self.components[name][action]
# Use a copy instead of the original
component_info = dict(self._components[name])
entry_point = component_info[action]
cls = importer.import_entry_point(entry_point)
# Knock all action class info (and any other keys)
key_deletions = [action] + settings.ACTIONS
for k in key_deletions:
if k in component_info:
del component_info[k]
return (cls, component_info)
except KeyError:
raise RuntimeError('No class configured to %s %s on %s' %
(action, name, self.name))
return importer.import_entry_point(entry_point)
def resolve_component_dependencies(self, components):
"""Returns list of all components needed for the named components."""
all_components = {}
active_names = [(c, None) for c in components]
while active_names:
component, parent = active_names.pop()
try:
component_details = self.components[component]
except KeyError:
if parent:
raise RuntimeError(
'Could not find details about component %r, a dependency of %s, for %s' %
(component, parent, self.name))
else:
raise RuntimeError(
'Could not find details about component %r for %s' %
(component, self.name))
deps = set(component_details.get('dependencies', []))
all_components[component] = deps
for d in deps:
if d not in all_components and d not in active_names:
active_names.append((d, component))
return all_components

View File

@ -1,10 +1,31 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 Dreamhost Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Platform-specific logic for Ubunutu Oneiric components.
"""
import tempfile
import time
from devstack.components import db
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.packaging import apt
LOG = logging.getLogger(__name__)
@ -23,3 +44,35 @@ class OneiricDBInstaller(db.DBInstaller):
fc = utils.joinlinesep(*new_lines)
with sh.Rooted(True):
sh.write_file('/etc/mysql/my.cnf', fc)
class OneiricAptPackager(apt.AptPackager):
    """Apt packager with Oneiric-specific workarounds for rabbitmq-server.

    See:
    https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
    https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
    """

    def _pkg_remove_special(self, name, pkginfo):
        """Remove + purge rabbitmq-server; return True if handled here."""
        if name != 'rabbitmq-server':
            return False
        # https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
        # https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
        LOG.info("Handling special remove of %s." % (name))
        pkg_full = self._format_pkg(name, pkginfo.get("version"))
        self._execute_apt(apt.APT_GET + apt.APT_REMOVE + [pkg_full])
        # Give the remove a moment to settle before purging.
        time.sleep(1)
        self._execute_apt(apt.APT_GET + apt.APT_PURGE + [pkg_full])
        return True

    def _pkg_install_special(self, name, pkginfo):
        """Install rabbitmq-server with redirected output; True if handled."""
        if name != 'rabbitmq-server':
            return False
        # https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
        # https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
        LOG.info("Handling special install of %s." % (name))
        # Redirecting apt's output to a temp file seems to be a
        # temporary fix for the bug above.
        with tempfile.TemporaryFile() as fh:
            pkg_full = self._format_pkg(name, pkginfo.get("version"))
            self._execute_apt(apt.APT_GET + apt.APT_INSTALL + [pkg_full],
                              stdout_fh=fh, stderr_fh=fh)
        return True

71
devstack/distros/rhel6.py Normal file
View File

@ -0,0 +1,71 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 Dreamhost Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Platform-specific logic for RHEL6 components.
"""
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.components import db
from devstack.components import horizon
LOG = logging.getLogger(__name__)
SOCKET_CONF = "/etc/httpd/conf.d/wsgi-socket-prefix.conf"
HTTPD_CONF = '/etc/httpd/conf/httpd.conf'
class Rhel6DBInstaller(db.DBInstaller):
    """DB installer that adjusts /etc/my.cnf for mysql on RHEL6."""

    def _configure_db_confs(self):
        """Comment out 'skip-grant-tables' lines in mysql's config."""
        dbtype = self.cfg.get("db", "type")
        if dbtype != 'mysql':
            return
        LOG.info("Fixing up mysql configs.")
        adjusted = []
        for line in sh.load_file('/etc/my.cnf').splitlines():
            # Disable grant-table skipping so password auth works.
            if line.startswith('skip-grant-tables'):
                adjusted.append('#' + line)
            else:
                adjusted.append(line)
        with sh.Rooted(True):
            sh.write_file('/etc/my.cnf', utils.joinlinesep(*adjusted))
class Rhel6HorizonInstaller(horizon.HorizonInstaller):
    """Horizon installer that fixes apache wsgi/user settings on RHEL6."""

    def _config_fixups(self):
        """Point the wsgi socket prefix somewhere writable and set apache's user/group."""
        (user, group) = self._get_apache_user_group()
        # Recorded as touched (not created) since the file already exists.
        self.tracewriter.file_touched(SOCKET_CONF)
        LOG.info("Fixing up %s and %s files" % (SOCKET_CONF, HTTPD_CONF))
        with sh.Rooted(True):
            # Move the wsgi socket prefix to a directory we can use.
            prefix_line = "WSGISocketPrefix %s" % (sh.joinpths(self.log_dir, "wsgi-socket"))
            sh.write_file(SOCKET_CONF, prefix_line)
            # Rewrite httpd.conf so apache runs with the desired identity.
            adjusted = []
            for line in sh.load_file(HTTPD_CONF).splitlines():
                if line.startswith("User "):
                    line = "User %s" % (user)
                if line.startswith("Group "):
                    line = "Group %s" % (group)
                adjusted.append(line)
            sh.write_file(HTTPD_CONF, utils.joinlinesep(*adjusted))

View File

@ -51,7 +51,6 @@ def _gitdownload(storewhere, uri, branch=None):
def download(storewhere, uri, branch=None):
#figure out which type
up = urlparse(uri)
if up and up.scheme.lower() == "git" or GIT_EXT_REG.match(up.path):
return _gitdownload(storewhere, uri, branch)

View File

@ -26,9 +26,9 @@ def get():
def set(key, value):
#this is really screwy, python is really odd in this area
#from http://docs.python.org/library/os.html
#Calling putenv() directly does not change os.environ, so it's better to modify os.environ.
# This is really screwy, python is really odd in this area
# See: from http://docs.python.org/library/os.html
# Calling putenv() directly does not change os.environ, so it's better to modify os.environ.
if key is not None:
LOG.audit("Setting environment key [%s] to value [%s]" % (key, value))
os.environ[str(key)] = str(value)
@ -43,5 +43,5 @@ def get_key(key, default_value=None):
LOG.debug("Could not find anything in environment variable [%s]" % (key))
value = default_value
else:
LOG.debug("Found [%s] in environment variable [%s]" % (value, key))
LOG.audit("Found [%s] in environment variable [%s]" % (value, key))
return value

View File

@ -28,13 +28,13 @@ from devstack.components import keystone
LOG = logging.getLogger('devstack.env_rc')
#general extraction cfg keys+section
# General extraction cfg keys + sections
CFG_MAKE = {
'FLAT_INTERFACE': ('nova', 'flat_interface'),
'HOST_IP': ('host', 'ip'),
}
#general password keys
# General password keys
PASSWORDS_MAKES = {
'ADMIN_PASSWORD': 'horizon_keystone_admin',
'SERVICE_PASSWORD': 'service_password',
@ -43,17 +43,17 @@ PASSWORDS_MAKES = {
'MYSQL_PASSWORD': 'sql',
}
#install root
# Install root output name and env variable name
INSTALL_ROOT = 'INSTALL_ROOT'
#default ports
# Default ports
EC2_PORT = 8773
S3_PORT = 3333
#how we know if a line is an export or if it isn't (simple edition)
# How we know if a line is an export or if it isn't (simple edition)
EXP_PAT = re.compile("^\s*export\s+(.*?)=(.*?)$", re.IGNORECASE)
#how we unquote a string (simple edition)
# How we unquote a string (simple edition)
QUOTED_PAT = re.compile(r"^\s*[\"](.*)[\"]\s*$")
@ -182,8 +182,8 @@ class RcWriter(object):
to_set['OS_PASSWORD'] = key_params['ADMIN_PASSWORD']
to_set['OS_TENANT_NAME'] = key_params['DEMO_TENANT_NAME']
to_set['OS_USERNAME'] = key_params['DEMO_USER_NAME']
# this seems named weirdly the OS_AUTH_URL is the keystone SERVICE_ENDPOINT endpoint
# todo: describe more why this is the case
# This seems named weirdly the OS_AUTH_URL is the keystone SERVICE_ENDPOINT endpoint
# Todo: describe more why this is the case...
to_set['OS_AUTH_URL'] = key_params['SERVICE_ENDPOINT']
return to_set
@ -246,11 +246,23 @@ class RcReader(object):
def __init__(self):
pass
def _is_comment(self, line):
if line.lstrip().startswith("#"):
return True
return False
def extract(self, fn):
extracted_vars = dict()
contents = sh.load_file(fn)
contents = ''
LOG.audit("Loading rc file [%s]" % (fn))
try:
with open(fn, 'r') as fh:
contents = fh.read()
except IOError as e:
LOG.warn("Failed extracting rc file [%s] due to [%s]" % (fn, e))
return extracted_vars
for line in contents.splitlines():
if line.lstrip().startswith("#"):
if self._is_comment(line):
continue
m = EXP_PAT.search(line)
if m:

View File

@ -236,16 +236,16 @@ class ImageCreationService:
}
})
# prepare the request
# Prepare the request
request = urllib2.Request(keystone_token_url)
# post body
# Post body
request.add_data(data)
# content type
# Content type
request.add_header('Content-Type', 'application/json')
# make the request
# Make the request
LOG.info("Getting your token from url [%s], please wait..." % (keystone_token_url))
LOG.debug("With post json data %s" % (data))
response = urllib2.urlopen(request)
@ -258,7 +258,7 @@ class ImageCreationService:
msg = "Response from url [%s] did not match expected json format." % (keystone_token_url)
raise IOError(msg)
#basic checks passed, extract it!
# Basic checks passed, extract it!
tok = token['access']['token']['id']
LOG.debug("Got token %s" % (tok))
return tok
@ -268,7 +268,7 @@ class ImageCreationService:
token = None
LOG.info("Setting up any specified images in glance.")
#extract them from the config
# Extract the urls from the config
try:
flat_urls = self.cfg.getdefaulted('img', 'image_urls', [])
expanded_urls = [x.strip() for x in flat_urls.split(',')]
@ -278,7 +278,7 @@ class ImageCreationService:
except(ConfigParser.Error):
LOG.warn("No image configuration keys found, skipping glance image install!")
#install them in glance
# Install them in glance
am_installed = 0
if urls:
LOG.info("Attempting to download & extract and upload (%s) images." % (", ".join(urls)))

View File

@ -1,13 +1,36 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 Dreamhost Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def import_entry_point(fullname):
"""Given a name import the class and return it.
def partition(fullname):
"""
The name should be in dotted.path:ClassName syntax.
"""
if ':' not in fullname:
raise ValueError('Invalid entry point specifier %r' % fullname)
module_name, ignore, classname = fullname.partition(':')
(module_name, _, classname) = fullname.partition(':')
return (module_name, classname)
def import_entry_point(fullname):
"""
Given a name import the class and return it.
"""
(module_name, classname) = partition(fullname)
try:
module = __import__(module_name)
for submodule in module_name.split('.')[1:]:

View File

@ -22,7 +22,7 @@ from devstack import utils
LOG = logging.getLogger('devstack.libvirt')
#http://libvirt.org/uri.html
# See: http://libvirt.org/uri.html
LIBVIRT_PROTOCOL_MAP = {
'qemu': "qemu:///system",
'kvm': "qemu:///system",
@ -32,35 +32,28 @@ LIBVIRT_PROTOCOL_MAP = {
}
VIRT_LIB = 'libvirt'
#distros name the libvirt service differently :-(
SV_NAME_MAP = {
settings.RHEL6: 'libvirtd',
settings.FEDORA16: 'libvirtd',
settings.UBUNTU11: 'libvirt-bin',
}
#how libvirt is restarted
# How libvirt is restarted
LIBVIRT_RESTART_CMD = ['service', '%SERVICE%', 'restart']
#how we check its status
# How we check its status
LIBVIRT_STATUS_CMD = ['service', '%SERVICE%', 'status']
#this is just used to check that libvirt will work with
#a given protocol, may not be ideal but does seem to crap
#out if it won't work, so thats good
# This is just used to check that libvirt will work with
# a given protocol, may not be ideal but does seem to crap
# out if it won't work, so thats good...
VIRSH_SANITY_CMD = ['virsh', '-c', '%VIRT_PROTOCOL%', 'uri']
#status is either dead or alive!
# Status is either dead or alive!
_DEAD = 'DEAD'
_ALIVE = 'ALIVE'
#alive wait time, just a sleep we put in so that the service can start up
# Alive wait time, just a sleep we put in so that the service can start up
WAIT_ALIVE_TIME = settings.WAIT_ALIVE_SECS
def _get_virt_lib():
#late import so that we don't always need this library to be active
#ie if u aren't using libvirt in the first place
# Late import so that we don't always need this library to be active
# ie if u aren't using libvirt in the first place...
return utils.import_module(VIRT_LIB)
@ -71,7 +64,7 @@ def _status(distro):
'run_as_root': True,
})
mp = dict()
mp['SERVICE'] = SV_NAME_MAP[distro]
mp['SERVICE'] = distro.get_command('libvirt-daemon')
result = utils.execute_template(*cmds,
check_exit_code=False,
params=mp)
@ -104,7 +97,7 @@ def restart(distro):
'run_as_root': True,
})
mp = dict()
mp['SERVICE'] = SV_NAME_MAP[distro]
mp['SERVICE'] = distro.get_command('libvirt-daemon')
utils.execute_template(*cmds, params=mp)
LOG.info("Restarting the libvirt service, please wait %s seconds until its started." % (WAIT_ALIVE_TIME))
sh.sleep(WAIT_ALIVE_TIME)
@ -117,7 +110,7 @@ def virt_ok(virt_type, distro):
try:
restart(distro)
except excp.ProcessExecutionError, e:
LOG.warn("Could not restart libvirt on distro [%s] due to [%s]" % (distro, e.message))
LOG.warn("Could not restart libvirt due to [%s]" % (e))
return False
try:
cmds = list()
@ -148,7 +141,7 @@ def clear_libvirt_domains(distro, virt_type, inst_prefix):
try:
restart(distro)
except excp.ProcessExecutionError, e:
LOG.warn("Could not restart libvirt on distro [%s] due to [%s]" % (distro, e.message))
LOG.warn("Could not restart libvirt due to [%s]" % (e))
return
try:
conn = libvirt.open(virt_protocol)

View File

@ -24,7 +24,7 @@ import pprint
from logging.handlers import SysLogHandler
from logging.handlers import WatchedFileHandler
# a list of things we want to replicate from logging levels
# A list of things we want to replicate from logging levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
@ -34,13 +34,13 @@ INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# our new audit level
# http://docs.python.org/howto/logging.html#logging-levels
# Our new audit level
# See: http://docs.python.org/howto/logging.html#logging-levels
logging.AUDIT = logging.DEBUG + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
AUDIT = logging.AUDIT
# methods
# Methods
debug = logging.debug
info = logging.info
warning = logging.warning
@ -50,11 +50,11 @@ exception = logging.exception
critical = logging.critical
log = logging.log
# classes
# Classes
root = logging.root
Formatter = logging.Formatter
# handlers
# Handlers
StreamHandler = logging.StreamHandler
WatchedFileHandler = WatchedFileHandler
SysLogHandler = SysLogHandler

View File

@ -35,10 +35,8 @@ def parse():
version_str = "%prog v" + version.version_string()
help_formatter = IndentedHelpFormatter(width=HELP_WIDTH)
parser = OptionParser(version=version_str, formatter=help_formatter)
parser.add_option("-c", "--component",
action="append",
dest="component",
help="openstack component: %s" % (_format_list(settings.COMPONENT_NAMES)))
# Root options
parser.add_option("-v", "--verbose",
action="append_const",
const=1,
@ -52,7 +50,14 @@ def parse():
help=("perform ACTION but do not actually run any of the commands"
" that would normally complete ACTION: (default: %default)"))
# Install/start/stop/uninstall specific options
base_group = OptionGroup(parser, "Install & uninstall & start & stop specific options")
base_group.add_option("-p", "--persona",
action="store",
type="string",
dest="persona_fn",
metavar="FILE",
help="required persona yaml file to apply")
base_group.add_option("-a", "--action",
action="store",
type="string",
@ -66,28 +71,15 @@ def parse():
metavar="DIR",
help=("empty root DIR for install or "
"DIR with existing components for start/stop/uninstall"))
base_group.add_option("-i", "--ignore-deps",
action="store_false",
dest="ensure_deps",
help="ignore dependencies when performing ACTION")
base_group.add_option("--no-prompt-passwords",
action="store_false",
dest="prompt_for_passwords",
default=True,
help="do not prompt the user for passwords",
)
base_group.add_option("-e", "--ensure-deps",
action="store_true",
dest="ensure_deps",
help="ensure dependencies when performing ACTION (default: %default)",
default=True)
base_group.add_option("-r", "--ref-component",
action="append",
dest="ref_components",
metavar="COMPONENT",
help="component which will not have ACTION applied but will be referenced as if it was (ACTION dependent)")
parser.add_option_group(base_group)
# Uninstall and stop options
stop_un_group = OptionGroup(parser, "Uninstall & stop specific options")
stop_un_group.add_option("-n", "--no-force",
action="store_true",
@ -104,18 +96,16 @@ def parse():
default=False)
parser.add_option_group(un_group)
#extract only what we care about
# Extract only what we care about
(options, args) = parser.parse_args()
output = dict()
output['components'] = options.component or list()
output['dir'] = options.dir or ""
output['dryrun'] = options.dryrun or False
output['ref_components'] = options.ref_components or list()
output['action'] = options.action or ""
output['force'] = not options.force
output['ignore_deps'] = not options.ensure_deps
output['keep_old'] = options.keep_old
output['extras'] = args
output['persona_fn'] = options.persona_fn
output['verbosity'] = len(options.verbosity)
output['prompt_for_passwords'] = options.prompt_for_passwords

View File

@ -15,7 +15,6 @@
# under the License.
from devstack import log as logging
from devstack import settings
from devstack import utils
LOG = logging.getLogger("devstack.packager")
@ -26,29 +25,29 @@ class Packager(object):
self.distro = distro
self.keep_packages = keep_packages
def install_batch(self, pkgs):
def install(self, pkg):
raise NotImplementedError()
def remove_batch(self, pkgs):
if not self.keep_packages:
return self._remove_batch(pkgs)
return []
return list()
def pre_install(self, pkgs, installparams=None):
for packageinfo in pkgs:
preinstallcmds = packageinfo.get(settings.PRE_INSTALL)
if preinstallcmds:
def pre_install(self, pkgs, params=None):
for info in pkgs:
cmds = info.get('pre-install')
if cmds:
LOG.info("Running pre-install commands for package %s.",
packageinfo['name'])
utils.execute_template(*preinstallcmds, params=installparams)
info['name'])
utils.execute_template(*cmds, params=params)
def post_install(self, pkgs, installparams=None):
for packageinfo in pkgs:
postinstallcmds = packageinfo.get(settings.POST_INSTALL)
if postinstallcmds and len(postinstallcmds):
def post_install(self, pkgs, params=None):
for info in pkgs:
cmds = info.get('post-install')
if cmds:
LOG.info("Running post-install commands for package %s.",
packageinfo['name'])
utils.execute_template(*postinstallcmds, params=installparams)
info['name'])
utils.execute_template(*cmds, params=params)
def _remove_batch(self, pkgs):
raise NotImplementedError()

View File

@ -14,30 +14,28 @@
# License for the specific language governing permissions and limitations
# under the License.
from tempfile import TemporaryFile
import time
from devstack import log as logging
from devstack import packager as pack
from devstack import settings
from devstack import shell as sh
LOG = logging.getLogger("devstack.packaging.apt")
#base apt commands
# Base apt commands
APT_GET = ['apt-get']
APT_PURGE = ["purge", "-y"]
APT_REMOVE = ["remove", "-y"]
APT_INSTALL = ["install", "-y"]
APT_AUTOREMOVE = ['autoremove', '-y']
#should we use remove or purge?
# Should we use remove or purge?
APT_DO_REMOVE = APT_PURGE
#make sure its non-interactive
# Make sure its non-interactive
# http://awaseconfigurations.wordpress.com/tag/debian_frontend/
ENV_ADDITIONS = {'DEBIAN_FRONTEND': 'noninteractive'}
#apt separates its pkg names and versions with a equal sign
# Apt separates its pkg names and versions with a equal sign
VERSION_TEMPL = "%s=%s"
@ -60,7 +58,6 @@ class AptPackager(pack.Packager):
**kargs)
def _remove_batch(self, pkgs):
#form the needed commands
cmds = []
which_removed = []
for info in pkgs:
@ -78,53 +75,22 @@ class AptPackager(pack.Packager):
if cmds:
cmd = APT_GET + APT_DO_REMOVE + cmds
self._execute_apt(cmd)
#clean them out (if we did anything)
if which_removed and self.auto_remove:
cmd = APT_GET + APT_AUTOREMOVE
self._execute_apt(cmd)
return which_removed
def install_batch(self, pkgs):
#form the needed commands
cmds = []
for info in pkgs:
name = info['name']
if self._pkg_install_special(name, info):
continue
pkg_full = self._format_pkg(name, info.get("version"))
cmds.append(pkg_full)
#install them
if cmds:
cmd = APT_GET + APT_INSTALL + cmds
def install(self, pkg):
name = pkg['name']
if self._pkg_install_special(name, pkg):
return
else:
pkg_full = self._format_pkg(name, pkg.get("version"))
cmd = APT_GET + APT_INSTALL + [pkg_full]
self._execute_apt(cmd)
def _pkg_remove_special(self, name, pkginfo):
#TODO: maybe this should be a subclass that handles these differences
if name == 'rabbitmq-server' and self.distro.name == settings.UBUNTU11:
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
LOG.info("Handling special remove of %s." % (name))
pkg_full = self._format_pkg(name, pkginfo.get("version"))
cmd = APT_GET + APT_REMOVE + [pkg_full]
self._execute_apt(cmd)
#probably useful to do this
time.sleep(1)
#purge
cmd = APT_GET + APT_PURGE + [pkg_full]
self._execute_apt(cmd)
return True
def _pkg_remove_special(self, name, info):
return False
def _pkg_install_special(self, name, pkginfo):
#TODO: maybe this should be a subclass that handles these differences
if name == 'rabbitmq-server' and self.distro.name == settings.UBUNTU11:
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
#https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
LOG.info("Handling special install of %s." % (name))
#this seems to be a temporary fix for that bug
with TemporaryFile() as f:
pkg_full = self._format_pkg(name, pkginfo.get("version"))
cmd = APT_GET + APT_INSTALL + [pkg_full]
self._execute_apt(cmd, stdout_fh=f, stderr_fh=f)
return True
def _pkg_install_special(self, name, info):
return False

View File

@ -16,23 +16,22 @@
from devstack import log as logging
from devstack import packager as pack
from devstack import settings
from devstack import shell as sh
LOG = logging.getLogger("devstack.packaging.yum")
#root yum command
# Root yum command
YUM_CMD = ['yum']
#tolerant is enabled since we might already have it installed/erased
# Tolerant is enabled since we might already have it installed/erased
YUM_INSTALL = ["install", "-y", "-t"]
YUM_REMOVE = ['erase', '-y', "-t"]
#yum separates its pkg names and versions with a dash
# Yum separates its pkg names and versions with a dash
VERSION_TEMPL = "%s-%s"
#need to relink for rhel (not a bug!)
#TODO: maybe this should be a subclass that handles these differences
# Need to relink for rhel (not a bug!)
# TODO: maybe this should be a subclass that handles these differences
RHEL_RELINKS = {
'python-webob1.0': {
"src": '/usr/lib/python2.6/site-packages/WebOb-1.0.8-py2.6.egg/webob/',
@ -61,14 +60,14 @@ class YumPackager(pack.Packager):
**kargs)
def _remove_special(self, pkgname, pkginfo):
#TODO: maybe this should be a subclass that handles these differences
if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
#we don't return true here so that
#the normal package cleanup happens
sh.unlink(RHEL_RELINKS.get(pkgname).get("tgt"))
# TODO: maybe this should be a subclass that handles these differences
# if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# #we don't return true here so that
# #the normal package cleanup happens
# sh.unlink(RHEL_RELINKS.get(pkgname).get("tgt"))
return False
#TODO: maybe this should be a subclass that handles these differences
# TODO: maybe this should be a subclass that handles these differences
def _install_rhel_relinks(self, pkgname, pkginfo):
full_pkg_name = self._format_pkg_name(pkgname, pkginfo.get("version"))
install_cmd = YUM_CMD + YUM_INSTALL + [full_pkg_name]
@ -82,10 +81,11 @@ class YumPackager(pack.Packager):
sh.symlink(src, tgt)
return True
#TODO: maybe this should be a subclass that handles these differences
# TODO: maybe this should be a subclass that handles these differences
def _install_special(self, pkgname, pkginfo):
if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
return self._install_rhel_relinks(pkgname, pkginfo)
# FIXME:
# if self.distro.name == settings.RHEL6 and pkgname in RHEL_RELINKS:
# return self._install_rhel_relinks(pkgname, pkginfo)
return False
def install_batch(self, pkgs):

View File

@ -18,40 +18,33 @@
from devstack import exceptions as excp
from devstack import log as logging
from devstack import shell as sh
from devstack import settings
LOG = logging.getLogger("devstack.pip")
PIP_UNINSTALL_CMD_OPTS = ['-y', '-q']
PIP_INSTALL_CMD_OPTS = ['-q']
def install(pips, distro):
pipnames = sorted(pips.keys())
root_cmd = distro.commands.get('pip', 'pip')
LOG.info("Installing python packages (%s) using command (%s)" % (", ".join(pipnames), root_cmd))
for name in pipnames:
pipfull = name
pipinfo = pips.get(name)
if pipinfo and pipinfo.get('version'):
version = pipinfo.get('version')
if version is not None:
pipfull = pipfull + "==" + str(version)
LOG.debug("Installing python package (%s)" % (pipfull))
real_cmd = [root_cmd, 'install']
real_cmd += PIP_INSTALL_CMD_OPTS
options = pipinfo.get('options')
if options is not None:
LOG.debug("Using pip options: %s" % (str(options)))
real_cmd += [str(options)]
real_cmd += [pipfull]
sh.execute(*real_cmd, run_as_root=True)
def install(pip, distro):
    """Install a single python package using the distro's pip command.

    :param pip: dict describing the package; must contain 'name', may
        contain 'version' (pinned with ==) and extra command-line 'options'.
    :param distro: distro object used to look up the pip executable.
    """
    pkg_name = pip['name']
    pip_cmd = distro.get_command('pip')
    LOG.audit("Installing python package (%s) using pip command (%s)" % (pkg_name, pip_cmd))
    # Pin the exact version (name==version) when one was requested.
    spec = pkg_name
    version = pip.get('version')
    if version is not None:
        spec = "%s==%s" % (spec, str(version))
    cmd = [pip_cmd, 'install']
    cmd.extend(PIP_INSTALL_CMD_OPTS)
    options = pip.get('options')
    if options is not None:
        LOG.debug("Using pip options: %s" % (str(options)))
        cmd.append(str(options))
    cmd.append(spec)
    sh.execute(*cmd, run_as_root=True)
def uninstall(pips, distro, skip_errors=True):
pipnames = sorted(pips.keys())
root_cmd = distro.commands.get('pip', 'pip')
LOG.info("Uninstalling python packages (%s) using command (%s)" % (", ".join(pipnames), root_cmd))
for name in pipnames:
def uninstall_batch(pips, distro, skip_errors=True):
names = set([p['name'] for p in pips])
root_cmd = distro.get_command('pip')
for name in names:
try:
LOG.debug("Uninstalling python package (%s)" % (name))
cmd = [root_cmd, 'uninstall'] + PIP_UNINSTALL_CMD_OPTS + [str(name)]

View File

@ -14,22 +14,22 @@
# License for the specific language governing permissions and limitations
# under the License..
import yaml
from devstack import env_rc
from devstack import exceptions as excp
from devstack import log as logging
from devstack import settings
from devstack import shell as sh
from devstack import utils
from devstack.progs import common
from devstack import passwords
LOG = logging.getLogger("devstack.progs.actions")
# For actions in this list we will reverse the component order
_REVERSE_ACTIONS = [settings.UNINSTALL, settings.STOP]
REVERSE_ACTIONS = [settings.UNINSTALL, settings.STOP]
# For these actions we will attempt to make an rc file if it does not exist
_RC_FILE_MAKE_ACTIONS = [settings.INSTALL]
RC_FILE_MAKE_ACTIONS = [settings.INSTALL]
# The order of which uninstalls happen + message of what is happening
# (before and after)
@ -138,125 +138,96 @@ PREQ_ACTIONS = {
class ActionRunner(object):
def __init__(self, distro, action, directory, config,
pw_gen, pkg_manager,
**kargs):
def __init__(self, distro, action, cfg, **kargs):
self.distro = distro
self.action = action
self.directory = directory
self.cfg = config
self.pw_gen = pw_gen
self.pkg_manager = pkg_manager
self.kargs = kargs
self.components = dict()
def_components = common.get_default_components()
unclean_components = kargs.pop("components")
if not unclean_components:
self.components = def_components
else:
for (c, opts) in unclean_components.items():
if opts is None and c in def_components:
self.components[c] = def_components[c]
elif opts is None:
self.components[c] = list()
else:
self.components[c] = opts
self.cfg = cfg
self.pw_gen = passwords.PasswordGenerator(self.cfg, kargs.get('prompt_for_passwords', True))
pkg_cls = distro.get_packager_factory()
self.keep_old = kargs.get('keep_old')
self.pkg_manager = pkg_cls(self.distro, self.keep_old)
self.force = kargs.get('force', False)
self.ignore_deps = kargs.get('ignore_deps', False)
self.ref_components = kargs.get("ref_components")
self.gen_rc = action in _RC_FILE_MAKE_ACTIONS
self.kargs = kargs
def _get_components(self):
return dict(self.components)
def _apply_reverse(self, action, component_order):
adjusted_order = list(component_order)
if action in REVERSE_ACTIONS:
adjusted_order.reverse()
return adjusted_order
def _order_components(self, components):
adjusted_components = dict(components)
if self.ignore_deps:
return (adjusted_components, list(components.keys()))
all_components = self.distro.resolve_component_dependencies(
list(components.keys())
)
component_diff = set(all_components.keys()).difference(components.keys())
if component_diff:
LOG.info("Activating dependencies: [%s]",
", ".join(sorted(component_diff))
)
for new_component in component_diff:
adjusted_components[new_component] = []
return (adjusted_components, utils.get_components_order(all_components))
def _load_persona(self, persona_fn):
persona_fn = sh.abspth(persona_fn)
LOG.audit("Loading persona from file [%s]", persona_fn)
contents = ''
with open(persona_fn, "r") as fh:
contents = fh.read()
return self._verify_persona(yaml.load(contents), persona_fn)
def _inject_references(self, components):
ref_components = utils.parse_components(self.ref_components)
adjusted_components = dict(components)
for c in ref_components.keys():
if c not in components:
adjusted_components[c] = ref_components.get(c)
return adjusted_components
def _verify_persona(self, persona, fn):
# Some sanity checks
try:
if self.distro.name not in persona['supports']:
raise RuntimeError("Persona does not support distro %s"
% (self.distro.name))
for c in persona['components']:
if not self.distro.known_component(c):
raise RuntimeError("Distro %s does not support component %s" %
(self.distro.name, c))
except (KeyError, RuntimeError) as e:
msg = ("Could not validate persona defined in [%s] due to: %s"
% (fn, e))
raise excp.ConfigException(msg)
return persona
def _instantiate_components(self, components):
all_instances = dict()
for component in components.keys():
cls = self.distro.get_component_action_class(component,
self.action)
LOG.debug('instantiating %s to handle %s for %s',
cls, self.action, component)
instance = cls(component_name=component,
instances=all_instances,
runner=self,
root_dir=self.directory,
component_options=self.distro.components[component],
keep_old=self.kargs.get("keep_old")
)
all_instances[component] = instance
return all_instances
def _construct_instances(self, persona, action, root_dir):
components = persona['components'] # Required
desired_subsystems = persona.get('subsystems', dict()) # Not required
instances = dict()
for c in components:
(cls, my_info) = self.distro.extract_component(c, action)
LOG.debug("Constructing class %s" % (cls))
cls_kvs = dict()
cls_kvs['runner'] = self
cls_kvs['component_dir'] = sh.joinpths(root_dir, c)
cls_kvs['subsystem_info'] = my_info.pop('subsystems', dict())
cls_kvs['all_instances'] = instances
cls_kvs['name'] = c
cls_kvs['keep_old'] = self.keep_old
cls_kvs['desired_subsystems'] = set(desired_subsystems.get(c, list()))
# The above is not overrideable...
for (k, v) in my_info.items():
if k not in cls_kvs:
cls_kvs[k] = v
LOG.debug("Using arg map %s", cls_kvs)
cls_args = list()
LOG.debug("Using arg list %s", cls_args)
instances[c] = cls(*cls_args, **cls_kvs)
return instances
def _run_preqs(self, components, component_order):
if not (self.action in PREQ_ACTIONS):
return
(check_functor, preq_action) = PREQ_ACTIONS[self.action]
instances = self._instantiate_components(components)
preq_components = dict()
def _verify_components(self, component_order, instances):
LOG.info("Verifying that the components are ready to rock-n-roll.")
for c in component_order:
instance = instances[c]
if check_functor(instance):
preq_components[c] = components[c]
if preq_components:
LOG.info("Having to activate prerequisite action [%s] for %s components." % (preq_action, len(preq_components)))
preq_runner = ActionRunner(distro=self.distro,
action=preq_action,
directory=self.directory,
config=self.cfg,
pw_gen=self.pw_gen,
pkg_manager=self.pkg_manager,
components=preq_components,
**self.kargs)
preq_runner.run()
instance.verify()
def _pre_run(self, instances, component_order):
if not sh.isdir(self.directory):
sh.mkdir(self.directory)
LOG.info("Verifying that the components are ready to rock-n-roll.")
for component in component_order:
inst = instances[component]
inst.verify()
def _warm_components(self, component_order, instances):
LOG.info("Warming up your component configurations (ie so you won't be prompted later)")
for component in component_order:
inst = instances[component]
inst.warm_configs()
if self.gen_rc:
writer = env_rc.RcWriter(self.cfg, self.pw_gen, self.directory)
if not sh.isfile(settings.OSRC_FN):
LOG.info("Generating a file at [%s] that will contain your environment settings." % (settings.OSRC_FN))
writer.write(settings.OSRC_FN)
else:
LOG.info("Updating a file at [%s] that contains your environment settings." % (settings.OSRC_FN))
am_upd = writer.update(settings.OSRC_FN)
LOG.info("Updated [%s] settings in rc file [%s]" % (am_upd, settings.OSRC_FN))
for c in component_order:
instance = instances[c]
instance.warm_configs()
def _run_instances(self, instances, component_order):
component_order = self._apply_reverse(component_order)
LOG.info("Running in the following order: %s" % ("->".join(component_order)))
for (start_msg, functor, end_msg) in ACTION_MP[self.action]:
def _write_rc_file(self, root_dir):
writer = env_rc.RcWriter(self.cfg, self.pw_gen, root_dir)
if not sh.isfile(settings.OSRC_FN):
LOG.info("Generating a file at [%s] that will contain your environment settings." % (settings.OSRC_FN))
writer.write(settings.OSRC_FN)
else:
LOG.info("Updating a file at [%s] that contains your environment settings." % (settings.OSRC_FN))
am_upd = writer.update(settings.OSRC_FN)
LOG.info("Updated [%s] settings in rc file [%s]" % (am_upd, settings.OSRC_FN))
def _run_instances(self, action, component_order, instances):
for (start_msg, functor, end_msg) in ACTION_MP[action]:
for c in component_order:
instance = instances[c]
if start_msg:
@ -271,19 +242,33 @@ class ActionRunner(object):
else:
raise
def _apply_reverse(self, component_order):
adjusted_order = list(component_order)
if self.action in _REVERSE_ACTIONS:
adjusted_order.reverse()
return adjusted_order
def _run_action(self, persona, action, root_dir):
LOG.info("Running action [%s] using root directory [%s]" % (action, root_dir))
instances = self._construct_instances(persona, action, root_dir)
if action in PREQ_ACTIONS:
(check_functor, preq_action) = PREQ_ACTIONS[action]
checks_passed_components = list()
for (c, instance) in instances.items():
if check_functor(instance):
checks_passed_components.append(c)
if checks_passed_components:
LOG.info("Activating prerequisite action [%s] requested by (%s) components."
% (preq_action, ", ".join(checks_passed_components)))
self._run_action(persona, preq_action, root_dir)
component_order = self._apply_reverse(action, persona['components'])
LOG.info("Activating components [%s] (in that order) for action [%s]" %
("->".join(component_order), action))
self._verify_components(component_order, instances)
self._warm_components(component_order, instances)
if action in RC_FILE_MAKE_ACTIONS:
self._write_rc_file(root_dir)
self._run_instances(action, component_order, instances)
def _start(self, components, component_order):
LOG.info("Activating components required to complete action [%s]" % (self.action))
instances = self._instantiate_components(components)
self._pre_run(instances, component_order)
self._run_preqs(components, component_order)
self._run_instances(instances, component_order)
def _setup_root(self, root_dir):
if not sh.isdir(root_dir):
sh.mkdir(root_dir)
def run(self):
(components, component_order) = self._order_components(self._get_components())
self._start(self._inject_references(components), component_order)
def run(self, persona_fn, root_dir):
persona = self._load_persona(persona_fn)
self._setup_root(root_dir)
self._run_action(persona, self.action, root_dir)

View File

@ -1,190 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from devstack import cfg
from devstack import exceptions as excp
from devstack import settings
from devstack import shell as sh
from devstack.components import db
from devstack.components import glance
from devstack.components import horizon
from devstack.components import keystone
from devstack.components import keystone_client
from devstack.components import melange
from devstack.components import melange_client
from devstack.components import nova
from devstack.components import nova_client
from devstack.components import novnc
from devstack.components import quantum
from devstack.components import quantum_client
from devstack.components import rabbit
from devstack.components import swift
from devstack.packaging import apt
from devstack.packaging import yum
# This determines what classes to use to install/uninstall/...
# ACTION_CLASSES = {
# settings.INSTALL: {
# settings.DB: db.DBInstaller,
# settings.GLANCE: glance.GlanceInstaller,
# settings.HORIZON: horizon.HorizonInstaller,
# settings.KEYSTONE: keystone.KeystoneInstaller,
# settings.KEYSTONE_CLIENT: keystone_client.KeyStoneClientInstaller,
# settings.MELANGE: melange.MelangeInstaller,
# settings.MELANGE_CLIENT: melange_client.MelangeClientInstaller,
# settings.NOVA: nova.NovaInstaller,
# settings.NOVA_CLIENT: nova_client.NovaClientInstaller,
# settings.NOVNC: novnc.NoVNCInstaller,
# settings.QUANTUM: quantum.QuantumInstaller,
# settings.QUANTUM_CLIENT: quantum_client.QuantumClientInstaller,
# settings.RABBIT: rabbit.RabbitInstaller,
# settings.SWIFT: swift.SwiftInstaller,
# },
# settings.UNINSTALL: {
# settings.DB: db.DBUninstaller,
# settings.GLANCE: glance.GlanceUninstaller,
# settings.HORIZON: horizon.HorizonUninstaller,
# settings.KEYSTONE: keystone.KeystoneUninstaller,
# settings.KEYSTONE_CLIENT: keystone_client.KeyStoneClientUninstaller,
# settings.MELANGE: melange.MelangeUninstaller,
# settings.MELANGE_CLIENT: melange_client.MelangeClientUninstaller,
# settings.NOVA: nova.NovaUninstaller,
# settings.NOVA_CLIENT: nova_client.NovaClientUninstaller,
# settings.NOVNC: novnc.NoVNCUninstaller,
# settings.QUANTUM: quantum.QuantumUninstaller,
# settings.QUANTUM_CLIENT: quantum_client.QuantumClientUninstaller,
# settings.RABBIT: rabbit.RabbitUninstaller,
# settings.SWIFT: swift.SwiftUninstaller,
# },
# settings.START: {
# settings.DB: db.DBRuntime,
# settings.GLANCE: glance.GlanceRuntime,
# settings.HORIZON: horizon.HorizonRuntime,
# settings.KEYSTONE: keystone.KeystoneRuntime,
# settings.KEYSTONE_CLIENT: keystone_client.KeyStoneClientRuntime,
# settings.MELANGE: melange.MelangeRuntime,
# settings.MELANGE_CLIENT: melange_client.MelangeClientRuntime,
# settings.NOVA: nova.NovaRuntime,
# settings.NOVA_CLIENT: nova_client.NovaClientRuntime,
# settings.NOVNC: novnc.NoVNCRuntime,
# settings.QUANTUM: quantum.QuantumRuntime,
# settings.QUANTUM_CLIENT: quantum_client.QuantumClientRuntime,
# settings.RABBIT: rabbit.RabbitRuntime,
# settings.SWIFT: swift.SwiftRuntime,
# },
# }
# # Just a copy
# ACTION_CLASSES[settings.STOP] = ACTION_CLASSES[settings.START]
# Used only for figuring out deps
_FAKE_ROOT_DIR = tempfile.gettempdir()
# This map controls which distro has
# which package management class
_PKGR_MAP = {
settings.UBUNTU11: apt.AptPackager,
settings.RHEL6: yum.YumPackager,
settings.FEDORA16: yum.YumPackager,
}
def get_default_components():
def_components = dict()
def_components[settings.GLANCE] = [
glance.GAPI,
glance.GREG,
]
def_components[settings.KEYSTONE] = []
def_components[settings.NOVA] = [
nova.NAPI,
nova.NCAUTH,
nova.NCERT,
nova.NCPU,
nova.NNET,
nova.NOBJ,
nova.NSCHED,
nova.NXVNC,
nova.NVOL,
]
def_components[settings.NOVNC] = []
def_components[settings.HORIZON] = []
def_components[settings.DB] = []
def_components[settings.RABBIT] = []
return def_components
def format_secs_taken(secs):
output = "%.03f seconds" % (secs)
output += " or %.02f minutes" % (secs / 60.0)
return output
# def get_action_cls(action_name, component_name, distro=None):
# action_cls_map = ACTION_CLASSES.get(action_name)
# if not action_cls_map:
# raise excp.StackException("Action %s has no component to class mapping" % (action_name))
# cls = action_cls_map.get(component_name)
# if not cls:
# raise excp.StackException("Action %s has no class entry for component %s" % (action_name, component_name))
# return cls
def get_packager(distro, keep_packages):
cls = _PKGR_MAP.get(distro)
if not cls:
msg = "No package manager found for distro %s!" % (distro)
raise excp.StackException(msg)
return cls(distro, keep_packages)
def get_config(cfg_fn=None):
if not cfg_fn:
cfg_fn = sh.canon_path(settings.STACK_CONFIG_LOCATION)
config_instance = cfg.StackConfigParser()
config_instance.read(cfg_fn)
return config_instance
# def get_components_deps(runner,
# action_name,
# base_components,
# root_dir=None,
# distro=None,
# ):
# all_components = dict()
# active_names = list(base_components)
# root_dir = root_dir or _FAKE_ROOT_DIR
# while len(active_names):
# component = active_names.pop()
# component_opts = base_components.get(component) or list()
# cls = get_action_cls(action_name, component, distro)
# instance = cls(instances=list(),
# runner=runner,
# root_dir=root_dir,
# component_options=component_opts,
# keep_old=False
# )
# deps = instance.get_dependencies() or set()
# all_components[component] = set(deps)
# for d in deps:
# if d not in all_components and d not in active_names:
# active_names.append(d)
# return all_components

View File

@ -23,17 +23,17 @@ class RunnerBase(object):
self.trace_dir = trace_dir
def unconfigure(self):
#cleans up anything configured by
#this runner for any apps for this component
#returns how many files unconfigured
# Cleans up anything configured by
# this runner for any apps for this component
# returns how many files unconfigured
return 0
def configure(self, app_name, runtime_info):
#returns how many files configured
# Returns how many files configured
return 0
def start(self, name, runtime_info):
#returns a file name that contains what was started
# Returns a file name that contains what was started
return None
def stop(self, name):

View File

@ -31,18 +31,18 @@ from devstack import trace as tr
LOG = logging.getLogger("devstack.runners.fork")
#maximum for the number of available file descriptors (when not found)
# Maximum for the number of available file descriptors (when not found)
MAXFD = 2048
#how many times we try to kill and how much sleep (seconds) between each try
# How many times we try to kill and how much sleep (seconds) between each try
MAX_KILL_TRY = 5
SLEEP_TIME = 1
#my type
# My runner type
RUN_TYPE = settings.RUN_TYPE_FORK
TYPE = settings.RUN_TYPE_TYPE
#trace constants
# Trace constants
PID_FN = "PID_FN"
STDOUT_FN = "STDOUT_FN"
STDERR_FN = "STDERR_FN"
@ -50,7 +50,7 @@ ARGS = "ARGS"
NAME = "NAME"
FORK_TEMPL = "%s.fork"
#run fork cmds as root?
# Run fork cmds as root?
ROOT_GO = True
@ -90,7 +90,7 @@ class ForkRunner(base.RunnerBase):
if sh.isfile(pid_file) and sh.isfile(trace_fn):
pid = int(sh.load_file(pid_file).strip())
(killed, attempts) = self._stop_pid(pid)
#trash the files
# Trash the files if it worked
if killed:
LOG.debug("Killed pid %s after %s attempts" % (pid, attempts))
LOG.debug("Removing pid file %s" % (pid_file))
@ -115,22 +115,22 @@ class ForkRunner(base.RunnerBase):
return (pidfile, stderr, stdout)
def _fork_start(self, program, appdir, pid_fn, stdout_fn, stderr_fn, *args):
#first child, not the real program
# First child, not the real program
pid = os.fork()
if pid == 0:
#upon return the calling process shall be the session
#leader of this new session,
#shall be the process group leader of a new process group,
#and shall have no controlling terminal.
# Upon return the calling process shall be the session
# leader of this new session,
# shall be the process group leader of a new process group,
# and shall have no controlling terminal.
os.setsid()
pid = os.fork()
#fork to get daemon out - this time under init control
#and now fully detached (no shell possible)
# Fork to get daemon out - this time under init control
# and now fully detached (no shell possible)
if pid == 0:
#move to where application should be
# Move to where application should be
if appdir:
os.chdir(appdir)
#close other fds
# Close other fds (or try)
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
mkfd = limits[1]
if mkfd == resource.RLIM_INFINITY:
@ -141,27 +141,27 @@ class ForkRunner(base.RunnerBase):
except OSError:
#not open, thats ok
pass
#now adjust stderr and stdout
# Now adjust stderr and stdout
if stdout_fn:
stdoh = open(stdout_fn, "w")
os.dup2(stdoh.fileno(), sys.stdout.fileno())
if stderr_fn:
stdeh = open(stderr_fn, "w")
os.dup2(stdeh.fileno(), sys.stderr.fileno())
#now exec...
#the arguments to the child process should
#start with the name of the command being run
# Now exec...
# Note: The arguments to the child process should
# start with the name of the command being run
prog_little = os.path.basename(program)
actualargs = [prog_little] + list(args)
os.execlp(program, *actualargs)
else:
#write out the child pid
# Write out the child pid
contents = str(pid) + os.linesep
sh.write_file(pid_fn, contents, quiet=True)
#not exit or sys.exit, this is recommended
#since it will do the right cleanups that we want
#not calling any atexit functions, which would
#be bad right now
# Not exit or sys.exit, this is recommended
# since it will do the right cleanups that we want
# not calling any atexit functions, which would
# be bad right now
os._exit(0)
def _do_trace(self, fn, kvs):

View File

@ -30,25 +30,25 @@ from devstack import utils
LOG = logging.getLogger("devstack.runners.screen")
#my type
# My running type
RUN_TYPE = settings.RUN_TYPE_SCREEN
TYPE = settings.RUN_TYPE_TYPE
#trace constants
# Trace constants
SCREEN_TEMPL = "%s.screen"
ARGS = "ARGS"
NAME = "NAME"
SESSION_ID = 'SESSION_ID'
#screen session name
# Screen session name
SESSION_NAME = 'stack'
SESSION_DEF_TITLE = SESSION_NAME
SESSION_NAME_MTCHER = re.compile(r"^\s*([\d]+\.%s)\s*(.*)$" % (SESSION_NAME))
#how we setup screens status bar
# How we setup screens status bar
STATUS_BAR_CMD = r'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"'
#cmds
# Screen commands/template commands
SESSION_INIT = ['screen', '-d', '-m', '-S', SESSION_NAME, '-t', SESSION_DEF_TITLE, '-s', "/bin/bash"]
BAR_INIT = ['screen', '-r', SESSION_NAME, '-X', STATUS_BAR_CMD]
CMD_INIT = ['screen', '-S', '%SESSION_NAME%', '-X', 'screen', '-t', "%NAME%"]
@ -58,17 +58,17 @@ CMD_START = ['screen', '-S', '%SESSION_NAME%', '-p', "%NAME%", '-X', 'stuff', "\
LIST_CMD = ['screen', '-ls']
SCREEN_KILLER = ['screen', '-X', '-S', '%SCREEN_ID%', 'quit']
#where our screen sockets will go
# Where our screen sockets will go
SCREEN_SOCKET_DIR_NAME = "devstack-screen-sockets"
SCREEN_SOCKET_PERM = 0700
#used to wait until started before we can run the actual start cmd
# Used to wait until started before we can run the actual start cmd
WAIT_ONLINE_TO = settings.WAIT_ALIVE_SECS
#run screen as root?
# Run screen as root?
ROOT_GO = True
#screen RC file
# Screen RC file
SCREEN_RC = settings.RC_FN_TEMPL % ('screen')
@ -104,7 +104,7 @@ class ScreenRunner(base.RunnerBase):
run_as_root=ROOT_GO,
env_overrides=self._get_env(),
check_exit_code=False)
#we have really no way of knowing if it worked or not, screen sux...
# We have really no way of knowing if it worked or not, screen sux...
wipe_cmd = self._gen_cmd(CMD_WIPE, mp)
sh.execute(*wipe_cmd,
shell=True,
@ -189,7 +189,7 @@ class ScreenRunner(base.RunnerBase):
shell=True,
run_as_root=ROOT_GO,
env_overrides=self._get_env())
#we have really no way of knowing if it worked or not, screen sux...
# We have really no way of knowing if it worked or not, screen sux...
def _do_socketdir_init(self, socketdir, perm):
LOG.debug("Making screen socket directory [%s] (with permissions %o)" % (socketdir, perm))

View File

@ -26,27 +26,27 @@ from devstack import utils
LOG = logging.getLogger("devstack.runners.upstart")
#my type
# My run type
RUN_TYPE = settings.RUN_TYPE_UPSTART
TYPE = settings.RUN_TYPE_TYPE
#trace constants
# Trace constants
UPSTART_TEMPL = "%s.upstart"
ARGS = "ARGS"
NAME = "NAME"
#upstart event namings
# Upstart event namings
START_EVENT_SUFFIX = "_start"
STOP_EVENT_SUFFIX = "_stop"
#where upstart configs go
# Where upstart configs go
CONF_ROOT = "/etc/init"
CONF_EXT = ".conf"
#shared template
# Shared template
UPSTART_CONF_TMPL = 'upstart.conf'
#how we emit events to upstart
# How we emit events to upstart
EMIT_BASE_CMD = ["/sbin/initctl", "emit"]

View File

@ -15,28 +15,16 @@
# under the License.
import os
import re
import sys
# These also have meaning outside python,
# ie in the pkg/pip listings so update there also!
# FIXME: Delete
UBUNTU11 = "ubuntu-oneiric"
RHEL6 = "rhel-6"
FEDORA16 = "fedora-16"
# What this program is called
PROG_NICE_NAME = "DEVSTACKpy"
# These 2 identify the json post and pre install sections
PRE_INSTALL = 'pre-install'
POST_INSTALL = 'post-install'
# Ip version constants for network ip detection
IPV4 = 'IPv4'
IPV6 = 'IPv6'
#how long to wait for a service to startup
# How long to wait for a service to startup
WAIT_ALIVE_SECS = 5
# Component name mappings
@ -51,7 +39,7 @@ KEYSTONE = "keystone"
KEYSTONE_CLIENT = 'keystone-client'
DB = "db"
RABBIT = "rabbit"
NOVNC = 'n-vnc'
NOVNC = 'no-vnc'
XVNC = 'xvnc'
MELANGE = 'melange'
MELANGE_CLIENT = 'melange-client'
@ -68,29 +56,6 @@ COMPONENT_NAMES = [
MELANGE, MELANGE_CLIENT,
]
# When a component is asked for it may
# need another component, that dependency
# mapping is listed here. A topological sort
# will be applied to determine the exact order.
# COMPONENT_DEPENDENCIES = {
# DB: [],
# KEYSTONE_CLIENT: [],
# RABBIT: [],
# GLANCE: [KEYSTONE, DB],
# KEYSTONE: [DB, KEYSTONE_CLIENT],
# NOVA: [KEYSTONE, GLANCE, DB, RABBIT, NOVA_CLIENT],
# SWIFT: [KEYSTONE_CLIENT],
# NOVA_CLIENT: [],
# # Horizon depends on glances client (which should really be a client package)
# HORIZON: [KEYSTONE_CLIENT, GLANCE, NOVA_CLIENT, QUANTUM_CLIENT],
# # More of quantums deps come from its module function get_dependencies
# QUANTUM: [],
# NOVNC: [NOVA],
# QUANTUM_CLIENT: [],
# MELANGE: [DB],
# MELANGE_CLIENT: [],
# }
# Different run types supported
RUN_TYPE_FORK = "FORK"
RUN_TYPE_UPSTART = "UPSTART"
@ -125,7 +90,6 @@ ACTIONS = [INSTALL, UNINSTALL, START, STOP]
# Where the configs and templates should be at.
STACK_BIN_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
STACK_CONFIG_DIR = os.path.join(STACK_BIN_DIR, "conf")
STACK_DISTRO_DIR = os.path.join(STACK_CONFIG_DIR, "distros")
STACK_TEMPLATE_DIR = os.path.join(STACK_CONFIG_DIR, "templates")
STACK_PKG_DIR = os.path.join(STACK_CONFIG_DIR, "pkgs")
STACK_PIP_DIR = os.path.join(STACK_CONFIG_DIR, "pips")
STACK_CONFIG_LOCATION = os.path.join(STACK_CONFIG_DIR, "stack.ini")

View File

@ -178,11 +178,11 @@ def execute(*cmd, **kwargs):
raise excp.ProcessExecutionError(exit_code=rc, stdout=stdout, \
stderr=stderr, cmd=str_cmd)
else:
#log it anyway
# Log it anyway
if rc not in check_exit_code:
LOG.debug("A failure may of just happened when running command \"%s\" [%s] (%s, %s)", \
str_cmd, rc, stdout.strip(), stderr.strip())
#log for debugging figuring stuff out
# Log for debugging figuring stuff out
LOG.debug("Received stdout: %s" % (stdout.strip()))
LOG.debug("Received stderr: %s" % (stderr.strip()))
return (stdout, stderr)
@ -197,7 +197,7 @@ def abspth(path):
def shellquote(text):
#TODO since there doesn't seem to be a standard lib that actually works use this way...
# TODO since there doesn't seem to be a standard lib that actually works use this way...
do_adjust = False
for srch in SHELL_QUOTE_REPLACERS.keys():
if text.find(srch) != -1:
@ -369,16 +369,9 @@ def load_file(fn, quiet=False):
if not quiet:
LOG.audit("Loading data from file %s", fn)
data = ""
try:
if not DRYRUN_MODE:
with open(fn, "r") as f:
data = f.read()
except IOError as e:
if DRYRUN_MODE:
# We still need to actually load something (ie the json install files so thats)
# Why this is in the exception path.
LOG.audit("Passing on load exception since in dry-run mode")
else:
raise e
if not quiet:
LOG.audit("Loaded (%d) bytes from file %s", len(data), fn)
return data
@ -500,16 +493,16 @@ def create_loopback_file(fname, size, bsize=1024, fs_type='ext3', run_as_root=Fa
'count=0', 'seek=%d' % size]
mkfs_cmd = ['mkfs.%s' % fs_type, '-f', '-i', 'size=%d' % bsize, fname]
# make sure folder exists
# Make sure folder exists
files = mkdirslist(dirname(fname))
# create file
# Create file
touch_file(fname)
# fill with zeroes
# Fill with zeroes
execute(*dd_cmd, run_as_root=run_as_root)
# create fs on the file
# Create fs on the file
execute(*mkfs_cmd, run_as_root=run_as_root)
return files

View File

@ -15,16 +15,17 @@
# under the License.
import json
import os
from devstack import date
from devstack import exceptions as excp
from devstack import shell as sh
#trace per line output and file extension formats
TRACE_FMT = "%s - %s\n"
# Trace per line output format and file extension formats
TRACE_FMT = ("%s - %s" + os.linesep)
TRACE_EXT = ".trace"
#common trace actions
# Common trace actions
CFG_WRITING_FILE = "CFG_WRITING_FILE"
SYMLINK_MAKE = "SYMLINK_MAKE"
PKG_INSTALL = "PKG_INSTALL"
@ -35,12 +36,12 @@ DOWNLOADED = "DOWNLOADED"
AP_STARTED = "AP_STARTED"
PIP_INSTALL = 'PIP_INSTALL'
#trace file types
# Common trace file types (or the expected common ones)
PY_TRACE = "python"
IN_TRACE = "install"
START_TRACE = "start"
#used to note version of trace
# Used to note version of trace
TRACE_VERSION = "TRACE_VERSION"
TRACE_VER = 0x1
@ -95,12 +96,9 @@ class TraceWriter(object):
what['from'] = uri
self.trace(DOWNLOADED, json.dumps(what))
def pip_installed(self, name, pip_info):
def pip_installed(self, pip_info):
self._start()
what = dict()
what['name'] = name
what['pip_meta'] = pip_info
self.trace(PIP_INSTALL, json.dumps(what))
self.trace(PIP_INSTALL, json.dumps(pip_info))
def dirs_made(self, *dirs):
self._start()
@ -111,12 +109,9 @@ class TraceWriter(object):
self._start()
self.trace(FILE_TOUCHED, fn)
def package_installed(self, name, pkg_info):
def package_installed(self, pkg_info):
self._start()
what = dict()
what['name'] = name
what['pkg_meta'] = pkg_info
self.trace(PKG_INSTALL, json.dumps(what))
self.trace(PKG_INSTALL, json.dumps(pkg_info))
def started_info(self, name, info_fn):
self._start()
@ -187,7 +182,7 @@ class TraceReader(object):
return locations
def _sort_paths(self, pths):
#ensure in ok order (ie /tmp is before /)
# Ensure in correct order (ie /tmp is before /)
pths = list(set(pths))
pths.sort()
pths.reverse()
@ -221,14 +216,11 @@ class TraceReader(object):
def symlinks_made(self):
lines = self.read()
files = list()
links = list()
for (cmd, action) in lines:
if cmd == SYMLINK_MAKE and len(action):
files.append(action)
#ensure in ok order (ie /tmp is before /)
files.sort()
files.reverse()
return files
links.append(action)
return links
def files_configured(self):
lines = self.read()
@ -242,7 +234,7 @@ class TraceReader(object):
def pips_installed(self):
lines = self.read()
pips_installed = dict()
pips_installed = list()
pip_list = list()
for (cmd, action) in lines:
if cmd == PIP_INSTALL and len(action):
@ -250,14 +242,12 @@ class TraceReader(object):
for pip_data in pip_list:
pip_info_full = json.loads(pip_data)
if type(pip_info_full) is dict:
name = pip_info_full.get('name')
if name:
pips_installed[name] = pip_info_full.get('pip_meta')
pips_installed.append(pip_info_full)
return pips_installed
def packages_installed(self):
lines = self.read()
pkgs_installed = dict()
pkgs_installed = list()
pkg_list = list()
for (cmd, action) in lines:
if cmd == PKG_INSTALL and len(action):
@ -265,7 +255,5 @@ class TraceReader(object):
for pkg_data in pkg_list:
pkg_info = json.loads(pkg_data)
if type(pkg_info) is dict:
name = pkg_info.get('name')
if name:
pkgs_installed[name] = pkg_info.get('pkg_meta')
pkgs_installed.append(pkg_info)
return pkgs_installed

View File

@ -21,7 +21,6 @@ import distutils.version
import json
import netifaces
import os
import platform
import random
import re
import socket
@ -51,7 +50,8 @@ ALL_NUMS = re.compile(r"^\d+$")
START_NUMS = re.compile(r"^(\d+)(\D+)")
STAR_VERSION = 0
#thx cowsay
# Thx cowsay
# See: http://www.nog.net/~tony/warez/cowsay.shtml
COWS = dict()
COWS['happy'] = r'''
{header}
@ -139,12 +139,15 @@ def to_bytes(text):
return byte_val
def import_module(module_name):
def import_module(module_name, quiet=True):
try:
__import__(module_name)
return sys.modules.get(module_name, None)
except ImportError:
return None
if quiet:
return None
else:
raise
def load_json(fn):
@ -206,6 +209,8 @@ def get_host_ip():
were to be sent out to some well known address on the Internet. In this
case, a private address is used, but the specific address does not
matter much. No traffic is actually sent.
Adjusted from nova code...
"""
ip = None
try:
@ -216,7 +221,7 @@ def get_host_ip():
ip = addr
except socket.error:
pass
#attempt to find it
# Ettempt to find it
if not ip:
interfaces = get_interfaces()
for (_, net_info) in interfaces.items():
@ -227,7 +232,7 @@ def get_host_ip():
if first_oct and first_oct not in PRIVATE_OCTS:
ip = a_ip
break
#just return a localhost version then
# Just return a localhost version then
if not ip:
ip = DEF_IP
return ip
@ -246,90 +251,21 @@ def get_interfaces():
interface_addresses = netifaces.ifaddresses(intfc)
ip6 = interface_addresses.get(netifaces.AF_INET6)
if ip6 and len(ip6):
#just take the first
# Just take the first
interface_info[settings.IPV6] = ip6[0]
ip4 = interface_addresses.get(netifaces.AF_INET)
if ip4 and len(ip4):
#just take the first
# Just take the first
interface_info[settings.IPV4] = ip4[0]
#there are others but this is good for now
# Note: there are others but this is good for now..
interfaces[intfc] = interface_info
return interfaces
def get_components_order(components):
    """Compute an install order for components via topological sorting.

    :param components: mapping of component name -> iterable of the names
                       that component depends on
    :returns: list of component names with dependencies first, so a
              component always appears after everything it depends on
    :raises excp.DependencyException: if the dependency graph has a cycle

    Fixes over the previous version:
      * the empty case now returns a list (it used to return a dict,
        inconsistent with the non-empty return type)
      * a dependency on a name that has no entry in ``components`` no longer
        crashes with a TypeError; such names are treated as leaf components
      * the redundant pre-pass DFS cycle check was dropped - the leftover
        edge check at the end of Kahn's algorithm already detects any cycle
    """
    if not components:
        return list()
    # Work on copies so the caller's mapping isn't mutated.
    remaining = dict()
    for (name, deps) in components.items():
        remaining[name] = set(deps)

    def still_referenced(name):
        # True if any *other* component still lists name as a dependency
        # (self-dependencies are ignored, matching the original behavior).
        for (other, deps) in remaining.items():
            if other != name and name in deps:
                return True
        return False

    # Roots are the components nothing else depends on; if there are none,
    # every node has an incoming edge, which implies a cycle.
    no_edges = set(name for name in remaining if not still_referenced(name))
    if not no_edges:
        msg = "Components specified have no root components, there is most likely a dependency cycle!"
        raise excp.DependencyException(msg)
    # Kahn's algorithm: repeatedly take a node with no incoming edges, emit
    # it, and remove its outgoing edges, releasing any dependency that has
    # become unreferenced.
    # See: https://en.wikipedia.org/wiki/Topological_sorting
    ordering = list()
    while no_edges:
        node = no_edges.pop()
        ordering.append(node)
        # Unknown dependency names have no entry; treat them as leaves.
        node_deps = remaining.get(node, set())
        while node_deps:
            dep = node_deps.pop()
            if not still_referenced(dep):
                no_edges.add(dep)
    # Any edge left over means a cycle kept some node from being freed.
    for deps in remaining.values():
        if deps:
            msg = "Your specified components have at least one cycle!"
            raise excp.DependencyException(msg)
    # We emitted the depended-upon-by-nobody nodes first; callers want
    # dependencies first, so reverse into install order.
    ordering.reverse()
    return ordering
def format_secs_taken(secs):
    """Return *secs* formatted as both seconds and minutes."""
    secs_part = "%.03f seconds" % (secs)
    mins_part = " or %.02f minutes" % (secs / 60.0)
    return secs_part + mins_part
def joinlinesep(*pieces):
@ -418,7 +354,8 @@ def param_replace(text, replacements, ignore_missing=False):
def _get_welcome_stack():
possibles = list()
#thank you figlet ;)
# Thank you figlet ;)
# See: http://www.figlet.org/
possibles.append(r'''
___ ____ _____ _ _ ____ _____ _ ____ _ __
/ _ \| _ \| ____| \ | / ___|_ _|/ \ / ___| |/ /
@ -474,7 +411,7 @@ def center_text(text, fill, max_len):
def _welcome_slang():
potentials = list()
potentials.append("And now for something completely different!")
return random.choice(potentials).strip("\n\r")
return random.choice(potentials)
def color_text(text, color, bold=False,
@ -503,7 +440,8 @@ def _color_blob(text, text_color):
def _goodbye_header(worked):
#cowsay headers
# Cowsay headers
# See: http://www.nog.net/~tony/warez/cowsay.shtml
potentials_oks = list()
potentials_oks.append(r'''
___________
@ -707,28 +645,6 @@ def goodbye(worked):
print(msg)
def parse_components(components):
    """Parse raw component strings into a name -> options mapping.

    Each entry is matched against EXT_COMPONENT; entries that don't match,
    or whose name isn't a known component, are silently dropped. A component
    with no options maps to None, otherwise to a list of the non-empty,
    stripped, comma-separated option strings.
    """
    if not components:
        return dict()
    parsed = dict()
    for raw in components:
        match = EXT_COMPONENT.match(raw)
        if not match:
            continue
        name = match.group(1).lower().strip()
        if name not in settings.COMPONENT_NAMES:
            continue
        opts_text = match.group(2)
        opts = None
        if opts_text:
            # Keep only the non-empty options after whitespace stripping.
            opts = [opt.strip() for opt in opts_text.split(",") if opt.strip()]
        parsed[name] = opts
    return parsed
def welcome(ident):
lower = "| %s %s |" % (ident, version.version_string())
welcome_header = _get_welcome_stack()
@ -738,8 +654,8 @@ def welcome(ident):
footer += color_text(lower, 'blue', True)
uncolored_footer = (settings.PROG_NICE_NAME + ": " + lower)
if max_line_len - len(uncolored_footer) > 0:
#this format string will center the uncolored text which
#we will then replace with the color text equivalent
# This format string will center the uncolored text which
# we will then replace with the color text equivalent.
centered_str = center_text(uncolored_footer, " ", max_line_len)
footer = centered_str.replace(uncolored_footer, footer)
print(welcome_header)

63
stack
View File

@ -20,6 +20,7 @@ import sys
import time
import traceback
from devstack import cfg
from devstack import cfg_helpers
from devstack import date
from devstack import distro
@ -27,13 +28,11 @@ from devstack import env
from devstack import env_rc
from devstack import log as logging
from devstack import opts
from devstack import passwords
from devstack import settings
from devstack import shell as sh
from devstack import utils
from devstack.progs import actions
from devstack.progs import common
LOG = logging.getLogger("devstack.stack")
@ -50,7 +49,7 @@ _WELCOME_MAP = {
_CFG_GROUPS = {
cfg_helpers.make_id('passwords', None): 'Passwords',
cfg_helpers.make_id('db', None): 'Database info',
#catch all
# Catch all
cfg_helpers.make_id(None, None): 'Misc configs',
}
@ -68,12 +67,12 @@ def dump_config(config_cache):
value = mp.get(key)
LOG.info(item_format(key, value))
#first partition into our groups
# First partition into our groups
partitions = dict()
for name in _CFG_ORDERING:
partitions[name] = dict()
#now put the config cached values into there partition
    # Now put the config cached values into their partitions
for (k, v) in config_cache.items():
for name in _CFG_ORDERING:
entries = partitions[name]
@ -81,7 +80,7 @@ def dump_config(config_cache):
entries[k] = v
break
#now print them..
# Now print them..
for name in _CFG_ORDERING:
nice_name = _CFG_GROUPS.get(name)
LOG.info(nice_name + ":")
@ -103,49 +102,55 @@ def load_rc_files():
def run(args):
action = args.pop("action").strip().lower()
action = args.pop("action", '').strip().lower()
if not (action in settings.ACTIONS):
print(utils.color_text("No valid action specified!", "red"))
return False
loaded_rcs = False
rootdir = args.pop("dir")
if not rootdir:
root_dir = args.pop("dir")
if not root_dir:
load_rc_files()
loaded_rcs = True
rootdir = env.get_key(env_rc.INSTALL_ROOT)
if not rootdir:
root_dir = env.get_key(env_rc.INSTALL_ROOT)
if not root_dir:
print(utils.color_text("No root directory specified!", "red"))
return False
root_dir = sh.abspth(root_dir)
#welcome!
persona_fn = args.pop('persona_fn')
if not persona_fn or not sh.isfile(persona_fn):
print(utils.color_text("No valid persona file name specified!", "red"))
return False
# Welcome!
(repeat_string, line_max_len) = utils.welcome(_WELCOME_MAP.get(action))
print(utils.center_text("Action Runner", repeat_string, line_max_len))
#here on out we should be using the logger (and not print)
start_time = time.time()
# !!
# Here on out we should be using the logger (and not print)!!
# !!
# if we didn't load them before, load them now
# If we didn't load them before, load them now
if not loaded_rcs:
load_rc_files()
loaded_rcs = True
# Stash the dryrun value (if any) into the global configuration
sh.set_dryrun(args['dryrun'])
sh.set_dryrun(args.pop('dryrun'))
dist = distro.Distro.get_current()
config = common.get_config()
pw_gen = passwords.PasswordGenerator(config, args['prompt_for_passwords'])
pkg_factory = dist.get_packager_factory()
pkg_manager = pkg_factory(dist, args['keep_old'])
components = utils.parse_components(args.pop("components"))
runner = actions.ActionRunner(dist, action, rootdir, config, pw_gen,
pkg_manager, components=components, **args)
config = cfg.get_config()
runner = actions.ActionRunner(dist, action, config, **args)
LOG.info("Starting action [%s] on %s for distro [%s]" % (action, date.rcf8222date(), dist))
runner.run()
LOG.info("It took (%s) to complete action [%s]" % (common.format_secs_taken((time.time() - start_time)), action))
start_time = time.time()
runner.run(persona_fn, root_dir)
end_time = time.time()
LOG.info("It took (%s) to complete action [%s]" %
(utils.format_secs_taken((end_time - start_time)), action))
LOG.info("After action [%s] your settings which were created or read are:" % (action))
dump_config(config.configs_fetched)
@ -154,7 +159,7 @@ def run(args):
def main():
#do this first so people can see the help message...
# Do this first so people can see the help message...
args = opts.parse()
prog_name = sys.argv[0]
@ -163,7 +168,7 @@ def main():
LOG.debug("Command line options %s" % (args))
#will need root to setup openstack
# Will need root to setup openstack
if not sh.got_root():
rest_args = sys.argv[1:]
print("This program requires a user with sudo access.")
@ -173,7 +178,7 @@ def main():
return 1
try:
#drop to usermode
# Drop to usermode
sh.user_mode(False)
started_ok = run(args)
if not started_ok: