merge origin/master

Jesse Andrews 2011-11-10 11:46:18 -08:00
commit 16b6efab4c
34 changed files with 1384 additions and 551 deletions

.gitignore vendored

@@ -1,3 +1,5 @@
 proto
 *~
+*.log
+src
 localrc

exercises/floating_ips.sh Executable file → Normal file

@@ -84,6 +84,15 @@ nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
 # Waiting for boot
 # ----------------
+
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
 # check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
     echo "server didn't become active!"
@@ -158,7 +167,7 @@ ping -c1 -w1 $IP
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
 # FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver"]; then
+if [ "$VIRT_DRIVER" != "xenserver" ]; then
     # test we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
         print "Security group failure - ping should not be allowed!"


@@ -1,7 +1,9 @@
 dnsmasq-base
+dnsmasq-utils # for dhcp_release
 kpartx
 parted
-mysql-server
+arping # used for send_arp_for_ha option in nova-network
+mysql-server # NOPRIME
 python-mysqldb
 kvm
 gawk
@@ -10,10 +12,10 @@ ebtables
 sqlite3
 sudo
 kvm
-libvirt-bin
+libvirt-bin # NOPRIME
 vlan
 curl
-rabbitmq-server
+rabbitmq-server # NOPRIME
 socat # used by ajaxterm
 python-mox
 python-paste
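The `# NOPRIME` markers added above are a convention consumed by the build scripts changed later in this commit: packages tagged NOPRIME are only pre-downloaded when priming an image, not installed (stack.sh installs them itself at run time). The filtering looks like this, mirroring the chroot commands further down:

    # download, but do not install, the NOPRIME-tagged packages
    apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
    # install everything that is not tagged NOPRIME
    apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`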


@@ -1,18 +0,0 @@
# a collection of packages that speed up installation as they are dependencies
# of packages we can't install during bootstraping (rabbitmq-server,
# mysql-server, libvirt-bin)
#
# NOTE: only add packages to this file that aren't needed directly
mysql-common
mysql-client-5.1
erlang-base
erlang-ssl
erlang-nox
erlang-inets
erlang-mnesia
libhtml-template-perl
gettext-base
libavahi-client3
libxml2-utils
libpciaccess0
libparted0debian1

files/apts/swift Normal file

@@ -0,0 +1,17 @@
curl
gcc
memcached # NOPRIME
python-configobj
python-coverage
python-dev
python-eventlet
python-greenlet
python-netifaces
python-nose
python-pastedeploy
python-setuptools
python-simplejson
python-webob
python-xattr
sqlite3
xfsprogs


@@ -24,7 +24,7 @@ registry_port = 9191
 # Log to this file. Make sure you do not set the same log
 # file for both the API and registry servers!
-log_file = %DEST%/glance/api.log
+#log_file = %DEST%/glance/api.log
 # Send logs to syslog (/dev/log) instead of to file specified by `log_file`
 use_syslog = %SYSLOG%


@@ -13,7 +13,7 @@ bind_port = 9191
 # Log to this file. Make sure you do not set the same log
 # file for both the API and registry servers!
-log_file = %DEST%/glance/registry.log
+#log_file = %DEST%/glance/registry.log
 # Where to store images
 filesystem_store_datadir = %DEST%/glance/images


@@ -30,12 +30,13 @@ $BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin
 $BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service"
 $BIN_DIR/keystone-manage $* service add glance image "Glance Image Service"
 $BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service"
+$BIN_DIR/keystone-manage $* service add swift object-store "Swift Service"
 #endpointTemplates
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
 $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
-# $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
 # Tokens
 $BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
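A hedged note on the positional arguments of `endpointTemplates add`: they appear to be region, service name, public URL, admin URL, internal URL, an enabled flag, and a global flag. The keystone line above makes the ordering visible, since only the admin URL uses port 35357:

    # region service  public-url  admin-url  internal-url  enabled global
    $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone \
        http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1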

files/nova-api-paste.ini Normal file

@@ -0,0 +1,127 @@
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/: ec2versions
/services/Cloud: ec2cloud
/services/Admin: ec2admin
/latest: ec2metadata
/2007-01-19: ec2metadata
/2007-03-01: ec2metadata
/2007-08-29: ec2metadata
/2007-10-10: ec2metadata
/2007-12-15: ec2metadata
/2008-02-01: ec2metadata
/2008-09-01: ec2metadata
/2009-04-04: ec2metadata
/1.0: ec2metadata
[pipeline:ec2cloud]
pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
[pipeline:ec2admin]
pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
[pipeline:ec2metadata]
pipeline = logrequest ec2md
[pipeline:ec2versions]
pipeline = logrequest ec2ver
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:totoken]
paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
[filter:authenticate]
paste.filter_factory = nova.api.ec2:Authenticate.factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:adminrequest]
controller = nova.api.ec2.admin.AdminController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory
[app:ec2ver]
paste.app_factory = nova.api.ec2:Versions.factory
[app:ec2md]
paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
#############
# Openstack #
#############
[composite:osapi]
use = egg:Paste#urlmap
/: osversions
/v1.0: openstackapi10
/v1.1: openstackapi11
[pipeline:openstackapi10]
pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
[pipeline:openstackapi11]
pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:auth]
paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
[filter:extensions]
paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
[app:osapiapp10]
paste.app_factory = nova.api.openstack:APIRouterV10.factory
[app:osapiapp11]
paste.app_factory = nova.api.openstack:APIRouterV11.factory
[pipeline:osversions]
pipeline = faultwrap osversionapp
[app:osversionapp]
paste.app_factory = nova.api.openstack.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = 5000
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
auth_uri = http://127.0.0.1:5000/
admin_token = %SERVICE_TOKEN%
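A paste `composite` maps URL prefixes to pipelines, and each `pipeline` runs its filters left to right before handing the request to the final app. Since the `osapi` composite maps `/` to the versions app, a bare GET against the API port answers as soon as nova-api is up, which is exactly the readiness check stack.sh uses below:

    wget -q -O- http://127.0.0.1:8774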


@@ -41,7 +41,7 @@ Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \
                       /usr/bin/socat, \
                       /sbin/parted, \
                       /usr/sbin/dnsmasq, \
-                      /usr/bin/arping
+                      /usr/sbin/arping

 %USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS


@@ -0,0 +1,20 @@
[DEFAULT]
devices = %NODE_PATH%/node
mount_check = false
bind_port = %BIND_PORT%
user = %USER%
log_facility = LOG_LOCAL%LOG_FACILITY%
swift_dir = %SWIFT_CONFIG_LOCATION%
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:swift#account
[account-replicator]
vm_test_mode = yes
[account-auditor]
[account-reaper]


@@ -0,0 +1,22 @@
[DEFAULT]
devices = %NODE_PATH%/node
mount_check = false
bind_port = %BIND_PORT%
user = %USER%
log_facility = LOG_LOCAL%LOG_FACILITY%
swift_dir = %SWIFT_CONFIG_LOCATION%
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:swift#container
[container-replicator]
vm_test_mode = yes
[container-updater]
[container-auditor]
[container-sync]


@@ -0,0 +1,20 @@
[DEFAULT]
devices = %NODE_PATH%/node
mount_check = false
bind_port = %BIND_PORT%
user = %USER%
log_facility = LOG_LOCAL%LOG_FACILITY%
swift_dir = %SWIFT_CONFIG_LOCATION%
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
[object-replicator]
vm_test_mode = yes
[object-updater]
[object-auditor]


@@ -0,0 +1,32 @@
[DEFAULT]
bind_port = 8080
user = %USER%
log_facility = LOG_LOCAL1
swift_dir = %SWIFT_CONFIG_LOCATION%
[pipeline:main]
pipeline = healthcheck cache %AUTH_SERVER% proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
account_autocreate = true
[filter:keystone]
use = egg:swiftkeystone2#keystone2
keystone_admin_token = %SERVICE_TOKEN%
keystone_url = http://localhost:35357/v2.0
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
bind_ip = 0.0.0.0
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache

files/swift/rsyncd.conf Normal file

@@ -0,0 +1,79 @@
uid = %USER%
gid = %GROUP%
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = 127.0.0.1
[account6012]
max connections = 25
path = %SWIFT_DATA_LOCATION%/1/node/
read only = false
lock file = /var/lock/account6012.lock
[account6022]
max connections = 25
path = %SWIFT_DATA_LOCATION%/2/node/
read only = false
lock file = /var/lock/account6022.lock
[account6032]
max connections = 25
path = %SWIFT_DATA_LOCATION%/3/node/
read only = false
lock file = /var/lock/account6032.lock
[account6042]
max connections = 25
path = %SWIFT_DATA_LOCATION%/4/node/
read only = false
lock file = /var/lock/account6042.lock
[container6011]
max connections = 25
path = %SWIFT_DATA_LOCATION%/1/node/
read only = false
lock file = /var/lock/container6011.lock
[container6021]
max connections = 25
path = %SWIFT_DATA_LOCATION%/2/node/
read only = false
lock file = /var/lock/container6021.lock
[container6031]
max connections = 25
path = %SWIFT_DATA_LOCATION%/3/node/
read only = false
lock file = /var/lock/container6031.lock
[container6041]
max connections = 25
path = %SWIFT_DATA_LOCATION%/4/node/
read only = false
lock file = /var/lock/container6041.lock
[object6010]
max connections = 25
path = %SWIFT_DATA_LOCATION%/1/node/
read only = false
lock file = /var/lock/object6010.lock
[object6020]
max connections = 25
path = %SWIFT_DATA_LOCATION%/2/node/
read only = false
lock file = /var/lock/object6020.lock
[object6030]
max connections = 25
path = %SWIFT_DATA_LOCATION%/3/node/
read only = false
lock file = /var/lock/object6030.lock
[object6040]
max connections = 25
path = %SWIFT_DATA_LOCATION%/4/node/
read only = false
lock file = /var/lock/object6040.lock
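The rsync module names encode node number and service: node N serves objects on port 60N0, containers on 60N1, and accounts on 60N2, matching the base ports (6010/6011/6012) and per-node step of 10 used by generate_swift_configuration in stack.sh. As a quick sanity check:

    # port for node N and service offset O (0=object, 1=container, 2=account)
    node=3; offset=2
    echo $(( 6000 + node * 10 + offset ))    # prints 6032, i.e. the account6032 module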

files/swift/swift-remakerings Executable file

@@ -0,0 +1,26 @@
#!/bin/bash
cd %SWIFT_CONFIG_LOCATION%
rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
swift-ring-builder object.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1
swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1
swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1
swift-ring-builder object.builder rebalance
swift-ring-builder container.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1
swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1
swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1
swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1
swift-ring-builder container.builder rebalance
swift-ring-builder account.builder create %SWIFT_PARTITION_POWER_SIZE% 3 1
swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1
swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1
swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1
swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1
swift-ring-builder account.builder rebalance
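A hedged note on the swift-ring-builder arguments used here: `create` takes the partition power, the replica count, and the minimum hours between moves of a given partition, while `add` takes a device spec of the form z&lt;zone&gt;-&lt;ip&gt;:&lt;port&gt;/&lt;device&gt; plus a weight. So with the default partition power of 9 each ring is built with 2^9 partitions, 3 replicas, a 1-hour move interval, and one weight-1 device in each of four zones, e.g.:

    swift-ring-builder object.builder create 9 3 1
    swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1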

files/swift/swift-startmain Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
swift-init all restart

files/swift/swift.conf Normal file

@@ -0,0 +1,3 @@
[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = %SWIFT_HASH%
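Because the hash suffix must be random and then never change, it is worth generating one deliberately before the first run. A sketch, assuming openssl is available, that stores it in localrc so stack.sh picks it up instead of prompting:

    SWIFT_HASH=`openssl rand -hex 16`
    echo "SWIFT_HASH=$SWIFT_HASH" >> localrc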

stack.sh

@@ -70,7 +70,7 @@ fi
 # called ``localrc``
 #
 # If ``localrc`` exists, then ``stackrc`` will load those settings.  This is
-# useful for changing a branch or repostiory to test other versions.  Also you
+# useful for changing a branch or repository to test other versions.  Also you
 # can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead
 # of letting devstack generate random ones for you.
 source ./stackrc
@@ -103,8 +103,7 @@ if [[ $EUID -eq 0 ]]; then
     # since this script runs as a normal user, we need to give that user
     # ability to run sudo
-    apt_get update
-    apt_get install sudo
+    dpkg -l sudo || apt_get update && apt_get install sudo

     if ! getent passwd stack >/dev/null; then
         echo "Creating a user called stack"
@@ -150,6 +149,12 @@ KEYSTONE_DIR=$DEST/keystone
 NOVACLIENT_DIR=$DEST/python-novaclient
 OPENSTACKX_DIR=$DEST/openstackx
 NOVNC_DIR=$DEST/noVNC
+SWIFT_DIR=$DEST/swift
+SWIFT_KEYSTONE_DIR=$DEST/swift-keystone2
+QUANTUM_DIR=$DEST/quantum
+
+# Default Quantum Plugin
+Q_PLUGIN=${Q_PLUGIN:-openvswitch}

 # Specify which services to launch.  These generally correspond to screen tabs
 ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit}
@@ -169,6 +174,9 @@ if [ ! -n "$HOST_IP" ]; then
     HOST_IP=`LC_ALL=C /sbin/ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
 fi

+# Service startup timeout
+SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+
 # Generic helper to configure passwords
 function read_password {
     set +o xtrace
@@ -224,7 +232,7 @@ VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
 # Multi-host is a mode where each compute node runs its own network node.  This
 # allows network operations and routing for a VM to occur on the server that is
 # running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=${MULTI_HOST:-0}
+MULTI_HOST=${MULTI_HOST:-False}

 # If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
 # variable but make sure that the interface doesn't already have an
@@ -239,11 +247,22 @@ MULTI_HOST=${MULTI_HOST:-0}
 # If you are running on a single node and don't need to access the VMs from
 # devices other than that node, you can set the flat interface to the same
 # value as ``FLAT_NETWORK_BRIDGE``.  This will stop the network hiccup from
-# occuring.
+# occurring.
 FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}

 ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
+# Using Quantum networking:
+#
+# Make sure that q-svc is enabled in ENABLED_SERVICES.  If it is, the
+# network manager will be set to the QuantumManager.
+#
+# If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to
+# "openvswitch" and make sure the q-agt service is enabled in
+# ENABLED_SERVICES.
+#
+# With Quantum networking the NET_MAN variable is ignored.
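Putting the comment above into practice, a minimal localrc sketch that turns Quantum on, extending the default service list from stack.sh:

    ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,q-svc,q-agt
    Q_PLUGIN=openvswitch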
# MySQL & RabbitMQ # MySQL & RabbitMQ
# ---------------- # ----------------
@@ -270,6 +289,42 @@ read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."

 # Glance connection info.  Note the port must be specified.
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
+# SWIFT
+# -----
+
+# TODO: implement glance support
+# TODO: add logging to different location.
+
+# By default the location of swift drives and objects is located inside
+# the swift source directory. The SWIFT_DATA_LOCATION variable allows you
+# to redefine this.
+SWIFT_DATA_LOCATION=${SWIFT_DATA_LOCATION:-${SWIFT_DIR}/data}
+
+# We are going to have the configuration files inside the source
+# directory; change SWIFT_CONFIG_LOCATION if you want to adjust that.
+SWIFT_CONFIG_LOCATION=${SWIFT_CONFIG_LOCATION:-${SWIFT_DIR}/config}
+
+# devstack will create a loop-back disk formatted as XFS to store the
+# swift data. By default the disk size is about 1 gigabyte. The
+# SWIFT_LOOPBACK_DISK_SIZE variable allows you to change that.
+SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
+
+# The ring uses a configurable number of bits from a path's MD5 hash as
+# a partition index that designates a device. The number of bits kept
+# from the hash is known as the partition power, and 2 to the partition
+# power indicates the partition count. Partitioning the full MD5 hash
+# ring allows other parts of the cluster to work in batches of items at
+# once, which ends up either more efficient or at least less complex than
+# working with each item separately or the entire cluster all at once.
+# By default we use 9 as the partition power (which means 512 partitions).
+SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
+
+# We only ask for the Swift hash if the swift service is enabled.
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    # SWIFT_HASH is a random unique string for a swift cluster that
+    # can never change.
+    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
+fi
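A quick check on the partition-power comment, plus a hedged note on the disk-size default: the size value is consumed below as the seek count of a dd call with bs=1024, so it counts 1024-byte blocks rather than bytes, and the default of 1000000 gives roughly 1 GB:

    # partition power 9 -> 2^9 = 512 partitions
    echo $(( 2 ** ${SWIFT_PARTITION_POWER_SIZE:-9} ))        # prints 512
    # default loopback size: 1000000 blocks of 1024 bytes
    echo $(( ${SWIFT_LOOPBACK_DISK_SIZE:-1000000} * 1024 ))  # bytes, ~1 GB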
 # Keystone
 # --------
@@ -283,7 +338,7 @@ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (
 LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"}
 (
 # So that errors don't compound we exit on any errors so you see only the
-# first error that occured.
+# first error that occurred.
 trap failed ERR
 failed() {
     local r=$?
@@ -310,7 +365,7 @@ fi
 # install apt requirements
 apt_get update
-apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server"`
+apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"`

 # install python requirements
 sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
@@ -349,6 +404,10 @@ function git_clone {
 # compute service
 git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
+# storage service
+git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+# swift + keystone middleware
+git_clone $SWIFT_KEYSTONE_REPO $SWIFT_KEYSTONE_DIR $SWIFT_KEYSTONE_BRANCH
 # image catalog service
 git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
 # unified auth system (manages accounts/tokens)
@@ -362,6 +421,8 @@ git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
 # openstackx is a collection of extensions to openstack.compute & nova
 # that is *deprecated*.  The code is being moved into python-novaclient & nova.
 git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
+# quantum
+git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH

 # Initialization
 # ==============
@@ -370,12 +431,15 @@ git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
 # setup our checkouts so they are installed into python path
 # allowing ``import nova`` or ``import glance.client``
 cd $KEYSTONE_DIR; sudo python setup.py develop
+cd $SWIFT_DIR; sudo python setup.py develop
+cd $SWIFT_KEYSTONE_DIR; sudo python setup.py develop
 cd $GLANCE_DIR; sudo python setup.py develop
 cd $NOVACLIENT_DIR; sudo python setup.py develop
 cd $NOVA_DIR; sudo python setup.py develop
 cd $OPENSTACKX_DIR; sudo python setup.py develop
 cd $HORIZON_DIR/django-openstack; sudo python setup.py develop
 cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop
+cd $QUANTUM_DIR; sudo python setup.py develop

 # Add a useful screenrc.  This isn't required to run openstack but we do
 # it since we are going to run the services in screen for simple
@@ -500,13 +564,12 @@ fi
 # ----

 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    # We are going to use the sample http middleware configuration from the
-    # keystone project to launch nova.  This paste config adds the configuration
-    # required for nova to validate keystone tokens - except we need to switch
-    # the config to use our service token instead (instead of the invalid token
-    # 999888777666).
-    cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
-    sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+    # We are going to use a sample http middleware configuration based on the
+    # one from the keystone project to launch nova.  This paste config adds
+    # the configuration required for nova to validate keystone tokens. We add
+    # our own service token to the configuration.
+    cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
+    sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
 fi

 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
@@ -580,6 +643,129 @@ if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
     mkdir -p $NOVA_DIR/networks
 fi
+# Storage Service
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    # We first do a bit of setup by creating the directories and
+    # changing the permissions so we can run it as our user.
+    USER_GROUP=$(id -g)
+    sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
+
+    # We then create a loopback disk and format it to XFS.
+    if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]]; then
+        mkdir -p ${SWIFT_DATA_LOCATION}/drives/images
+        sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img
+        sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
+        dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
+            bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
+        mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img
+    fi
+
+    # After the drive is created we mount the disk with a few mount
+    # options to make it as efficient as possible for swift.
+    mkdir -p ${SWIFT_DATA_LOCATION}/drives/sdb1
+    if ! egrep -q ${SWIFT_DATA_LOCATION}/drives/sdb1 /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+            ${SWIFT_DATA_LOCATION}/drives/images/swift.img ${SWIFT_DATA_LOCATION}/drives/sdb1
+    fi
+
+    # We then create links to that mounted location so swift knows
+    # where to go.
+    for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
+
+    # We now have to emulate a few different servers in one, so we
+    # create all the directories needed for swift.
+    tmpd=""
+    for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
+        ${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
+        ${SWIFT_DATA_LOCATION}/{1..4}/node/sdb1 /var/run/swift; do
+        [[ -d $d ]] && continue
+        sudo install -o ${USER} -g $USER_GROUP -d $d
+    done
+
+    # We do want to make sure this is all owned by our user.
+    sudo chown -R $USER: ${SWIFT_DATA_LOCATION}/{1..4}/node
+    sudo chown -R $USER: ${SWIFT_CONFIG_LOCATION}
+
+    # swift-init has a bug using /etc/swift; until bug #885595 is fixed
+    # we have to create a link.
+    sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
+
+    # Swift uses rsync to synchronize between all the different
+    # partitions (which makes more sense when you have a multi-node
+    # setup); we configure it with our version of rsyncd.conf.
+    sed -e "s/%GROUP%/${USER_GROUP}/;s/%USER%/$USER/;s,%SWIFT_DATA_LOCATION%,$SWIFT_DATA_LOCATION," $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
+    sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
+
+    # By default Swift is installed with the tempauth middleware, which
+    # has some default usernames and passwords; if keystone is enabled
+    # we use the keystone middleware instead.
+    if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
+        swift_auth_server=keystone
+        # We need a special version of bin/swift which understands the
+        # OpenStack API 2.0; we download it until this is
+        # integrated in swift.
+        sudo curl -s -o/usr/local/bin/swift \
+            'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e'
+    else
+        swift_auth_server=tempauth
+    fi
+
+    # We do the install of the proxy-server and swift configuration,
+    # replacing a few directives to match our configuration.
+    sed "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%USER%/$USER/;s/%SERVICE_TOKEN%/${SERVICE_TOKEN}/;s/%AUTH_SERVER%/${swift_auth_server}/" \
+        $FILES/swift/proxy-server.conf | sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf
+
+    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf
+
+    # We need to generate the object/container/account server
+    # configurations, emulating 4 nodes on different ports; we have a
+    # little function that helps us do that.
+    function generate_swift_configuration() {
+        local server_type=$1
+        local bind_port=$2
+        local log_facility=$3
+        local node_number
+
+        for node_number in {1..4}; do
+            node_path=${SWIFT_DATA_LOCATION}/${node_number}
+            sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
+                $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_LOCATION}/${server_type}-server/${node_number}.conf
+            bind_port=$(( ${bind_port} + 10 ))
+            log_facility=$(( ${log_facility} + 1 ))
+        done
+    }
+    generate_swift_configuration object 6010 2
+    generate_swift_configuration container 6011 2
+    generate_swift_configuration account 6012 2
+
+    # We create two helper scripts:
+    #
+    #  - swift-remakerings
+    #    Recreates the rings from scratch.
+    #  - swift-startmain
+    #    Restarts the full cluster.
+    #
+    sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s/%SWIFT_PARTITION_POWER_SIZE%/$SWIFT_PARTITION_POWER_SIZE/" $FILES/swift/swift-remakerings | \
+        sudo tee /usr/local/bin/swift-remakerings
+    sudo install -m755 $FILES/swift/swift-startmain /usr/local/bin/
+    sudo chmod +x /usr/local/bin/swift-*
+
+    # We then can start rsync.
+    sudo /etc/init.d/rsync restart || :
+
+    # Create our rings for object/container/account.
+    /usr/local/bin/swift-remakerings
+
+    # And now we launch swift-startmain to get our cluster running,
+    # ready to be tested.
+    /usr/local/bin/swift-startmain || :
+
+    unset s swift_hash swift_auth_server tmpd
+fi
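One detail that is easy to miss above: the backing file is created sparse (dd with count=0 and a large seek), so it consumes almost no real disk space until swift writes data. This can be verified, assuming GNU du:

    du -h --apparent-size ${SWIFT_DATA_LOCATION}/drives/images/swift.img   # ~1 GB apparent size
    du -h ${SWIFT_DATA_LOCATION}/drives/images/swift.img                   # actual blocks used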
 # Volume Service
 # --------------
@@ -593,7 +779,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
     # By default, the backing file is 2G in size, and is stored in /opt/stack.
     #
     if ! sudo vgdisplay | grep -q nova-volumes; then
-        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-/opt/stack/nova-volumes-backing-file}
+        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
         VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
         truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
         DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
@@ -616,8 +802,16 @@ add_nova_flag "--nodaemon"
 add_nova_flag "--allow_admin_api"
 add_nova_flag "--scheduler_driver=$SCHEDULER"
 add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf"
-add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
 add_nova_flag "--fixed_range=$FIXED_RANGE"
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+    add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager"
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        add_nova_flag "--libvirt_vif_type=ethernet"
+        add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
+    fi
+else
+    add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
+fi
 add_nova_flag "--my_ip=$HOST_IP"
 add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
 add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
@@ -632,15 +826,16 @@ add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
 add_nova_flag "--rabbit_host=$RABBIT_HOST"
 add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
 add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
+add_nova_flag "--force_dhcp_release"
 if [ -n "$INSTANCES_PATH" ]; then
     add_nova_flag "--instances_path=$INSTANCES_PATH"
 fi
-if [ -n "$MULTI_HOST" ]; then
-    add_nova_flag "--multi_host=$MULTI_HOST"
-    add_nova_flag "--send_arp_for_ha=1"
+if [ "$MULTI_HOST" != "False" ]; then
+    add_nova_flag "--multi_host"
+    add_nova_flag "--send_arp_for_ha"
 fi
 if [ "$SYSLOG" != "False" ]; then
-    add_nova_flag "--use_syslog=1"
+    add_nova_flag "--use_syslog"
 fi

 # XenServer
@@ -676,12 +871,6 @@ if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
     # (re)create nova database
     $NOVA_DIR/bin/nova-manage db sync
-
-    # create a small network
-    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE
-
-    # create some floating ips
-    $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
 fi
@@ -721,16 +910,20 @@ fi
 function screen_it {
     NL=`echo -ne '\015'`
     if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
-        screen -S nova -X screen -t $1
-        screen -S nova -p $1 -X stuff "$2$NL"
+        screen -S stack -X screen -t $1
+        # sleep to allow bash to be ready to be sent the command - we are
+        # creating a new window in screen and then sending characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+        sleep 1
+        screen -S stack -p $1 -X stuff "$2$NL"
     fi
 }
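Since every service now runs in a single screen session named ``stack``, the running services can be inspected by attaching to that session:

    screen -ls         # list sessions; one should be named stack
    screen -r stack    # attach to it (use -x instead to share an already-attached session)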
 # create a new named screen to run processes in
-screen -d -m -S nova -t nova
+screen -d -m -S stack -t stack
 sleep 1

-# launch the glance registery service
+# launch the glance registry service
 if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
     screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
 fi
@@ -739,7 +932,7 @@ fi
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
     screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
-    if ! timeout 60 sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
         echo "g-api did not start"
         exit 1
     fi
@@ -749,7 +942,7 @@ fi
 if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d"
     echo "Waiting for keystone to start..."
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
         echo "keystone did not start"
         exit 1
     fi
@@ -759,11 +952,62 @@ fi
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
     screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
     echo "Waiting for nova-api to start..."
-    if ! timeout 60 sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
         echo "nova-api did not start"
         exit 1
     fi
 fi
+# Quantum
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+    # Install deps
+    # FIXME add to files/apts/quantum, but don't install if not needed!
+    apt_get install openvswitch-switch openvswitch-datapath-dkms
+
+    # Create database for the plugin/agent
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
+            mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
+        else
+            echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
+            exit 1
+        fi
+    fi
+
+    QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/quantum/plugins.ini
+    # Make sure we're using the openvswitch plugin
+    sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
+
+    screen_it q-svc "cd $QUANTUM_DIR && export PYTHONPATH=.:$PYTHONPATH; python $QUANTUM_DIR/bin/quantum $QUANTUM_DIR/etc/quantum.conf"
+fi
+
+# Quantum agent (for compute nodes)
+if [[ "$ENABLED_SERVICES" =~ "q-agt" ]]; then
+    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        # Set up integration bridge
+        OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+        sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
+        sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
+        sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
+    fi
+
+    # Start up the quantum <-> openvswitch agent
+    screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v"
+fi
+
+# If we're using Quantum (i.e. q-svc is enabled), network creation has to
+# happen after we've started the Quantum service.
+if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
+    # create a small network
+    $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE
+
+    if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+        echo "Not creating floating IPs (not supported by QuantumManager)"
+    else
+        # create some floating ips
+        $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
+    fi
+fi
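For reference, the nova-manage call above takes a network label, a fixed-IP CIDR, the number of networks to create, and the size of each network; with hypothetical values it reads:

    nova-manage network create private 10.0.0.0/24 1 256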
 # Launching nova-compute should be as simple as running ``nova-compute`` but
 # have to do a little more than that in our script.  Since we add the group
 # ``libvirtd`` to our user in this script, when nova-compute is run it is
@@ -787,7 +1031,7 @@ screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
 # TTY also uses cloud-init, supporting login via keypair and sending scripts as
 # userdata.  See https://help.ubuntu.com/community/CloudInit for more on cloud-init
 #
-# Override ``IMAGE_URLS`` with a comma-seperated list of uec images.
+# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
 #
 #  * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz
 #  * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
@@ -843,6 +1087,10 @@ for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done
 # Using the cloud
 # ===============

+echo ""
+echo ""
+echo ""
+
 # If you installed the horizon on this server, then you should be able
 # to access the site using your browser.
 if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then

stackrc

@@ -2,6 +2,14 @@
 NOVA_REPO=https://github.com/cloudbuilders/nova.git
 NOVA_BRANCH=diablo

+# storage service
+SWIFT_REPO=https://github.com/openstack/swift.git
+SWIFT_BRANCH=stable/diablo
+
+# swift and keystone integration
+SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git
+SWIFT_KEYSTONE_BRANCH=master
+
 # image catalog service
 GLANCE_REPO=https://github.com/cloudbuilders/glance.git
 GLANCE_BRANCH=diablo
@@ -27,6 +35,10 @@ NOVACLIENT_BRANCH=master
 OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git
 OPENSTACKX_BRANCH=diablo

+# quantum service
+QUANTUM_REPO=https://github.com/openstack/quantum
+QUANTUM_BRANCH=stable/diablo
+
 # Specify a comma-separated list of uec images to download and install into glance.
 IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz
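As the stack.sh comment above notes, any of these repo/branch defaults can be overridden from localrc to test other versions; a hypothetical example:

    # localrc
    SWIFT_BRANCH=master
    NOVA_REPO=https://github.com/yourfork/nova.git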


@@ -1,402 +0,0 @@
#!/usr/bin/env bash
# exit on error to stop unexpected errors
set -o errexit
# Make sure that we have the proper version of ubuntu
UBUNTU_VERSION=`cat /etc/lsb-release | grep CODENAME | sed 's/.*=//g'`
if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then
if [ ! "natty" = "$UBUNTU_VERSION" ]; then
echo "This script only works with oneiric and natty"
exit 1
fi
fi
# Echo commands
set -o xtrace
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$TOOLS_DIR/..
# Where to store files and instances
KVMSTACK_DIR=${KVMSTACK_DIR:-/opt/kvmstack}
# Where to store images
IMAGES_DIR=$KVMSTACK_DIR/images
# Create images dir
mkdir -p $IMAGES_DIR
# Move to top devstack dir
cd $TOP_DIR
# Abort if localrc is not set
if [ ! -e ./localrc ]; then
echo "You must have a localrc with ALL necessary passwords defined before proceeding."
echo "See stack.sh for required passwords."
exit 1
fi
# Source params
source ./stackrc
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
# Base image (natty by default)
DIST_NAME=${DIST_NAME:-natty}
IMAGE_FNAME=$DIST_NAME.raw
# Name of our instance, used by libvirt
GUEST_NAME=${GUEST_NAME:-kvmstack}
# Original version of built image
BASE_IMAGE=$KVMSTACK_DIR/images/$DIST_NAME.raw
# Copy of base image, which we pre-install with tasty treats
VM_IMAGE=$IMAGES_DIR/$DIST_NAME.$GUEST_NAME.raw
# Mop up after previous runs
virsh destroy $GUEST_NAME || true
# Where this vm is stored
VM_DIR=$KVMSTACK_DIR/instances/$GUEST_NAME
# Create vm dir
mkdir -p $VM_DIR
# Mount point into copied base image
COPY_DIR=$VM_DIR/copy
mkdir -p $COPY_DIR
# Create the base image if it does not yet exist
if [ ! -e $IMAGES_DIR/$IMAGE_FNAME ]; then
cd $TOOLS_DIR
./make_image.sh -m -r 5000 $DIST_NAME raw
mv $DIST_NAME.raw $BASE_IMAGE
cd $TOP_DIR
fi
# Create a copy of the base image
if [ ! -e $VM_IMAGE ]; then
cp -p $BASE_IMAGE $VM_IMAGE
fi
# Unmount the copied base image
function unmount_images() {
# unmount the filesystem
while df | grep -q $COPY_DIR; do
umount $COPY_DIR || echo 'ok'
sleep 1
done
}
# Unmount from failed runs
unmount_images
# Ctrl-c catcher
function kill_unmount() {
unmount_images
exit 1
}
# Install deps if needed
dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx
# Let Ctrl-c kill tail and exit
trap kill_unmount SIGINT
# Where Openstack code will live in image
DEST=${DEST:-/opt/stack}
# Mount the file system
mount -o loop,offset=32256 $VM_IMAGE $COPY_DIR
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
function git_clone {
if [ ! -d $2 ]; then
sudo mkdir $2
sudo chown `whoami` $2
git clone $1 $2
cd $2
# This checkout syntax works for both branches and tags
git checkout $3
fi
}
# Make sure that base requirements are installed
cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf
chroot $COPY_DIR apt-get update
chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server
chroot $COPY_DIR pip install `cat files/pips/*`
# Clean out code repos if directed to do so
if [ "$CLEAN" = "1" ]; then
rm -rf $COPY_DIR/$DEST
fi
# Cache openstack code
mkdir -p $COPY_DIR/$DEST
git_clone $NOVA_REPO $COPY_DIR/$DEST/nova $NOVA_BRANCH
git_clone $GLANCE_REPO $COPY_DIR/$DEST/glance $GLANCE_BRANCH
git_clone $KEYSTONE_REPO $COPY_DIR/$DESTkeystone $KEYSTONE_BRANCH
git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
git_clone $HORIZON_REPO $COPY_DIR/$DEST/horizon $HORIZON_BRANCH $HORIZON_TAG
git_clone $NOVACLIENT_REPO $COPY_DIR/$DEST/python-novaclient $NOVACLIENT_BRANCH
git_clone $OPENSTACKX_REPO $COPY_DIR/$DEST/openstackx $OPENSTACKX_BRANCH
git_clone $KEYSTONE_REPO $COPY_DIR/$DEST/keystone $KEYSTONE_BRANCH
git_clone $NOVNC_REPO $COPY_DIR/$DEST/noVNC $NOVNC_BRANCH
# Back to devstack
cd $TOP_DIR
# Unmount the filesystems
unmount_images
# Network configuration variables
BRIDGE=${BRIDGE:-br0}
GUEST_IP=${GUEST_IP:-192.168.1.50}
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.1.1}
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $(echo $GUEST_IP | sed "s/.*\.//")`"}
GUEST_RAM=${GUEST_RAM:-1524288}
GUEST_CORES=${GUEST_CORES:-1}
# libvirt.xml configuration
LIBVIRT_XML=$VM_DIR/libvirt.xml
cat > $LIBVIRT_XML <<EOF
<domain type='kvm'>
<name>$GUEST_NAME</name>
<memory>$GUEST_RAM</memory>
<os>
<type>hvm</type>
<bootmenu enable='yes'/>
</os>
<features>
<acpi/>
</features>
<vcpu>$GUEST_CORES</vcpu>
<devices>
<disk type='file'>
<driver type='qcow2'/>
<source file='$VM_DIR/disk'/>
<target dev='vda' bus='virtio'/>
</disk>
<interface type='bridge'>
<source bridge='$BRIDGE'/>
<mac address='$GUEST_MAC'/>
</interface>
<!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='$VM_DIR/console.log'/>
<target port='1'/>
</serial>
<console type='pty' tty='/dev/pts/2'>
<source path='/dev/pts/2'/>
<target port='0'/>
</console>
<serial type='pty'>
<source path='/dev/pts/2'/>
<target port='0'/>
</serial>
<graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
</devices>
</domain>
EOF
# Mount point for instance fs
ROOTFS=$VM_DIR/root
mkdir -p $ROOTFS
# Make sure we have nbd-ness
modprobe nbd max_part=63
# Which NBD device to use?
NBD=${NBD:-/dev/nbd5}
# Clean up from previous runs
umount $ROOTFS || echo 'ok'
qemu-nbd -d $NBD || echo 'ok'
# Clean up old runs
cd $VM_DIR
rm -f $VM_DIR/disk
# Create our instance fs
qemu-img create -f qcow2 -b $VM_IMAGE disk
# Connect our nbd and wait till it is mountable
qemu-nbd -c $NBD disk
NBD_DEV=`basename $NBD`
if ! timeout 60 sh -c "while ! [ -e /sys/block/$NBD_DEV/pid ]; do sleep 1; done"; then
echo "Couldn't connect $NBD"
exit 1
fi
# Mount the instance
mount $NBD $ROOTFS -o offset=32256 -t ext4
# Configure instance network
INTERFACES=$ROOTFS/etc/network/interfaces
cat > $INTERFACES <<EOF
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address $GUEST_IP
netmask $GUEST_NETMASK
gateway $GUEST_GATEWAY
EOF
# User configuration for the instance
chroot $ROOTFS groupadd libvirtd || true
chroot $ROOTFS useradd stack -s /bin/bash -d $DEST -G libvirtd
cp -pr $TOOLS_DIR/.. $ROOTFS/$DEST/devstack
echo "root:$ROOT_PASSWORD" | chroot $ROOTFS chpasswd
echo "stack:pass" | chroot $ROOTFS chpasswd
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $ROOTFS/etc/sudoers
# Gracefully cp only if source file/dir exists
function cp_it {
if [ -e $1 ] || [ -d $1 ]; then
cp -pRL $1 $2
fi
}
# Copy over your ssh keys and env if desired
COPYENV=${COPYENV:-1}
if [ "$COPYENV" = "1" ]; then
cp_it ~/.ssh $ROOTFS/$DEST/.ssh
cp_it ~/.ssh/id_rsa.pub $ROOTFS/$DEST/.ssh/authorized_keys
cp_it ~/.gitconfig $ROOTFS/$DEST/.gitconfig
cp_it ~/.vimrc $ROOTFS/$DEST/.vimrc
cp_it ~/.bashrc $ROOTFS/$DEST/.bashrc
fi
# pre-cache uec images
for image_url in ${IMAGE_URLS//,/ }; do
IMAGE_FNAME=`basename "$image_url"`
if [ ! -f $IMAGES_DIR/$IMAGE_FNAME ]; then
wget -c $image_url -O $IMAGES_DIR/$IMAGE_FNAME
fi
cp $IMAGES_DIR/$IMAGE_FNAME $ROOTFS/$DEST/devstack/files
done
# Configure the runner
RUN_SH=$ROOTFS/$DEST/run.sh
cat > $RUN_SH <<EOF
#!/usr/bin/env bash
# Kill any existing screens
killall screen
# Install and run stack.sh
sudo apt-get update
sudo apt-get -y --force-yes install git-core vim-nox sudo
if [ ! -d "$DEST/devstack" ]; then
git clone git://github.com/cloudbuilders/devstack.git $DEST/devstack
fi
cd $DEST/devstack && $STACKSH_PARAMS FORCE=yes ./stack.sh > /$DEST/run.sh.log
echo >> /$DEST/run.sh.log
echo >> /$DEST/run.sh.log
echo "All done! Time to start clicking." >> /$DEST/run.sh.log
cat $DEST/run.sh.log
EOF
chmod 755 $RUN_SH
# Make runner launch on boot
RC_LOCAL=$ROOTFS/etc/init.d/local
cat > $RC_LOCAL <<EOF
#!/bin/sh -e
# Reboot if this is our first run to enable console log on $DIST_NAME :(
if [ ! -e /root/firstlaunch ]; then
touch /root/firstlaunch
reboot -f
exit 0
fi
su -c "$DEST/run.sh" stack
EOF
chmod +x $RC_LOCAL
chroot $ROOTFS sudo update-rc.d local defaults 80
# Make our ip address hostnames look nice at the command prompt
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/$DEST/.bashrc
echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $ROOTFS/etc/profile
# Give stack ownership over $DEST so it may do the work needed
chroot $ROOTFS chown -R stack $DEST
# Change boot params so that we get a console log
sudo sed -e "s/quiet splash/splash console=ttyS0 console=ttyS1,19200n8/g" -i $ROOTFS/boot/grub/menu.lst
sudo sed -e "s/^hiddenmenu//g" -i $ROOTFS/boot/grub/menu.lst
# Set the hostname
echo $GUEST_NAME > $ROOTFS/etc/hostname
# We need the hostname to resolve for rabbit to launch
if ! grep -q $GUEST_NAME $ROOTFS/etc/hosts; then
echo "$GUEST_IP $GUEST_NAME" >> $ROOTFS/etc/hosts
fi
# Unmount
umount $ROOTFS || echo 'ok'
qemu-nbd -d $NBD
# Create the instance
cd $VM_DIR && virsh create libvirt.xml
# Tail the console log till we are done
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
# Done creating the container, let's tail the log
echo
echo "============================================================="
echo " -- YAY! --"
echo "============================================================="
echo
echo "We're done launching the vm, about to start tailing the"
echo "stack.sh log. It will take a second or two to start."
echo
echo "Just CTRL-C at any time to stop tailing."
while [ ! -e "$VM_DIR/console.log" ]; do
sleep 1
done
tail -F $VM_DIR/console.log &
TAIL_PID=$!
function kill_tail() {
kill $TAIL_PID
exit 1
}
# Let Ctrl-c kill tail and exit
trap kill_tail SIGINT
echo "Waiting stack.sh to finish..."
while ! cat $VM_DIR/console.log | grep -q 'All done' ; do
sleep 5
done
kill $TAIL_PID
if grep -q "stack.sh failed" $VM_DIR/console.log; then
exit 1
fi
echo ""
echo "Finished - Zip-a-dee Doo-dah!"
fi


@@ -12,6 +12,27 @@ if [ ! "oneiric" = "$UBUNTU_VERSION" ]; then
     fi
 fi
+# Clean up any resources that may be in use
+cleanup() {
+    set +o errexit
+
+    unmount_images
+
+    if [ -n "$ROOTFS" ]; then
+        umount $ROOTFS/dev
+        umount $ROOTFS
+    fi
+
+    # Release NBD devices
+    if [ -n "$NBD" ]; then
+        qemu-nbd -d $NBD
+    fi
+
+    # Kill ourselves to signal any calling process
+    trap 2; kill -2 $$
+}
+
+trap cleanup SIGHUP SIGINT SIGTERM
 # Echo commands
 set -o xtrace
@@ -100,9 +121,6 @@ function kill_unmount() {
 # Install deps if needed
 dpkg -l kvm libvirt-bin kpartx || apt-get install -y --force-yes kvm libvirt-bin kpartx

-# Let Ctrl-c kill tail and exit
-trap kill_unmount SIGINT

 # Where Openstack code will live in image
 DEST=${DEST:-/opt/stack}
@@ -127,8 +145,8 @@ function git_clone {
 # Make sure that base requirements are installed
 cp /etc/resolv.conf $COPY_DIR/etc/resolv.conf
 chroot $COPY_DIR apt-get update
-chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | cut -d\# -f1 | egrep -v "(rabbitmq|libvirt-bin|mysql-server)"`
-chroot $COPY_DIR apt-get install -y --download-only rabbitmq-server libvirt-bin mysql-server
+chroot $COPY_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
+chroot $COPY_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
 chroot $COPY_DIR pip install `cat files/pips/*`
@@ -156,6 +174,7 @@ unmount_images
 # Network configuration variables
 GUEST_NETWORK=${GUEST_NETWORK:-1}
+GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}

 GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
 GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
@@ -176,8 +195,10 @@ cat > $NET_XML <<EOF
 </network>
 EOF

-virsh net-destroy devstack-$GUEST_NETWORK || true
-virsh net-create $VM_DIR/net.xml
+if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
+    virsh net-destroy devstack-$GUEST_NETWORK || true
+    virsh net-create $VM_DIR/net.xml
+fi

 # libvirt.xml configuration
 LIBVIRT_XML=$VM_DIR/libvirt.xml
@@ -239,26 +260,35 @@ rm -f $VM_DIR/disk
 # Create our instance fs
 qemu-img create -f qcow2 -b $VM_IMAGE disk
+# Finds the next available NBD device
+# Exits script if error connecting or none free
+# map_nbd image
+# returns full nbd device path
+function map_nbd {
+    for i in `seq 0 15`; do
+        if [ ! -e /sys/block/nbd$i/pid ]; then
+            NBD=/dev/nbd$i
+            # Connect to nbd and wait till it is ready
+            qemu-nbd -c $NBD $1
+            if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
+                echo "Couldn't connect $NBD"
+                exit 1
+            fi
+            break
+        fi
+    done
+    if [ -z "$NBD" ]; then
+        echo "No free NBD slots"
+        exit 1
+    fi
+    echo $NBD
+}
# Make sure we have nbd-ness # Make sure we have nbd-ness
modprobe nbd max_part=63 modprobe nbd max_part=63
# Set up nbd
NBD=`map_nbd disk`
NBD_DEV=`basename $NBD`
# Mount the instance
@@ -381,7 +411,9 @@ sed -e 's/^PasswordAuthentication.*$/PasswordAuthentication yes/' -i $ROOTFS/etc
# Unmount
umount $ROOTFS || echo 'ok'
ROOTFS=""
qemu-nbd -d $NBD
NBD=""
# Create the instance
cd $VM_DIR && virsh create libvirt.xml


@@ -125,8 +125,8 @@ fi
# Make sure that base requirements are installed
chroot $CACHEDIR apt-get update
chroot $CACHEDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
chroot $CACHEDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
chroot $CACHEDIR pip install `cat files/pips/*`
# Clean out code repos if directed to do so


@@ -32,8 +32,9 @@ fi
# prime natty with as many apt/pips as we can
if [ ! -d $CHROOTCACHE/natty-dev ]; then
rsync -azH $CHROOTCACHE/natty-base/ $CHROOTCACHE/natty-dev/
chroot $CHROOTCACHE/natty-dev apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
chroot $CHROOTCACHE/natty-dev apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
chroot $CHROOTCACHE/natty-dev pip install `cat files/pips/*`
# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.


@@ -11,6 +11,22 @@ PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
OPWD=`pwd`
PROGDIR=`dirname $0`
# Clean up any resources that may be in use
cleanup() {
set +o errexit
# Mop up temporary files
if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
umount $MNTDIR
rmdir $MNTDIR
fi
# Kill ourselves to signal any calling process
trap 2; kill -2 $$
}
trap cleanup SIGHUP SIGINT SIGTERM
mkdir -p $DEST_DIR/pxelinux.cfg
cd $DEST_DIR
for i in memdisk menu.c32 pxelinux.0; do


@@ -1,60 +1,120 @@
#!/bin/bash
# build_ramdisk.sh - Build RAM disk images
# exit on error to stop unexpected errors
set -o errexit
if [ ! "$#" -eq "1" ]; then if [ ! "$#" -eq "1" ]; then
echo "$0 builds a gziped natty openstack install" echo "$0 builds a gziped Ubuntu OpenStack install"
echo "usage: $0 dest" echo "usage: $0 dest"
exit 1 exit 1
fi fi
# Clean up any resources that may be in use
cleanup() {
set +o errexit
# Mop up temporary files
if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
umount $MNTDIR
rmdir $MNTDIR
fi
if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP "]; then
rm -f $DEV_FILE_TMP
fi
if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then
rm -f $IMG_FILE_TMP
fi
# Release NBD devices
if [ -n "$NBD" ]; then
qemu-nbd -d $NBD
fi
# Kill ourselves to signal any calling process
trap 2; kill -2 $$
}
trap cleanup SIGHUP SIGINT SIGTERM
# Set up nbd
modprobe nbd max_part=63
# Echo commands
set -o xtrace
IMG_FILE=$1
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
# Store cwd
CWD=`pwd`
cd $TOP_DIR
# Source params
source ./stackrc
CACHEDIR=${CACHEDIR:-/var/cache/devstack}
DEST=${DEST:-/opt/stack}
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
# Base image (natty by default)
DIST_NAME=${DIST_NAME:-natty}
# Param string to pass to stack.sh. Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
STACKSH_PARAMS=${STACKSH_PARAMS:-}
# Option to use the version of devstack on which we are currently working
USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
# clean install
if [ ! -r $CACHEDIR/$DIST_NAME-base.img ]; then
$TOOLS_DIR/get_uec_image.sh $DIST_NAME $CACHEDIR/$DIST_NAME-base.img
fi
# Finds the next available NBD device
# Exits script if error connecting or none free
# map_nbd image
# returns full nbd device path
function map_nbd {
for i in `seq 0 15`; do
if [ ! -e /sys/block/nbd$i/pid ]; then
NBD=/dev/nbd$i
# Connect to nbd and wait till it is ready
qemu-nbd -c $NBD $1
if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
echo "Couldn't connect $NBD"
exit 1
fi
break
fi
done
if [ -z "$NBD" ]; then
echo "No free NBD slots"
exit 1
fi
echo $NBD
}
# prime image with as many apt/pips as we can
DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img
DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX`
if [ ! -r $DEV_FILE ]; then
cp -p $CACHEDIR/$DIST_NAME-base.img $DEV_FILE_TMP
NBD=`map_nbd $DEV_FILE_TMP`
MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
mount -t ext4 ${NBD}p1 $MNTDIR
cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf
chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
chroot $MNTDIR pip install `cat files/pips/*`
# Create a stack user that is a member of the libvirtd group so that stack
@@ -66,6 +126,7 @@ if [ ! -r $CHROOTCACHE/natty-dev.img ]; then
# a simple password - pass
echo stack:pass | chroot $MNTDIR chpasswd
echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd
# and has sudo ability (in the future this should be limited to only what
# stack requires)
@@ -74,27 +135,31 @@ if [ ! -r $CHROOTCACHE/natty-dev.img ]; then
umount $MNTDIR
rmdir $MNTDIR
qemu-nbd -d $NBD
NBD=""
mv $DEV_FILE_TMP $DEV_FILE
fi
rm -f $DEV_FILE_TMP
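# The image is primed in a mktemp file and only renamed to $DEV_FILE on
# success, so an interrupted run never leaves a half-built image where the
# cache check above would find it; the rm mops up when the mv was skipped.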
# clone git repositories onto the system
# ======================================
IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX`
if [ ! -r $IMG_FILE ]; then
NBD=`map_nbd $DEV_FILE`
# Pre-create the image file
# FIXME(dt): This should really get the partition size to
# pre-create the image file
dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024))
# Create filesystem image for RAM disk
dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M
qemu-nbd -d $NBD
NBD=""
mv $IMG_FILE_TMP $IMG_FILE
fi
rm -f $IMG_FILE_TMP
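# In the first dd above, bs=1 count=1 seek=$((2*1024*1024*1024)) writes a
# single byte at the 2 GiB offset, creating the file sparse at roughly 2 GiB
# instead of zero-filling it; the second dd then copies the real filesystem
# in from ${NBD}p1.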
MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
mount -t ext4 -o loop $IMG_FILE $MNTDIR

tools/build_uec.sh Executable file

@@ -0,0 +1,248 @@
#!/usr/bin/env bash
# Make sure that we have the proper version of Ubuntu (only works on Oneiric)
if ! egrep -q "oneiric" /etc/lsb-release; then
echo "This script only works with Ubuntu Oneiric."
exit 1
fi
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
cd $TOP_DIR
# Source params
source ./stackrc
# Ubuntu distro to install
DIST_NAME=${DIST_NAME:-oneiric}
# Configure how large the VM should be
GUEST_SIZE=${GUEST_SIZE:-10G}
# exit on error to stop unexpected errors
set -o errexit
set -o xtrace
# Abort if localrc is not set
if [ ! -e $TOP_DIR/localrc ]; then
echo "You must have a localrc with ALL necessary passwords defined before proceeding."
echo "See stack.sh for required passwords."
exit 1
fi
# Install deps if needed
DEPS="kvm libvirt-bin kpartx cloud-utils"
dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
# Where to store files and instances
WORK_DIR=${WORK_DIR:-/opt/kvmstack}
# Where to store images
image_dir=$WORK_DIR/images/$DIST_NAME
mkdir -p $image_dir
# Original version of built image
uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
tarball=$image_dir/$(basename $uec_url)
# download the base uec image if we haven't already
if [ ! -f $tarball ]; then
curl $uec_url -o $tarball
(cd $image_dir && tar -Sxvzf $tarball)
resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
fi
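# resize-part-image ships with the cloud-utils package pulled in above; it
# grows the root partition inside the downloaded cloud image to $GUEST_SIZE,
# and the matching *-vmlinuz-virtual kernel is kept for direct kernel boot.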
# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
# Name of our instance, used by libvirt
GUEST_NAME=${GUEST_NAME:-devstack}
# Mop up after previous runs
virsh destroy $GUEST_NAME || true
# Where this vm is stored
vm_dir=$WORK_DIR/instances/$GUEST_NAME
# Create vm dir and remove old disk
mkdir -p $vm_dir
rm -f $vm_dir/disk
# Create a copy of the base image
qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
# Back to devstack
cd $TOP_DIR
GUEST_NETWORK=${GUEST_NETWORK:-1}
GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
GUEST_RAM=${GUEST_RAM:-1524288}
GUEST_CORES=${GUEST_CORES:-1}
# libvirt.xml configuration
NET_XML=$vm_dir/net.xml
cat > $NET_XML <<EOF
<network>
<name>devstack-$GUEST_NETWORK</name>
<bridge name="stackbr%d" />
<forward/>
<ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
<dhcp>
<range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
</dhcp>
</ip>
</network>
EOF
if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
virsh net-destroy devstack-$GUEST_NETWORK || true
# destroying the network isn't enough to delete the leases
rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
virsh net-create $vm_dir/net.xml
fi
# libvirt.xml configuration
LIBVIRT_XML=$vm_dir/libvirt.xml
cat > $LIBVIRT_XML <<EOF
<domain type='kvm'>
<name>$GUEST_NAME</name>
<memory>$GUEST_RAM</memory>
<os>
<type>hvm</type>
<kernel>$image_dir/kernel</kernel>
<cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
</os>
<features>
<acpi/>
</features>
<clock offset='utc'/>
<vcpu>$GUEST_CORES</vcpu>
<devices>
<disk type='file'>
<driver type='qcow2'/>
<source file='$vm_dir/disk'/>
<target dev='vda' bus='virtio'/>
</disk>
<interface type='network'>
<source network='devstack-$GUEST_NETWORK'/>
</interface>
<!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='$vm_dir/console.log'/>
<target port='1'/>
</serial>
<console type='pty' tty='/dev/pts/2'>
<source path='/dev/pts/2'/>
<target port='0'/>
</console>
<serial type='pty'>
<source path='/dev/pts/2'/>
<target port='0'/>
</serial>
<graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
</devices>
</domain>
EOF
rm -rf $vm_dir/uec
cp -r $TOOLS_DIR/uec $vm_dir/uec
# set metadata
cat > $vm_dir/uec/meta-data<<EOF
hostname: $GUEST_NAME
instance-id: i-hop
instance-type: m1.ignore
local-hostname: $GUEST_NAME.local
EOF
# set user-data
cat > $vm_dir/uec/user-data<<EOF
#!/bin/bash
# hostname needs to resolve for rabbit
sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
apt-get update
apt-get install git sudo -y
git clone https://github.com/cloudbuilders/devstack.git
cd devstack
git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
git fetch
git checkout `git rev-parse HEAD`
cat > localrc <<LOCAL_EOF
ROOTSLEEP=0
`cat $TOP_DIR/localrc`
LOCAL_EOF
./stack.sh
EOF
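# Quoting note for the heredoc above: the escaped \`hostname\` expands in the
# guest at boot, while the unescaped backticks (git remote show, git
# rev-parse, cat $TOP_DIR/localrc) expand on the host as user-data is
# written, baking the host's devstack revision and localrc into the guest.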
# (re)start a metadata service
(
pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
[ -z "$pid" ] || kill -9 $pid
)
cd $vm_dir/uec
python meta.py 192.168.$GUEST_NETWORK.1:4567 &
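# The kernel cmdline above (ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/)
# points the image's nocloud-net datasource at this server, which simply
# serves the $vm_dir/uec directory over HTTP; for example, from the host:
#   curl http://192.168.$GUEST_NETWORK.1:4567/meta-data
#   curl http://192.168.$GUEST_NETWORK.1:4567/user-data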
# Create the instance
virsh create $vm_dir/libvirt.xml
# Tail the console log till we are done
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
set +o xtrace
# Done creating the VM, let's tail the log
echo
echo "============================================================="
echo " -- YAY! --"
echo "============================================================="
echo
echo "We're done launching the vm, about to start tailing the"
echo "stack.sh log. It will take a second or two to start."
echo
echo "Just CTRL-C at any time to stop tailing."
while [ ! -e "$vm_dir/console.log" ]; do
sleep 1
done
tail -F $vm_dir/console.log &
TAIL_PID=$!
function kill_tail() {
kill $TAIL_PID
exit 1
}
# Let Ctrl-c kill tail and exit
trap kill_tail SIGINT
echo "Waiting stack.sh to finish..."
while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
sleep 1
done
set -o xtrace
kill $TAIL_PID
if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
exit 1
fi
echo ""
echo "Finished - Zip-a-dee Doo-dah!"
fi


@@ -11,6 +11,26 @@ PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
OPWD=`pwd`
PROGDIR=`dirname $0`
# Clean up any resources that may be in use
cleanup() {
set +o errexit
# Mop up temporary files
if [ -n "$DEST_DEV" ]; then
umount $DEST_DIR
rmdir $DEST_DIR
fi
if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
umount $MNTDIR
rmdir $MNTDIR
fi
# Kill ourselves to signal any calling process
trap 2; kill -2 $$
}
trap cleanup SIGHUP SIGINT SIGTERM
if [ -b $DEST_DIR ]; then
# We have a block device, install syslinux and mount it
DEST_DEV=$DEST_DIR


@@ -14,6 +14,9 @@ MIN_PKGS=${MIN_PKGS:-"apt-utils gpgv openssh-server"}
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
# exit on error to stop unexpected errors
set -o errexit
usage() {
echo "Usage: $0 - Prepare Ubuntu images"
echo ""
@@ -26,6 +29,32 @@ usage() {
exit 1
}
# Clean up any resources that may be in use
cleanup() {
set +o errexit
# Mop up temporary files
if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then
rm -f $IMG_FILE_TMP
fi
# Release NBD devices
if [ -n "$NBD" ]; then
qemu-nbd -d $NBD
fi
# Kill ourselves to signal any calling process
trap 2; kill -2 $$
}
# apt-get wrapper to just get arguments set correctly
function apt_get() {
local sudo="sudo"
[ "$(id -u)" = "0" ] && sudo="env"
$sudo DEBIAN_FRONTEND=noninteractive apt-get \
--option "Dpkg::Options::=--force-confold" --assume-yes "$@"
}
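# When already root, "env" stands in for sudo so the DEBIAN_FRONTEND and Dpkg
# options still take effect; noninteractive suppresses debconf prompts and
# --force-confold keeps existing config files during package upgrades.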
while getopts f:hmr: c; do
case $c in
f) FORMAT=$OPTARG
@@ -89,11 +118,21 @@ case $DIST_NAME in
;;
esac
trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT
# Check for dependencies
if [ ! -x "`which qemu-img`" -o ! -x "`which qemu-nbd`" ]; then
# Missing KVM?
apt_get install qemu-kvm
fi
# Prepare the base image
# Get the UEC image
UEC_NAME=$DIST_NAME-server-cloudimg-amd64
if [ ! -e $CACHEDIR/$UEC_NAME-disk1.img ]; then
mkdir -p $CACHEDIR
(cd $CACHEDIR && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME-disk1.img)
fi
@@ -111,25 +150,33 @@ if [ $ROOTSIZE -gt 2000 ]; then
qemu-img resize $IMG_FILE_TMP +$((ROOTSIZE - 2000))M
fi
# Finds the next available NBD device
# Exits script if error connecting or none free
# map_nbd image
# returns full nbd device path
function map_nbd {
for i in `seq 0 15`; do
if [ ! -e /sys/block/nbd$i/pid ]; then
NBD=/dev/nbd$i
# Connect to nbd and wait till it is ready
qemu-nbd -c $NBD $1
if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
echo "Couldn't connect $NBD"
exit 1
fi
break
fi
done
if [ -z "$NBD" ]; then
echo "No free NBD slots"
exit 1
fi
echo $NBD
}
# Set up nbd
modprobe nbd max_part=63
NBD=`map_nbd $IMG_FILE_TMP`
# Resize partition 1 to full size of the disk image
echo "d
@@ -162,5 +209,6 @@ rm -f $MNTDIR/etc/resolv.conf
umount $MNTDIR
rmdir $MNTDIR
qemu-nbd -d $NBD
NBD=""
mv $IMG_FILE_TMP $IMG_FILE

tools/setup_stack_user.sh Executable file

@@ -0,0 +1,74 @@
#!/usr/bin/env bash
# Echo commands
set -o xtrace
# Exit on error to stop unexpected errors
set -o errexit
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
# Change dir to top of devstack
cd $TOP_DIR
# Echo usage
usage() {
echo "Add stack user and keys"
echo ""
echo "Usage: $0 [full path to raw uec base image]"
}
# Make sure this is a raw image
if ! qemu-img info $1 | grep -q "file format: raw"; then
usage
exit 1
fi
# Mount the image
DEST=/opt/stack
STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage.user
mkdir -p $STAGING_DIR
umount $STAGING_DIR || true
sleep 1
mount -t ext4 -o loop $1 $STAGING_DIR
mkdir -p $STAGING_DIR/$DEST
# Create a stack user that is a member of the libvirtd group so that stack
# is able to interact with libvirt.
chroot $STAGING_DIR groupadd libvirtd || true
chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true
# Add a simple password - pass
echo stack:pass | chroot $STAGING_DIR chpasswd
# Configure sudo
grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers ||
echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers
cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/
sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/*
# and has sudo ability (in the future this should be limited to only what
# stack requires)
echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
# Gracefully cp only if source file/dir exists
function cp_it {
if [ -e $1 ] || [ -d $1 ]; then
cp -pRL $1 $2
fi
}
# Copy over your ssh keys and env if desired
cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys
cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc
# Give stack ownership over $DEST so it may do the work needed
chroot $STAGING_DIR chown -R stack $DEST
# Unmount
umount $STAGING_DIR

tools/uec/meta.py Normal file

@@ -0,0 +1,29 @@
import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SimpleHTTPServer import SimpleHTTPRequestHandler
def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""simple http server that listens on a give address:port"""
server_address = (host, port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
if sys.argv[1:]:
address = sys.argv[1]
else:
address = '0.0.0.0'
if ':' in address:
host, port = address.split(':')
else:
host = address
port = 8080
main(host, int(port))
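# Note: this is Python 2 code; BaseHTTPServer and SimpleHTTPServer became
# http.server in Python 3. SimpleHTTPRequestHandler serves whatever directory
# the process is started from, which is why build_uec.sh cd's into
# $vm_dir/uec before launching it.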

tools/warm_apts_and_pips.sh Executable file

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Echo commands
set -o xtrace
# Exit on error to stop unexpected errors
set -o errexit
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd`
# Change dir to top of devstack
cd $TOP_DIR
# Echo usage
usage() {
echo "Cache OpenStack dependencies on a uec image to speed up performance."
echo ""
echo "Usage: $0 [full path to raw uec base image]"
}
# Make sure this is a raw image
if ! qemu-img info $1 | grep -q "file format: raw"; then
usage
exit 1
fi
# Make sure we are in the correct dir
if [ ! -d files/apts ]; then
echo "Please run this script from devstack/tools/"
exit 1
fi
# Mount the image
STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage
mkdir -p $STAGING_DIR
umount $STAGING_DIR || true
sleep 1
mount -t ext4 -o loop $1 $STAGING_DIR
# Make sure that base requirements are installed
cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
# Perform caching on the base image to speed up subsequent runs
chroot $STAGING_DIR apt-get update
chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
mkdir -p $STAGING_DIR/var/cache/pip
PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true
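# PIP_DOWNLOAD_CACHE is inherited across the chroot, so pip saves its
# downloaded archives under /var/cache/pip inside the image where later
# installs can reuse them; the trailing "|| true" keeps a single failed
# package from aborting the whole warm-up under errexit.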
# Unmount
umount $STAGING_DIR


@@ -7,6 +7,12 @@ if [ ! -e ../../localrc ]; then
exit 1
fi
# This directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
# Source params
cd ../.. && source ./stackrc && cd $TOP_DIR
# Echo commands
set -o xtrace
@@ -41,9 +47,6 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
# Size of image
VDI_MB=${VDI_MB:-2500}
# Make sure we have git
if ! which git; then
GITDIR=/tmp/git-1.7.7
@@ -223,15 +226,21 @@ mkdir -p /boot/guest
SR_UUID=`xe sr-list --minimal name-label="Local storage"`
xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage
# Shutdown previous runs
DO_SHUTDOWN=${DO_SHUTDOWN:-1}
if [ "$DO_SHUTDOWN" = "1" ]; then
# Shutdown all domU's that were created previously
xe vm-list --minimal name-label="$LABEL" | xargs ./scripts/uninstall-os-vpx.sh
# Destroy any instances that were launched
for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
echo "Shutting down nova instance $uuid"
xe vm-unpause uuid=$uuid || true
xe vm-shutdown uuid=$uuid
xe vm-destroy uuid=$uuid
done
fi
# Path to head xva. By default keep overwriting the same one to save space
USE_SEPARATE_XVAS=${USE_SEPARATE_XVAS:-0}

tools/xen/build_domU_multi.sh Executable file

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Echo commands
set -o xtrace
# Head node host, which runs glance, api, keystone
HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57}
HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57}
COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58}
COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58}
# Networking params
FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
# Variables common amongst all hosts in the cluster
COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE"
# Helper to launch domUs
function build_domU {
GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh
}
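# Everything here rides in on the environment: build_domU.sh (assumed to read
# these variables) hands each guest the COMMON_VARS via STACKSH_PARAMS, so
# every node's stack.sh points at the head node's MySQL, RabbitMQ and Glance,
# plus its own ENABLED_SERVICES list from the fifth argument.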
# Launch the head node - headnode uses a non-ip domain name,
# because rabbit won't launch with an ip addr hostname :(
build_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
# Wait till the head node is up
while ! curl -L http://$HEAD_PUB_IP | grep -q username; do
echo "Waiting for head node ($HEAD_PUB_IP) to start..."
sleep 5
done
# Build the HA compute host
build_domU $COMPUTE_PUB_IP $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"