# anvil/conf/stack.ini
# Devstack2 local configuration
# When a value looks like a bash variable + default then it is parsed like a
# bash variable and will perform a similar lookup. I.e. ${SQL_HOST:-localhost}
# will look for the environment variable SQL_HOST and, if that does not exist,
# then localhost will be used instead.
#
# We also allow simple referencing of other variables, similar to bash
# variables, when a key's value has the following format:
#
# web_host = ${RUNNING_HOST:-http://$(X:Y)}
#
# For this example, the RUNNING_HOST environment variable will be referenced.
# If it is not found (no value exists), then "http://$(X:Y)" will be
# examined and found to contain an expression (denoted by "$(X:Y)").
#
# In that expression, each component of the format "X:Y" is resolved by the
# configuration class looking up the value in section "X" under option "Y" of
# this configuration file and replacing what was previously "$(X:Y)" with the
# retrieved value. Multiple of these "expressions" are allowed and each will
# have its expression "text" replaced with the resolved value before the final
# value for the original variable is determined.
#
# For this example, if section "X" with option "Y" contained the value "1.2.3.4"
# then the final string would be "http://1.2.3.4", which would then be cached
# as the value for option web_host.
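#
# A minimal sketch of both mechanisms together (the value of ip here is
# hypothetical, not this file's default):
#
#   [host]
#   ip = 10.0.0.5
#
#   [default]
#   web_host = ${RUNNING_HOST:-http://$(host:ip)}
#
# With no RUNNING_HOST in the environment, web_host resolves to
# "http://10.0.0.5".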
[default]
# Where is rabbit located?
rabbit_host = ${RABBIT_HOST:-$(host:ip)}
# Syslog enabled or not
syslog = 0
# Which run type to use [fork (the default), upstart, screen]
run_type = fork
[upstart]
# These flags are used for starting components under upstart (if default/run_type is upstart)
respawn = 1
# Note that we support per-component start/stop events as well as "all".
# These are the "all" start and stop event names.
start_event = all_os_start
stop_event = all_os_stop
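# For example, assuming the matching upstart jobs have been installed, all
# components could be started or stopped by emitting these events by hand:
#
#   sudo initctl emit all_os_start
#   sudo initctl emit all_os_stop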
[host]
# Set the api host endpoint.
# If this is empty, the code will try to determine your network ip.
ip = ${HOST_IP:-}
[db]
# Where your db is located and how to access it.
sql_host = ${SQL_HOST:-localhost}
sql_user = ${SQL_USER:-root}
port = ${SQL_PORT:-3306}
# What type of database is this?
type = ${SQL_TYPE:-mysql}
[keystone]
# Where is the keystone auth host at?
keystone_auth_host = ${KEYSTONE_AUTH_HOST:-$(host:ip)}
keystone_auth_port = ${KEYSTONE_AUTH_PORT:-35357}
keystone_auth_protocol = ${KEYSTONE_AUTH_PROTOCOL:-http}
# Where is the keystone service host at?
keystone_service_host = ${KEYSTONE_SERVICE_HOST:-$(host:ip)}
keystone_service_port = ${KEYSTONE_SERVICE_PORT:-5000}
keystone_service_protocol = ${KEYSTONE_SERVICE_PROTOCOL:-http}
# User names (used in init script)
invisible_user = invisible_to_admin
demo_user = ${KEYSTONE_DEMO_USER:-demo}
admin_user = ${KEYSTONE_ADMIN_USER:-admin}
# The above user names are also the tenant names.
#
# Nova originally used project_id as the *account* that owned resources (servers,
# ip addresses, ...). With the addition of Keystone we have standardized on the
# term **tenant** as the entity that owns the resources.
[nova]
# Should nova be in verbose mode?
verbose = ${NOVA_VERBOSE:-1}
logdir = ${NOVA_LOGDIR:-/var/log/nova}
# Allow the admin api to be accessible?
allow_admin_api = 1
# Currently novaclient needs you to specify the *compute api* version.
nova_version = ${NOVA_VERSION:-1.1}
# Which scheduler will nova be running with?
# SimpleScheduler should work in most cases unless you are working in multi-zone mode.
scheduler = ${NOVA_SCHEDULER:-nova.scheduler.simple.SimpleScheduler}
# Network settings
# Very useful to read over:
# http://docs.openstack.org/cactus/openstack-compute/admin/content/configuring-networking-on-the-compute-node.html
fixed_range = ${NOVA_FIXED_RANGE:-10.0.0.0/24}
fixed_network_size = ${NOVA_FIXED_NETWORK_SIZE:-256}
network_manager = ${NET_MAN:-FlatDHCPManager}
public_interface = ${PUBLIC_INTERFACE:-eth0}
# DHCP Warning: If your flat interface device uses DHCP, there will be a hiccup while the network
# is moved from the flat interface to the flat network bridge. This will happen when you launch
# your first instance. Upon launch you will lose all connectivity to the node, and the vm launch will probably fail.
#
# If you are running on a single node and don't need to access the VMs from devices other than
# that node, you can set the flat interface to the same value as FLAT_NETWORK_BRIDGE. This will stop the network hiccup from occurring.
flat_interface = ${FLAT_INTERFACE:-eth0}
vlan_interface = ${VLAN_INTERFACE:-$(nova:public_interface)}
flat_network_bridge = ${FLAT_NETWORK_BRIDGE:-br100}
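# For example (a hypothetical single-node override, per the DHCP warning above),
# pointing the flat interface at the bridge itself avoids the hiccup:
#
# flat_interface = ${FLAT_INTERFACE:-$(nova:flat_network_bridge)}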
# Test floating pool and range are used for testing.
# They are defined here until the admin APIs can replace nova-manage
floating_range = ${FLOATING_RANGE:-172.24.4.224/28}
test_floating_pool = ${TEST_FLOATING_POOL:-test}
test_floating_range = ${TEST_FLOATING_RANGE:-192.168.253.0/29}
# TODO document these
vncproxy_url = ${VNCPROXY_URL:-http://$(host:ip):6080/vnc_auto.html}
xvpvncproxy_url = ${XVPVNCPROXY_URL:-http://$(host:ip):6081/console}
vncserver_listen = ${VNCSERVER_LISTEN:-127.0.0.1}
vncserver_proxyclient_address = ${VNCSERVER_PROXYCLIENT_ADDRESS:-}
ec2_dmz_host = ${EC2_DMZ_HOST:-$(host:ip)}
libvirt_firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver
# Volume settings
volume_group = ${VOLUME_GROUP:-nova-volumes}
volume_backing_file = ${VOLUME_BACKING_FILE:-}
volume_backing_file_size = ${VOLUME_BACKING_FILE_SIZE:-2052M}
volume_name_prefix = ${VOLUME_NAME_PREFIX:-volume-}
volume_name_postfix = ${VOLUME_NAME_POSTFIX:-%08x}
# How instances will be named
instance_name_prefix = ${INSTANCE_NAME_PREFIX:-instance-}
instance_name_postfix = ${INSTANCE_NAME_POSTFIX:-%08x}
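# The postfix is a printf-style format, presumably filled with the instance id;
# e.g. with the defaults above, instance id 26 would yield "instance-0000001a"
# (26 as zero-padded hex).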
# Where instances will be stored
instances_path = ${INSTANCES_PATH:-}
# Are we set up in multi-host mode?
# Multi-host is a mode where each compute node runs its own network node.
# This allows network operations and routing for a VM to occur on the server that is running the VM - removing a SPOF and bandwidth bottleneck.
multi_host = ${MULTI_HOST:-0}
# Virtualization settings
# Drivers known (libvirt, xenserver, vmware, baremetal)
# Defaults to libvirt (the most compatible) if unknown.
virt_driver = ${VIRT_DRIVER:-libvirt}
# Only useful if the above virt_driver is "libvirt"
# Types known (qemu, kvm, xen, uml, lxc)
# Defaults to qemu (the most compatible) if unknown (or blank).
libvirt_type = ${LIBVIRT_TYPE:-}
# What type of image service will be used?
img_service = ${IMG_SERVICE:-nova.image.glance.GlanceImageService}
glance_server = ${GLANCE_SERVER:-$(host:ip):9292}
# Used however you want - ensure you know nova's conf file format if you use this!
extra_flags = ${NOVA_EXTRA_FLAGS:-}
# Xen server/api settings
xa_connection_url = http://169.254.0.1:80/
xa_connection_username = root
xs_firewall_driver = nova.virt.firewall.IptablesFirewallDriver
xs_flat_interface = eth1
xs_flat_network_bridge = xapi1
[extern]
# Set the ec2 url so euca2ools works
# Typically like http://localhost:8773/services/Cloud
# If blank we will generate this.
ec2_url = ${EC2_URL:-http://$(host:ip):8773/services/Cloud}
# Set the s3 url so euca2ools works
# Typically like http://localhost:3333/services/Cloud
# If blank we will generate this.
s3_url = ${S3_URL:-http://$(host:ip):3333/services/Cloud}
# Not used (currently)??
ec2_user_id = 42
ec2_cert_fn = ~/cert.pm
# Not used (currently)??
nova_cert_fn = ~/cert.pm
[git]
# Compute service git repo
nova_repo = git://github.com/openstack/nova.git
nova_branch = master
# Storage service git repo
swift_repo = git://github.com/openstack/swift.git
swift_branch = master
# Image catalog service git repo
glance_repo = git://github.com/openstack/glance.git
glance_branch = master
# Unified auth system (manages accounts/tokens) git repo
keystone_repo = git://github.com/openstack/keystone.git
keystone_branch = master
# A websockets/html5 or flash powered VNC console for vm instances
novnc_repo = git://github.com/cloudbuilders/noVNC.git
novnc_branch = master
# Django powered web control panel for openstack
horizon_repo = git://github.com/openstack/horizon.git
horizon_branch = master
# Python client library to keystone that horizon uses
keystoneclient_repo = git://github.com/openstack/python-keystoneclient.git
keystoneclient_branch = master
# Python client library to nova that horizon (and others) use
novaclient_repo = git://github.com/openstack/python-novaclient.git
novaclient_branch = master
# Quantum service git repo
quantum_repo = git://github.com/openstack/quantum.git
quantum_branch = master
# Quantum client git repo
quantum_client_repo = git://github.com/openstack/python-quantumclient.git
quantum_client_branch = master
# Melange service
melange_repo = git://github.com/openstack/melange.git
melange_branch = master
# Python melange client library
melangeclient_repo = git://github.com/openstack/python-melangeclient.git
melangeclient_branch = master
[melange]
# Default Melange Port
m_port = ${M_PORT:-9898}
# Default Melange Host
m_host = ${M_HOST:-$(host:ip)}
# Melange MAC Address Range
m_mac_range = ${M_MAC_RANGE:-404040/24}
[quantum]
# Where your quantum host is at
q_host = ${Q_HOST:-$(host:ip)}
# Which port your quantum host is at
q_port = ${Q_PORT:-9696}
# Which type of quantum plugin you will be using
q_plugin = ${Q_PLUGIN:-openvswitch}
# Default OVS bridge name
ovs_bridge = br-int
# OVS bridge external name
ovs_bridge_external_name = br-int
[horizon]
# What user will apache be serving from.
#
# Root will typically not work (for apache on most distros).
# On ubuntu the following will set up a suitable user:
#   sudo adduser <username>
#   sudo adduser <username> admin
# I typically use user "horizon" for ubuntu and the runtime user (who will have
# sudo access) for RHEL.
# If blank the currently executing user will be used.
apache_user = ${APACHE_USER:-}
# This is the group of the previous user (adjust as needed)
apache_group = ${APACHE_GROUP:-$(horizon:apache_user)}
# Port horizon should run on
port = ${HORIZON_PORT:-80}
[swift]
# By default the swift drives and objects are located inside the swift source
# directory. The SWIFT_DATA_LOCATION variable allows you to redefine this.
data_location = ${SWIFT_DATA_LOCATION:-data}
# TBD
swift_user = ${SWIFT_USER:-root}
# TBD
swift_group = ${SWIFT_GROUP:-$(swift:swift_user)}
# We will create a loop-back disk formatted as XFS to store the swift data.
# By default the disk size is 1 gigabyte. The SWIFT_LOOPBACK_DISK_SIZE variable
# allows you to change that.
loopback_disk_size = ${SWIFT_LOOPBACK_DISK_SIZE:-1G}
# The ring uses a configurable number of bits from a path's MD5 hash as a
# partition index that designates a device. The number of bits kept from the
# hash is known as the partition power, and 2 to the partition power indicates
# the partition count. Partitioning the full MD5 hash ring allows other parts
# of the cluster to work in batches of items at once, which ends up either more
# efficient or at least less complex than working with each item separately or
# the entire cluster all at once. By default we use a partition power of 9
# (which means 2^9 = 512 partitions).
partition_power_size = ${SWIFT_PARTITION_POWER_SIZE:-9}
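# For example, raising the value above to 10 would give 2^10 = 1024 partitions,
# while lowering it to 8 would give 2^8 = 256.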
[img]
# Specify a comma-separated list of uec images to download and install into glance.
# Supported urls here are:
#
# * "uec-style" images:
# If the file ends in .tar.gz, uncompress the tarball and select the first
# .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel
# and "*-initrd*" as the ramdisk.
# example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz
# * disk images (*.img, *.img.gz)
# If the file ends in .img, then it will be uploaded to and registered in
# glance as a disk image. If it ends in .gz, it is uncompressed first.
# examples:
# http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
# http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
# old ttylinux-uec image
#image_urls="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz"
# cirros full disk image
#image_urls="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
# uec style cirros image
image_urls = http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz, http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
[passwords]
# This section is where passwords could be stored. This section also has special
# meaning in code: the configuration class we use will look in this section for
# passwords, and if no password is found (i.e. an empty string) then the user
# will be prompted to enter one. If they do not enter one (or it is blank) then
# one will be generated for the user.
#
# *You can set the environment variable "PASS_ASK" to 1 to not be prompted at all.*
# You will need to send the same MYSQL_PASSWORD to every host if you are doing
# a multi-node devstack installation.
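# For example, on each host of a multi-node install you could export the same
# (hypothetical) value before running:
#
#   export MYSQL_PASSWORD=shared-secret-here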
sql = ${MYSQL_PASSWORD:-}
# Change the rabbit password since the default is "guest"
rabbit = ${RABBIT_PASSWORD:-}
# This password will be used by horizon and keystone as the admin password
horizon_keystone_admin = ${ADMIN_PASSWORD:-}
# Openstack components need to have an admin token to validate user tokens.
service_token = ${SERVICE_TOKEN:-}
# The xen api connection password
xenapi_connection = ${XENAPI_CONNECTION:-}
# swift_hash is a random unique string for a swift cluster that can never change.
swift_hash = ${SWIFT_HASH:-}