# Devstack2 local configuration

# When a value looks like a bash variable + default then it is parsed like a bash
# variable and will perform similar lookups. For example, ${SQL_HOST:-localhost} will
# look in environment variable SQL_HOST and if that does not exist then
# localhost will be used instead.
#
# We also allow for simple referencing of other variables, similar to bash
# variables, whenever a key's value has the following format:
#
# web_host = ${RUNNING_HOST:-http://$(X:Y)}
#
# For this example, the RUNNING_HOST environment variable will be referenced.
# If it is not found (no value exists), then "http://$(X:Y)" will be
# examined and found to contain an expression (denoted by "$(X:Y)").
#
# Within that expression there are components of the format "X:Y", which the
# configuration class will attempt to resolve by looking up in the
# configuration file the value in section "X" with option "Y", and replacing
# what was previously "$(X:Y)" with the retrieved value. Multiple of these "expressions"
# are allowed and each will have its expression "text" replaced with the resolved
# value before the final value for the original variable is determined.
#
# For this example, if section X with option Y contained the value "1.2.3.4", then
# the final string would be "http://1.2.3.4", which would then be cached as the value
# for option web_host.
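#
# A short worked example of these rules using sections that actually exist in
# this file (the values are hypothetical). Given:
#
#   [host]
#   ip = 1.2.3.4
#
# and an option defined as:
#
#   web_host = ${RUNNING_HOST:-http://$(host:ip)}
#
# with the RUNNING_HOST environment variable unset, "$(host:ip)" resolves via
# section "host", option "ip" to "1.2.3.4", so web_host becomes
# "http://1.2.3.4".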
[default]

# Where is rabbit located?
rabbit_host = ${RABBIT_HOST:-$(host:ip)}

# Sys log enabled or not
syslog = 0

# Which net interfaces to attempt to detect an ip on
net_interface = eth0,br100

[host]

# Set api host endpoint
# If this is empty, the code will try to determine your network ip.
ip = ${HOST_IP:-}

[db]

# Where your db is located and how to access it.
sql_host = ${SQL_HOST:-localhost}
sql_user = ${SQL_USER:-root}
port = ${SQL_PORT:-3306}

# What type of database is this?
type = ${SQL_TYPE:-mysql}

[keystone]

# Where is the keystone auth host at?
keystone_auth_host = ${KEYSTONE_AUTH_HOST:-$(host:ip)}
keystone_auth_port = ${KEYSTONE_AUTH_PORT:-35357}
keystone_auth_protocol = ${KEYSTONE_AUTH_PROTOCOL:-http}

# Where is the keystone service host at?
keystone_service_host = ${KEYSTONE_SERVICE_HOST:-$(host:ip)}
keystone_service_port = ${KEYSTONE_SERVICE_PORT:-5000}
keystone_service_protocol = ${KEYSTONE_SERVICE_PROTOCOL:-http}

[nova]

# Should nova be in verbose mode?
verbose = ${NOVA_VERBOSE:-1}

# Allow the admin api to be accessible?
allow_admin_api = 1

# Nova originally used project_id as the *account* that owned resources
# (servers, ip addresses, ...). With the addition of Keystone we have
# standardized on the term **tenant** as the entity that owns the resources.
# **novaclient** still uses the old deprecated term project_id. Note that this
# field should now be set to tenant_name, not tenant_id.
nova_project_id = ${TENANT:-demo}

# In addition to the owning entity (tenant), nova stores the entity performing
# the action as the **user**.
nova_username = ${USERNAME:-demo}

# With Keystone you pass the keystone password instead of an api key.
# The most recent versions of novaclient use NOVA_PASSWORD instead of NOVA_API_KEY.
nova_password = ${ADMIN_PASSWORD:-secrete}

# With the addition of Keystone, to use an openstack cloud you should
# authenticate against keystone, which returns a **Token** and **Service
# Catalog**. The catalog contains the endpoints for all services the
# user/tenant has access to - including nova, glance, keystone, swift, ... We
# currently recommend using the 2.0 *auth api*.
#
# *NOTE*: Using the 2.0 *auth api* does not mean that the compute api is 2.0.
# We will use the 1.1 *compute api*.
nova_url = ${NOVA_URL:-http://$(host:ip):5000/v2.0/}

# Currently novaclient needs you to specify the *compute api* version. This
# needs to match the config of your catalog returned by Keystone.
nova_version = ${NOVA_VERSION:-1.1}

# Which scheduler will nova be running with?
# SimpleScheduler should work in most cases unless you are working on multi-zone mode.
scheduler = ${NOVA_SCHEDULER:-nova.scheduler.simple.SimpleScheduler}

# Network settings
# Very useful to read over:
# http://docs.openstack.org/cactus/openstack-compute/admin/content/configuring-networking-on-the-compute-node.html
fixed_range = ${NOVA_FIXED_RANGE:-10.0.0.0/24}
fixed_network_size = ${NOVA_FIXED_NETWORK_SIZE:-256}
network_manager = ${NET_MAN:-FlatDHCPManager}
public_interface = ${PUBLIC_INTERFACE:-eth0}
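# (Note: the two defaults above are consistent - a /24 range contains
# 2^(32-24) = 256 addresses, which is why fixed_network_size defaults to 256.
# Adjust both together if you change the range.)
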
# DHCP Warning: If your flat interface device uses DHCP, there will be a
# hiccup while the network is moved from the flat interface to the flat
# network bridge. This will happen when you launch your first instance. Upon
# launch you will lose all connectivity to the node, and the vm launch will
# probably fail.
#
# TODO: should/can we check that FLAT_INTERFACE is sane? (in code)
#
# If you are running on a single node and don't need to access the VMs from
# devices other than that node, you can set the flat interface to the same
# value as FLAT_NETWORK_BRIDGE. This will stop the network hiccup from
# occurring.
flat_interface = ${FLAT_INTERFACE:-eth0}
vlan_interface = ${VLAN_INTERFACE:-$(nova:public_interface)}
flat_network_bridge = ${FLAT_NETWORK_BRIDGE:-br100}

# Test floating pool and range are used for testing.
# They are defined here until the admin APIs can replace nova-manage.
floating_range = ${FLOATING_RANGE:-172.24.4.224/28}
test_floating_pool = ${TEST_FLOATING_POOL:-test}
test_floating_range = ${TEST_FLOATING_RANGE:-192.168.253.0/29}

# TODO: document these
vncproxy_url = ${VNCPROXY_URL:-http://$(host:ip):6080/vnc_auto.html}
xvpvncproxy_url = ${XVPVNCPROXY_URL:-http://$(host:ip):6081/console}
vncserver_listen = ${VNCSERVER_LISTEN:-127.0.0.1}
vncserver_proxyclient_address = ${VNCSERVER_PROXYCLIENT_ADDRESS:-}
ec2_dmz_host = ${EC2_DMZ_HOST:-$(host:ip)}
xen_firewall_driver = nova.virt.firewall.IptablesFirewallDriver
libvirt_firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver

# Volume settings
volume_group = ${VOLUME_GROUP:-nova-volumes}
volume_backing_file = ${VOLUME_BACKING_FILE:-}
volume_backing_file_size = ${VOLUME_BACKING_FILE_SIZE:-2052M}
volume_name_prefix = ${VOLUME_NAME_PREFIX:-volume-}
volume_name_postfix = ${VOLUME_NAME_POSTFIX:-%08x}

# How instances will be named
instance_name_prefix = ${INSTANCE_NAME_PREFIX:-instance-}
instance_name_postfix = ${INSTANCE_NAME_POSTFIX:-%08x}
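# (The *_postfix values above appear to be printf-style formats - an
# assumption based on the %08x default: id 26 would render as eight hex
# digits, giving names like instance-0000001a or volume-0000001a.)
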
# Where instances will be stored
instances_path = ${INSTANCES_PATH:-}

# Are we setup in multihost mode?
# Multi-host is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that
# is running the VM - removing a SPOF and bandwidth bottleneck.
multi_host = ${MULTI_HOST:-0}

# Virtualization settings
virt_driver = ${VIRT_DRIVER:-libvirt}
libvirt_type = ${LIBVIRT_TYPE:-kvm}

# What type of image service will be used?
img_service = ${IMG_SERVICE:-nova.image.glance.GlanceImageService}
glance_server = ${GLANCE_SERVER:-$(host:ip):9292}

# Used however you want - ensure you know nova's conf file format if you use this!
extra_flags = ${NOVA_EXTRA_FLAGS:-}
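#
# For example (a hypothetical flag, assuming the "--flag=value" flagfile
# format nova used at the time):
#
#   extra_flags = --allow_resize_to_same_host=True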
[ec2]

# Set the ec2 url so euca2ools works
ec2_url = ${EC2_URL:-}

# Access key is set in the initial keystone data to be the same as username
ec2_access_key = ${USERNAME:-demo}

# Secret key is set in the initial keystone data to the admin password
ec2_secret_key = ${ADMIN_PASSWORD:-secrete}

[vm]

# Max time till the vm is bootable
boot_timeout = ${BOOT_TIMEOUT:-15}

# Max time to wait while vm goes from build to active state
active_timeout = ${ACTIVE_TIMEOUT:-10}

# Max time from run instance command until it is running
running_timeout = ${RUNNING_TIMEOUT:-$(vm:active_timeout)}

# Max time to wait for proper IP association and disassociation.
associate_timeout = ${ASSOCIATE_TIMEOUT:-10}

[git]

# Compute service git repo
nova_repo = git://github.com/openstack/nova.git
nova_branch = master

# Storage service git repo
swift_repo = git://github.com/openstack/swift.git
swift_branch = master

# Swift and keystone integration git repo
swift_keystone_repo = git://github.com/cloudbuilders/swift-keystone2.git
swift_keystone_branch = master

# Image catalog service git repo
glance_repo = git://github.com/openstack/glance.git
glance_branch = master

# Unified auth system (manages accounts/tokens) git repo
keystone_repo = git://github.com/openstack/keystone.git
keystone_branch = master

# A websockets/html5 or flash powered VNC console for vm instances
novnc_repo = git://github.com/cloudbuilders/noVNC.git
novnc_branch = master

# Django powered web control panel for openstack
horizon_repo = git://github.com/openstack/horizon.git
horizon_branch = master

# Python client library to keystone that horizon uses
keystoneclient_repo = git://github.com/openstack/python-keystoneclient.git
keystoneclient_branch = master

# Python client library to nova that horizon (and others) use
novaclient_repo = git://github.com/openstack/python-novaclient.git
novaclient_branch = master

# Quantum service git repo
quantum_repo = git://github.com/openstack/quantum.git
quantum_branch = master

# Quantum client git repo
quantum_client_repo = git://github.com/openstack/python-quantumclient.git
quantum_client_branch = master

# Melange service git repo
melange_repo = git://github.com/openstack/melange.git
melange_branch = master

# Python melange client library
melangeclient_repo = git://github.com/openstack/python-melangeclient.git
melangeclient_branch = master

[melange]

# Default Melange Port
m_port = ${M_PORT:-9898}

# Default Melange Host
m_host = ${M_HOST:-$(host:ip)}

# Melange MAC Address Range
m_mac_range = ${M_MAC_RANGE:-404040/24}
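# (If this follows a base/prefix-bits format - an assumption - 404040/24
# means generated MACs share the 24-bit prefix 40:40:40.)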
[quantum]

# Where your quantum host is at
q_host = ${Q_HOST:-$(host:ip)}

# Which port your quantum host is at
q_port = ${Q_PORT:-9696}

# Which type of quantum plugin you will be using
q_plugin = ${Q_PLUGIN:-openvswitch}

# Default OVS bridge name
ovs_bridge = br-int

# OVS bridge external name
ovs_bridge_external_name = br-int

[horizon]

# Which user apache will serve as.
#
# Root will typically not work (for apache on most distros).
# On ubuntu, "sudo adduser <username>" followed by
# "sudo adduser <username> admin" will set this up for you.
# I typically use user "horizon" for ubuntu and the runtime user (who will
# have sudo access) for RHEL.
# If blank, the currently executing user will be used.
apache_user = ${APACHE_USER:-}

# This is the group of the previous user (adjust as needed)
apache_group = ${APACHE_GROUP:-$(horizon:apache_user)}

# Port horizon should run on
port = ${HORIZON_PORT:-80}

[swift]

# Default is APP_DIR/data
data_location = ${SWIFT_DATA_LOCATION:-data}

swift_user = ${SWIFT_USER:-root}

swift_group = ${SWIFT_GROUP:-$(swift:swift_user)}

loopback_disk_size = ${SWIFT_LOOPBACK_DISK_SIZE:-1000000}

partition_power_size = ${SWIFT_PARTITION_POWER_SIZE:-9}

[ci]

# CI test suite
citest_repo = https://github.com/openstack/tempest.git
citest_branch = master

[img]

# Specify a comma-separated list of uec images to download and install into glance.
# Supported urls here are:
#
# * "uec-style" images:
#   If the file ends in .tar.gz, uncompress the tarball and select the first
#   .img file inside it as the image. If present, use "*-vmlinuz*" as the
#   kernel and "*-initrd*" as the ramdisk.
#   example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz
# * disk images (*.img, *.img.gz):
#   If the file ends in .img, it will be uploaded to glance and registered as
#   a disk image. If it ends in .gz, it is uncompressed first.
#   examples:
#   http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
#   http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz

# old ttylinux-uec image
#image_urls="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz"
# cirros full disk image
#image_urls="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"

# uec style cirros image
image_urls = http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz

[passwords]

# This section is where passwords can be stored. This section also has special
# meaning in code: the configuration class we use will look in this section
# for passwords, and if no password is found (i.e. an empty string) then the
# user will be prompted to enter a password; if they do not enter one (or it
# is blank) then one will be generated for the user.
#
# *You can set the environment variable "PASS_ASK" to 1 to not be prompted at all.*
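#
# For example (assuming the prompting behavior described above): with
# MYSQL_PASSWORD unset and "sql" below left blank, the user is asked for a
# MySQL password at install time, and a random one is generated if nothing
# is entered.
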
# You will need to use the same MYSQL_PASSWORD on every host if you are doing
# a multi-node devstack installation.
sql = ${MYSQL_PASSWORD:-}

# Change the rabbit password since the default is "guest"
rabbit = ${RABBIT_PASSWORD:-}

# This password will be used by horizon and keystone as the admin password
horizon_keystone_admin = ${ADMIN_PASSWORD:-}

# Openstack components need to have an admin token to validate user tokens.
service_token = ${SERVICE_TOKEN:-}

# The xen api connection password
xenapi_connection = ${XENAPI_CONNECTION:-}

# swift password (random unique string)
swift_hash = ${SWIFT_HASH:-}