#!/bin/bash
#
# stackrc
#
# ensure we don't re-source this in the same environment
[[ -z "$_DEVSTACK_STACKRC" ]] || return 0
declare -r -g _DEVSTACK_STACKRC=1
# Find the other rc files
RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
# Source required DevStack functions and globals
source $RC_DIR/functions
# Set the target branch. This is used so that stable branching
# does not need to update each repo below.
TARGET_BRANCH=master
# Cycle trailing projects need to branch later than the others.
TRAILING_TARGET_BRANCH=master
# And some repos do not create stable branches, so this is used
# to make it explicit and avoid accidentally setting to a stable
# branch.
BRANCHLESS_TARGET_BRANCH=master
# Destination path for installation
DEST=/opt/stack
# Destination for working data
DATA_DIR=${DEST}/data
# Destination for status files
SERVICE_DIR=${DEST}/status
# Path for subunit output file
SUBUNIT_OUTPUT=${DEST}/devstack.subunit
# Determine stack user
if [[ $EUID -eq 0 ]]; then
    STACK_USER=stack
else
    STACK_USER=$(whoami)
fi
# Specify a region name
REGION_NAME=${REGION_NAME:-RegionOne}
# Specify name of region where identity service endpoint is registered.
# When deploying multiple DevStack instances in different regions with shared
# Keystone, set KEYSTONE_REGION_NAME to the region where Keystone is running
# for DevStack instances which do not host Keystone.
KEYSTONE_REGION_NAME=${KEYSTONE_REGION_NAME:-$REGION_NAME}
# Specify which services to launch. These generally correspond to
# screen tabs. To change the default list, use the ``enable_service`` and
# ``disable_service`` functions in ``local.conf``.
# For example, to enable Swift as part of DevStack add the following
# settings in ``local.conf``:
# [[local|localrc]]
# enable_service s-proxy s-object s-container s-account
# This allows us to pass ``ENABLED_SERVICES``
if ! isset ENABLED_SERVICES ; then
    # Keystone - nothing works without keystone
    ENABLED_SERVICES=key
    # Nova - services to support libvirt based openstack clouds
    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-api-meta
    # Placement service needed for Nova
    ENABLED_SERVICES+=,placement-api,placement-client
    # Glance services needed for Nova
    ENABLED_SERVICES+=,g-api,g-reg
    # Cinder
    ENABLED_SERVICES+=,c-sch,c-api,c-vol
    # Neutron
    ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
    # Dashboard
    ENABLED_SERVICES+=,horizon
    # Additional services
    ENABLED_SERVICES+=,rabbit,tempest,mysql,etcd3,dstat
fi
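# As an illustration only, the default list can be trimmed or extended from
# the ``[[local|localrc]]`` section of ``local.conf``, e.g.:
#   disable_service horizon tempest
#   enable_service s-proxy s-object s-container s-account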
# Global toggle for enabling services under mod_wsgi. If this is set to
# ``True``, all services that use HTTPD + mod_wsgi as the preferred method of
# deployment will be deployed under Apache. If this is set to ``False``, all
# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``)
ENABLE_HTTPD_MOD_WSGI_SERVICES=True
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=osapi_compute,metadata
# CELLSV2_SETUP - how we should configure services with cells v2
#
# - superconductor - this is one conductor for the api services, and
# one per cell managing the compute services. This is preferred
# - singleconductor - this is one conductor for the whole deployment,
# this is not recommended, and will be removed in the future.
CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"}
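# For example, to fall back to the single-conductor layout (not recommended),
# this could be set in ``local.conf``:
#   CELLSV2_SETUP=singleconductor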
# Set the root URL for Horizon
HORIZON_APACHE_ROOT="/dashboard"
# Whether to use SYSTEMD to manage services; we only do this from
# Queens forward.
USE_SYSTEMD="True"
USER_UNITS=$(trueorfalse False USER_UNITS)
if [[ "$USER_UNITS" == "True" ]]; then
SYSTEMD_DIR="$HOME/.local/share/systemd/user"
SYSTEMCTL="systemctl --user"
else
SYSTEMD_DIR="/etc/systemd/system"
SYSTEMCTL="sudo systemctl"
fi
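# As a usage sketch (assuming the usual devstack@<service> unit naming),
# either invocation can then be used to inspect a service, e.g.:
#   $SYSTEMCTL status devstack@n-cpu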
# Whether or not to enable Kernel Samepage Merging (KSM) if available.
# This allows programs that mark their memory as mergeable to share
# memory pages if they are identical. This is particularly useful with
# libvirt backends. This reduces memory usage at the cost of CPU overhead
# to scan memory. We default to enabling it because we tend to be more
# memory constrained than CPU bound.
ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
# Passwords generated by interactive devstack runs
if [[ -r $RC_DIR/.localrc.password ]]; then
    source $RC_DIR/.localrc.password
fi
# Control whether Python 3 should be used at all.
export USE_PYTHON3=$(trueorfalse False USE_PYTHON3)
# Explicitly list services not to run under Python 3. See
# disable_python3_package to edit this variable.
export DISABLED_PYTHON3_PACKAGES="swift"
# When Python 3 is supported by an application, adding the specific
# version of Python 3 to this variable will install the app using that
# version of the interpreter instead of 2.7.
_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}}
# Just to be more explicit on the Python 2 version to use.
_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
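# For example, a specific interpreter can be forced from ``local.conf``
# (version shown is illustrative):
#   USE_PYTHON3=True
#   PYTHON3_VERSION=3.6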
# Create a virtualenv with this
if [[ ${USE_PYTHON3} == True ]]; then
    export VIRTUALENV_CMD="python3 -m venv"
else
    export VIRTUALENV_CMD="virtualenv "
fi
# allow local overrides of env variables, including repo config
if [[ -f $RC_DIR/localrc ]]; then
    # Old-style user-supplied config
    source $RC_DIR/localrc
elif [[ -f $RC_DIR/.localrc.auto ]]; then
    # New-style user-supplied config extracted from local.conf
    source $RC_DIR/.localrc.auto
fi
# Default for log coloring is based on interactive-or-not.
# Baseline assumption is that non-interactive invocations are for CI,
# where logs are to be presented as browsable text files; hence color
# codes should be omitted.
# Simply override LOG_COLOR if your environment is different.
if [ -t 1 ]; then
    _LOG_COLOR_DEFAULT=True
else
    _LOG_COLOR_DEFAULT=False
fi
# Use color for logging output (only available if syslog is not used)
LOG_COLOR=$(trueorfalse $_LOG_COLOR_DEFAULT LOG_COLOR)
# Make tracing more educational
if [[ "$LOG_COLOR" == "True" ]]; then
# tput requires TERM or -T. If neither is present, use vt100, a
# no-frills least common denominator supported everywhere.
TPUT_T=
if ! [ $TERM ]; then
TPUT_T='-T vt100'
fi
export PS4='+\[$(tput '$TPUT_T' setaf 242)\]$(short_source)\[$(tput '$TPUT_T' sgr0)\] '
else
export PS4='+ $(short_source): '
fi
# Configure Identity API version: 2.0, 3
IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
# The option ENABLE_IDENTITY_V2 defines whether the DevStack deployment will
# deploy the Identity v2 pipelines. If this option is set to ``False``,
# DevStack will: i) disable Identity v2; ii) configure Tempest to skip
# Identity v2 specific tests; and iii) configure Horizon to use Identity v3.
# When this option is set to ``False``, the option IDENTITY_API_VERSION will
# be set to ``3`` in order to make DevStack register the Identity endpoint
# as v3. This flag is experimental and will be used as a basis to identify
# the projects which still have issues operating with Identity v3.
ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2)
if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
IDENTITY_API_VERSION=3
fi
# Enable use of Python virtual environments. Individual project use of
# venvs is controlled by the PROJECT_VENV array; every project with
# an entry in the array will be installed into the named venv.
# By default this will put each project into its own venv.
USE_VENV=$(trueorfalse False USE_VENV)
# Add packages that need to be installed into a venv but are not in any
# requirements files here, in a comma-separated list
ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}
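# For example (package names are illustrative), venv-based installs plus a
# couple of extra packages could be requested from ``local.conf``:
#   USE_VENV=True
#   ADDITIONAL_VENV_PACKAGES=PyMySQL,python-memcached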
# This can be used to turn database query logging on and off
# (currently only implemented for MySQL backend)
DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. The value
# uses the format of timeout(1):
#
# DURATION is a floating point number with an optional suffix: 's'
# for seconds (the default), 'm' for minutes, 'h' for hours or 'd'
# for days.
#
# Zero disables timeouts
GIT_TIMEOUT=${GIT_TIMEOUT:-0}
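# For example, to give up on (and retry) any git operation taking longer
# than two minutes:
#   GIT_TIMEOUT=2m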
# How we should handle WSGI deployments. Two modes are allowed: "uwsgi",
# which runs the service under uwsgi behind an apache proxy, and
# "mod_wsgi", which runs the service directly in apache. mod_wsgi is
# deprecated, don't use it.
WSGI_MODE=${WSGI_MODE:-"uwsgi"}
# Repositories
# ------------
# Base GIT Repo URL
GIT_BASE=${GIT_BASE:-https://opendev.org}
# The location of REQUIREMENTS once cloned
REQUIREMENTS_DIR=$DEST/requirements
# Which libraries should we install from git instead of using released
# versions on pypi?
#
# By default DevStack now installs libraries from pypi instead of
# from git repositories. This works great if you are
# developing server components, but if you want to develop libraries
# and see them live in DevStack you need to tell DevStack it should
# install them from git.
#
# ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config
#
# Will install those 2 libraries from git, the rest from pypi.
#
# Setting the variable to 'ALL' will activate the download for all
# libraries.
# The OpenStack release series this branch of DevStack targets.
DEVSTACK_SERIES="train"
##############
#
# OpenStack Server Components
#
##############
# block storage service
CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git}
CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH}
# image catalog service
GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
GLANCE_BRANCH=${GLANCE_BRANCH:-$TARGET_BRANCH}
# django powered web control panel for openstack
HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
HORIZON_BRANCH=${HORIZON_BRANCH:-$TARGET_BRANCH}
# unified auth system (manages accounts/tokens)
KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH}
# neutron service
NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git}
NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH}
# neutron fwaas service
NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git}
NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-$TARGET_BRANCH}
# compute service
NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH}
# object storage service
SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH}
# placement service
PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git}
PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH}
##############
#
# Testing Components
#
##############
# consolidated openstack requirements
REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git}
REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH}
# Tempest test suite
TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
##############
#
# OpenStack Client Library Components
# Note default install is from pip, see LIBS_FROM_GIT
#
##############
# volume client
GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-$TARGET_BRANCH}
# os-brick client for local volume attachment
GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git}
GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-$TARGET_BRANCH}
# python barbican client library
GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git}
GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-$TARGET_BRANCH}
GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient
# python glance client library
GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-$TARGET_BRANCH}
# ironic client
GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-$TARGET_BRANCH}
# The ironic plugin is out of tree, but nova uses it, so set GITDIR here.
GITDIR["python-ironicclient"]=$DEST/python-ironicclient
# the base authentication plugins that clients use to authenticate
GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git}
GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-$TARGET_BRANCH}
# python keystone client library that horizon uses
GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-$TARGET_BRANCH}
# neutron client
GITREPO["python-neutronclient"]=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git}
GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-$TARGET_BRANCH}
# python client library to nova that horizon (and others) use
GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-$TARGET_BRANCH}
# python swift client library
GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-$TARGET_BRANCH}
# consolidated openstack python client
GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-$TARGET_BRANCH}
# this doesn't exist in a lib file, so set it here
GITDIR["python-openstackclient"]=$DEST/python-openstackclient
# placement-api CLI
GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git}
GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-$TARGET_BRANCH}
###################
#
# Oslo Libraries
# Note default install is from pip, see LIBS_FROM_GIT
#
###################
# castellan key manager interface
GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git}
GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-$TARGET_BRANCH}
# cliff command line framework
GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
GITBRANCH["cliff"]=${CLIFF_BRANCH:-$TARGET_BRANCH}
# async framework/helpers
GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git}
GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH}
# debtcollector deprecation framework/helpers
GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git}
GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH}
# helpful state machines
GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git}
GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH}
# oslo.cache
GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git}
GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-$TARGET_BRANCH}
# oslo.concurrency
GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-$TARGET_BRANCH}
# oslo.config
GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-$TARGET_BRANCH}
# oslo.context
GITREPO["oslo.context"]=${OSLOCTX_REPO:-${GIT_BASE}/openstack/oslo.context.git}
GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-$TARGET_BRANCH}
# oslo.db
GITREPO["oslo.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git}
GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH}
# oslo.i18n
GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH}
# oslo.log
GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH}
# oslo.messaging
GITREPO["oslo.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-$TARGET_BRANCH}
# oslo.middleware
GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-$TARGET_BRANCH}
# oslo.policy
GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-$TARGET_BRANCH}
# oslo.privsep
GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git}
GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-$TARGET_BRANCH}
# oslo.reports
GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-$TARGET_BRANCH}
# oslo.rootwrap
GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-$TARGET_BRANCH}
# oslo.serialization
GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git}
GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-$TARGET_BRANCH}
# oslo.service
GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git}
GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-$TARGET_BRANCH}
# oslo.utils
GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-$TARGET_BRANCH}
# oslo.versionedobjects
GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git}
GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-$TARGET_BRANCH}
# oslo.vmware
GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-$TARGET_BRANCH}
# osprofiler
GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git}
GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-$TARGET_BRANCH}
# pycadf auditing library
GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
GITBRANCH["pycadf"]=${PYCADF_BRANCH:-$TARGET_BRANCH}
# stevedore plugin manager
GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git}
GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-$TARGET_BRANCH}
# taskflow task and flow execution library
GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git}
GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-$TARGET_BRANCH}
# tooz distributed coordination library
GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git}
GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH}
# pbr drives the setuptools configs
GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git}
GITBRANCH["pbr"]=${PBR_BRANCH:-$TARGET_BRANCH}
##################
#
# Libraries managed by OpenStack programs (non oslo)
#
##################
# cursive library
GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git}
GITBRANCH["cursive"]=${CURSIVE_BRANCH:-$TARGET_BRANCH}
# glance store library
GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git}
GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH}
# keystone middleware
GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git}
GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
# ceilometer middleware
GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git}
GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware
# openstacksdk OpenStack Python SDK
GITREPO["openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/openstacksdk.git}
GITBRANCH["openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH}
# os-brick library to manage local volume attaches
GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git}
GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH}
# os-client-config to manage clouds.yaml and friends
GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git}
GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-$TARGET_BRANCH}
GITDIR["os-client-config"]=$DEST/os-client-config
# os-vif library to communicate between Neutron and Nova
GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git}
GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH}
# osc-lib OpenStackClient common lib
GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git}
GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH}
# ironic common lib
GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git}
GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH}
# this doesn't exist in a lib file, so set it here
GITDIR["ironic-lib"]=$DEST/ironic-lib
# diskimage-builder tool
GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$TARGET_BRANCH}
GITDIR["diskimage-builder"]=$DEST/diskimage-builder
# neutron-lib library containing neutron stable non-REST interfaces
GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git}
GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH}
GITDIR["neutron-lib"]=$DEST/neutron-lib
# os-traits library for resource provider traits in the placement service
GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git}
GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH}
##################
#
# TripleO / Heat Agent Components
#
##################
# run-parts script required by os-refresh-config
DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git}
DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
# os-apply-config configuration template tool
OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH}
# os-collect-config configuration agent
OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH}
# os-refresh-config configuration run-parts tool
ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH}
#################
#
# 3rd Party Components (non pip installable)
#
# NOTE(sdague): these should be converted to release version installs or removed
#
#################
# ironic python agent
IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git}
IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH}
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-v1.0.0}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
SPICE_BRANCH=${SPICE_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
# Global flag used to configure Tempest and potentially other services if
# volume multiattach is supported. In Queens, only the libvirt compute driver
# and lvm volume driver support multiattach, and qemu must be less than 2.10
# or libvirt must be greater than or equal to 3.10.
ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH)
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core
# is installed, the default will be XenAPI.
DEFAULT_VIRT_DRIVER=libvirt
is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
# The groups change with newer libvirt. Older Ubuntu used
# 'libvirtd', but now uses libvirt like Debian. Do a quick check
# to see if libvirtd group already exists to handle grenade's case.
LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true)
LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt}
else
LIBVIRT_GROUP=libvirtd
fi
;;
lxd)
LXD_GROUP=${LXD_GROUP:-"lxd"}
;;
docker|zun)
DOCKER_GROUP=${DOCKER_GROUP:-"docker"}
;;
fake)
NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
;;
xenserver)
# Xen config common to nova and neutron
XENAPI_USER=${XENAPI_USER:-"root"}
# This user will be used for dom0 - domU communication
# should be able to log in to dom0 without a password
# will be used to install the plugins
DOMZERO_USER=${DOMZERO_USER:-"domzero"}
;;
*)
;;
esac
# By default, devstack will use Ubuntu Cloud Archive.
ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE)
# Images
# ------
# Specify a comma-separated list of images to download and install into glance.
# Supported urls here are:
# * "uec-style" images:
# If the file ends in .tar.gz, uncompress the tarball and select the first
# .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel
# and "*-initrd*" as the ramdisk
# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
# * disk image (*.img,*.img.gz)
# if the file ends in .img, it will be uploaded to glance and registered
# as a disk image. If it ends in .gz, it is uncompressed first.
# example:
# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
# * OpenVZ image:
# OpenVZ uses its own format of image, and does not support UEC style images
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
CIRROS_VERSION=${CIRROS_VERSION:-"0.4.0"}
CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
# which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and
# ``IMAGE_URLS`` to be set in the `localrc` section of ``local.conf``.
DOWNLOAD_DEFAULT_IMAGES=$(trueorfalse True DOWNLOAD_DEFAULT_IMAGES)
if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then
if [[ -n "$IMAGE_URLS" ]]; then
IMAGE_URLS+=","
fi
case "$VIRT_DRIVER" in
libvirt)
case "$LIBVIRT_TYPE" in
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz}
IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
*) # otherwise, use the qcow image
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
esac
;;
vsphere)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME}
IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";;
xenserver)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz}
IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz"
IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
fake)
# Use the same as the default for libvirt
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
esac
DOWNLOAD_DEFAULT_IMAGES=False
fi
# This is a comma separated list of extra URLs to be listed for
# download by the tools/image_list.sh script. CI environments can
# pre-download these URLs and place them in $FILES. Later scripts can
# then use "get_extra_file <url>" which will print out the path to the
# file; it will either be downloaded on demand or acquired from the
# cache if it is there.
EXTRA_CACHE_URLS=""
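# As an illustrative sketch (the URL is hypothetical), a plugin could
# pre-cache and later resolve an extra file like this:
#   EXTRA_CACHE_URLS+=",https://example.com/images/extra-image.qcow2"
#   extra_image_path=$(get_extra_file https://example.com/images/extra-image.qcow2)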
# etcd3 defaults
ETCD_VERSION=${ETCD_VERSION:-v3.3.12}
ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"}
ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"}
ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"}
# etcd v3.2.x doesn't have anything for s390x
ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""}
# Make sure etcd3 downloads the correct architecture
if is_arch "x86_64"; then
ETCD_ARCH="amd64"
ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64}
elif is_arch "aarch64"; then
ETCD_ARCH="arm64"
ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64}
elif is_arch "ppc64le"; then
ETCD_ARCH="ppc64le"
ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64}
elif is_arch "s390x"; then
# An etcd3 binary for s390x is not available on github like it is
# for other arches. Only continue if a custom download URL was
# provided.
if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then
ETCD_ARCH="s390x"
ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X}
else
exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided."
fi
else
exit_distro_not_supported "invalid hardware type - $ETCD_ARCH"
fi
ETCD_PORT=${ETCD_PORT:-2379}
ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380}
ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download}
ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE
# etcd is always required, so place it into list of pre-cached downloads
EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION"
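# For example, on s390x (or when using a local mirror) a custom location and
# a matching checksum could be supplied via ``local.conf`` (values are
# illustrative):
#   ETCD_DOWNLOAD_URL=https://mirror.example.com/etcd
#   ETCD_SHA256=<sha256 of the matching etcd tarball>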
# Detect duplicate values in IMAGE_URLS
for image_url in ${IMAGE_URLS//,/ }; do
    if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then
        die $LINENO "$image_url is duplicate, please remove it from IMAGE_URLS."
    fi
done
# 24G default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G}
# Prefixes for volume and instance names
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
# Set default port for nova-objectstore
S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
# Common network names
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"}
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""}
# Allow the use of an alternate protocol (such as https) for service endpoints
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
# Sets the maximum number of workers for most services to reduce
# the memory used where there are a large number of CPUs present
# (the default number of workers for many services is the number of CPUs).
# Also sets the minimum number of workers to 2.
API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))}
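# For example, with 16 CPUs this default works out to 16/4 = 4 workers,
# while a 4-CPU host gets the 2-worker floor.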
# Service startup timeout
SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
# Timeout for compute node registration in Nova
NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT}
# Service graceful shutdown timeout
SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5}
# Timeout for API service workers (e.g. uwsgi)
WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
# Choose DNF on RedHat/Fedora platforms that have it, otherwise default
# to YUM. This can be removed when only dnf is supported (i.e. when
# centos7 disappears)
if [[ -e /usr/bin/dnf ]]; then
    YUM=${YUM:-dnf}
else
    YUM=${YUM:-yum}
fi
# Common Configuration
# --------------------
# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
# Internet access. ``stack.sh`` must have been previously run with Internet
# access to install prerequisites and fetch repositories.
OFFLINE=$(trueorfalse False OFFLINE)
# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if
# the destination git repository does not exist during the ``git_clone``
# operation.
ERROR_ON_CLONE=$(trueorfalse False ERROR_ON_CLONE)
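# For example, a later re-stack on a host that already has all repositories
# and packages could simply set this in ``local.conf``:
#   OFFLINE=True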
# Whether to enable the debug log level in OpenStack services
ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL)
# Set fixed and floating range here so we can make sure not to use addresses
# from either range when attempting to guess the IP to use for the host.
# Note that setting ``FIXED_RANGE`` may be necessary when running DevStack
# in an OpenStack cloud that uses either of these address ranges internally.
FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22}
FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE}
HOST_IP_IFACE=${HOST_IP_IFACE:-}
HOST_IP=${HOST_IP:-}
HOST_IPV6=${HOST_IPV6:-}
HOST_IP=$(get_default_host_ip "$FIXED_RANGE" "$FLOATING_RANGE" "$HOST_IP_IFACE" "$HOST_IP" "inet")
if [ "$HOST_IP" == "" ]; then
die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP."
fi
HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6")
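# If auto-detection picks the wrong address, HOST_IP (and HOST_IPV6) can be
# pinned in ``local.conf``; the addresses below are purely illustrative:
#   HOST_IP=192.168.42.11
#   HOST_IPV6=fd00:0:0:42::11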
# Whether or not the port_security extension should be enabled for Neutron.
NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY)
# SERVICE IP version
# This is the IP version that services should be listening on, as well
# as using to register their endpoints with keystone.
SERVICE_IP_VERSION=${SERVICE_IP_VERSION:-4}
# Validate SERVICE_IP_VERSION
# It would be nice to support "4+6" here as well, but that will require
# multiple calls into keystone to register endpoints, so for now let's
# just support one or the other.
if [[ $SERVICE_IP_VERSION != "4" ]] && [[ $SERVICE_IP_VERSION != "6" ]]; then
    die $LINENO "SERVICE_IP_VERSION must be either 4 or 6"
fi
if [[ "$SERVICE_IP_VERSION" == 4 ]]; then
    DEF_SERVICE_HOST=$HOST_IP
    DEF_SERVICE_LOCAL_HOST=127.0.0.1
    DEF_SERVICE_LISTEN_ADDRESS=0.0.0.0
fi
if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
    if [ "$HOST_IPV6" == "" ]; then
        die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6."
    fi
    DEF_SERVICE_HOST=[$HOST_IPV6]
    DEF_SERVICE_LOCAL_HOST=::1
    DEF_SERVICE_LISTEN_ADDRESS="[::]"
fi
# This is either 0.0.0.0 for IPv4 or [::] for IPv6
SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}}
# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for
# service endpoints. Default is dependent on SERVICE_IP_VERSION above.
SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}}
# This is either 127.0.0.1 for IPv4 or ::1 for IPv6
SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}}
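# Putting this together, a sketch of an IPv6-only control plane would just
# need the following in ``local.conf`` (plus a routable HOST_IPV6):
#   SERVICE_IP_VERSION=6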
REGION_NAME=${REGION_NAME:-RegionOne}
# Configure services to use syslog instead of writing to individual log files
SYSLOG=$(trueorfalse False SYSLOG)
SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
SYSLOG_PORT=${SYSLOG_PORT:-516}
# Set global ``GIT_DEPTH=<number>`` to limit the history depth of the git clone
# Set to 0 to disable shallow cloning
GIT_DEPTH=${GIT_DEPTH:-0}
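# For example, a shallow clone keeping only the most recent commit:
#   GIT_DEPTH=1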
# We may not need to recreate the database when two Keystone services
# share the same database. This is useful for multinode Grenade tests.
RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB)
# Following entries need to be last items in file
# The new way is LOGDIR for all logs and LOGFILE for the stack.sh trace log;
# if LOGFILE is not fully qualified it will be placed in LOGDIR.
# LOGFILE       LOGDIR          output
# not set       not set         (new) set LOGDIR from default
# set           not set         stack.sh log to LOGFILE, (new) set LOGDIR from LOGFILE
# not set       set             screen logs to LOGDIR
# set           set             stack.sh log to LOGFILE, screen logs to LOGDIR
# Set up new logging defaults
if [[ -z "${LOGDIR:-}" ]]; then
default_logdir=$DEST/logs
if [[ -z "${LOGFILE:-}" ]]; then
# Nothing is set, we need a default
LOGDIR="$default_logdir"
else
# Set default LOGDIR
LOGDIR="${LOGFILE%/*}"
logfile="${LOGFILE##*/}"
if [[ -z "$LOGDIR" || "$LOGDIR" == "$logfile" ]]; then
# LOGFILE had no path, set a default
LOGDIR="$default_logdir"
fi
fi
unset default_logdir logfile
fi
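# For example, setting only LOGFILE in ``local.conf`` (path illustrative):
#   LOGFILE=/opt/stack/logs/stack.sh.log
# results in LOGDIR being derived as /opt/stack/logs, per the table above.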
# ``LOGDIR`` is always set at this point so it is not useful as an 'enable' for service logs
# System-wide ulimit file descriptors override
ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048}
# Local variables:
# mode: shell-script
# End: