Merge branch "https://github.com/baigk/compass-core.git into dev/opnfv
Change-Id: Ie2be5756f0c425a5d40e3092f52f245709fccbf3 Signed-off-by: baigk <baiguoku@huawei.com>
This commit is contained in:
parent
d825d6e6d7
commit
47d8d49133
1858
bin/client.py
1858
bin/client.py
File diff suppressed because it is too large
Load Diff
@ -7,25 +7,16 @@ service mysqld status || exit $?
|
||||
/opt/compass/bin/clean_installation_logs.py
|
||||
rm -rf /var/ansible/run/*
|
||||
service httpd restart
|
||||
sleep 10
|
||||
service httpd status || exit $?
|
||||
service rsyslog restart
|
||||
sleep 10
|
||||
service rsyslog status || exit $?
|
||||
service redis restart
|
||||
sleep 10
|
||||
service redis status || exit $?
|
||||
redis-cli flushall
|
||||
service cobblerd restart
|
||||
sleep 10
|
||||
service cobblerd status || exit $?
|
||||
chef-server-ctl restart
|
||||
sleep 10
|
||||
chef-server-ctl status || exit $?
|
||||
service compass-celeryd restart
|
||||
sleep 10
|
||||
service compass-celeryd status || exit $?
|
||||
service compass-progress-updated restart
|
||||
sleep 10
|
||||
service compass-progress-updated status || exit $?
|
||||
|
||||
|
@ -71,38 +71,20 @@ def os_installed(
|
||||
|
||||
deploy_manager = DeployManager(
|
||||
adapter_info, cluster_info, hosts_info)
|
||||
|
||||
if not os_installed_triggered:
|
||||
deploy_manager.os_installed()
|
||||
util.ActionHelper.host_ready(host_id, True, user)
|
||||
os_installed_triggered = True
|
||||
|
||||
if clusterhost_ready:
|
||||
deploy_manager.cluster_os_installed()
|
||||
# deploy_manager.cluster_os_installed()
|
||||
util.ActionHelper.cluster_host_ready(
|
||||
cluster_id, host_id, False, user
|
||||
)
|
||||
|
||||
for cluster_id, cluster_os_ready in clusters_os_ready.items():
|
||||
if not cluster_os_ready and os_installed_triggered:
|
||||
continue
|
||||
|
||||
cluster_info = util.ActionHelper.get_cluster_info(
|
||||
cluster_id, user)
|
||||
adapter_id = cluster_info[const.ADAPTER_ID]
|
||||
|
||||
adapter_info = util.ActionHelper.get_adapter_info(
|
||||
adapter_id, cluster_id, user)
|
||||
hosts_info = util.ActionHelper.get_hosts_info(
|
||||
cluster_id, [host_id], user)
|
||||
|
||||
deploy_manager = DeployManager(
|
||||
adapter_info, cluster_info, hosts_info)
|
||||
if not os_installed_triggered:
|
||||
deploy_manager.os_installed()
|
||||
util.ActionHelper.host_ready(host_id, True, user)
|
||||
os_installed_triggered = True
|
||||
|
||||
if cluster_os_ready:
|
||||
if util.ActionHelper.is_cluster_os_ready(cluster_id, user):
|
||||
logging.info("deploy_manager begin cluster_os_installed")
|
||||
deploy_manager.cluster_os_installed()
|
||||
|
||||
|
||||
|
@ -102,8 +102,8 @@ class ActionHelper(object):
|
||||
...
|
||||
}
|
||||
To view a complete output, please refer to backend doc.
|
||||
|
||||
"""
|
||||
|
||||
adapter_info = adapter_db.get_adapter(adapter_id, user=user)
|
||||
metadata = cluster_db.get_cluster_metadata(cluster_id, user=user)
|
||||
adapter_info.update({const.METADATA: metadata})
|
||||
@ -139,6 +139,7 @@ class ActionHelper(object):
|
||||
"owner": "xxx"
|
||||
}
|
||||
"""
|
||||
|
||||
cluster_info = cluster_db.get_cluster(cluster_id, user=user)
|
||||
|
||||
# convert roles retrieved from db into a list of role names
|
||||
@ -161,34 +162,35 @@ class ActionHelper(object):
|
||||
def get_hosts_info(cluster_id, hosts_id_list, user):
|
||||
"""Get hosts information. Return a dictionary as below,
|
||||
|
||||
{
|
||||
"hosts": {
|
||||
1($host_id): {
|
||||
"reinstall_os": True,
|
||||
"mac": "xxx",
|
||||
"name": "xxx",
|
||||
"roles": [xxx, yyy]
|
||||
},
|
||||
"networks": {
|
||||
"eth0": {
|
||||
"ip": "192.168.1.1",
|
||||
"netmask": "255.255.255.0",
|
||||
"is_mgmt": True,
|
||||
"is_promiscuous": False,
|
||||
"subnet": "192.168.1.0/24"
|
||||
},
|
||||
"eth1": {...}
|
||||
},
|
||||
"os_config": {},
|
||||
"package_config": {},
|
||||
"deployed_os_config": {},
|
||||
"deployed_package_config": {}
|
||||
},
|
||||
2: {...},
|
||||
....
|
||||
}
|
||||
}
|
||||
{
|
||||
"hosts": {
|
||||
1($host_id): {
|
||||
"reinstall_os": True,
|
||||
"mac": "xxx",
|
||||
"name": "xxx",
|
||||
"roles": [xxx, yyy]
|
||||
},
|
||||
"networks": {
|
||||
"eth0": {
|
||||
"ip": "192.168.1.1",
|
||||
"netmask": "255.255.255.0",
|
||||
"is_mgmt": True,
|
||||
"is_promiscuous": False,
|
||||
"subnet": "192.168.1.0/24"
|
||||
},
|
||||
"eth1": {...}
|
||||
},
|
||||
"os_config": {},
|
||||
"package_config": {},
|
||||
"deployed_os_config": {},
|
||||
"deployed_package_config": {}
|
||||
},
|
||||
2: {...},
|
||||
....
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
hosts_info = {}
|
||||
for host_id in hosts_id_list:
|
||||
info = cluster_db.get_cluster_host(cluster_id, host_id, user=user)
|
||||
@ -322,6 +324,10 @@ class ActionHelper(object):
|
||||
user=user, ready=True
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def is_cluster_os_ready(cluster_id, user=None):
|
||||
return cluster_db.is_cluster_os_ready(cluster_id, user=user)
|
||||
|
||||
@staticmethod
|
||||
def cluster_ready(cluster_id, from_database_only, user):
|
||||
"""Trigger cluster ready."""
|
||||
|
@ -231,6 +231,23 @@ def get_cluster(
|
||||
)
|
||||
|
||||
|
||||
@database.run_in_session()
|
||||
@user_api.check_user_permission(
|
||||
permission.PERMISSION_LIST_CLUSTERS)
|
||||
def is_cluster_os_ready(
|
||||
cluster_id, exception_when_missing=True,
|
||||
user=None, session=None, **kwargs
|
||||
):
|
||||
cluster = utils.get_db_object(
|
||||
session, models.Cluster, exception_when_missing, id=cluster_id)
|
||||
|
||||
all_states = ([i.host.state.ready for i in cluster.clusterhosts])
|
||||
|
||||
logging.info("is_cluster_os_ready: all_states %s" % all_states)
|
||||
|
||||
return all(all_states)
|
||||
|
||||
|
||||
def check_cluster_validated(cluster):
|
||||
"""Check cluster is validated."""
|
||||
if not cluster.config_validated:
|
||||
@ -518,6 +535,7 @@ def get_cluster_metadata(cluster_id, user=None, session=None, **kwargs):
|
||||
user=user, session=session
|
||||
)
|
||||
)
|
||||
|
||||
return metadatas
|
||||
|
||||
|
||||
@ -2003,7 +2021,7 @@ def update_cluster_host_state_internal(
|
||||
)
|
||||
return _update_clusterhost_state(
|
||||
clusterhost, from_database_only=from_database_only,
|
||||
session=None, **kwargs
|
||||
session=session, **kwargs
|
||||
)
|
||||
|
||||
|
||||
|
@ -12,345 +12,107 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__author__ = "Grace Yu (grace.yu@huawei.com)"
|
||||
__author__ = "baigk baiguoku@huawei.com)"
|
||||
|
||||
|
||||
"""Module to manage and access cluster, hosts and adapter config.
|
||||
"""
|
||||
from collections import defaultdict
|
||||
from copy import deepcopy
|
||||
import json
|
||||
import logging
|
||||
|
||||
import netaddr
|
||||
|
||||
from compass.deployment.utils import constants as const
|
||||
|
||||
ip_generator_map = {}
|
||||
|
||||
class BaseConfigManager(object):
|
||||
|
||||
def __init__(self, adapter_info, cluster_info, hosts_info):
|
||||
def get_ip_addr(ip_ranges):
|
||||
def _get_ip_addr():
|
||||
for ip_range in ip_ranges:
|
||||
for ip in netaddr.iter_iprange(*ip_range):
|
||||
yield str(ip)
|
||||
|
||||
s = json.dumps(ip_ranges)
|
||||
if s not in ip_generator_map:
|
||||
ip_generator_map[s] = _get_ip_addr()
|
||||
return ip_generator_map[s]
|
||||
else:
|
||||
return ip_generator_map[s]
|
||||
|
||||
|
||||
class AdapterInfo(object):
|
||||
def __init__(self, adapter_info):
|
||||
self.adapter_info = adapter_info
|
||||
self.name = self.adapter_info.get(const.NAME)
|
||||
self.dist_system_name = self.name
|
||||
self.health_check_cmd = self.adapter_info.get(const.HEALTH_CHECK_CMD)
|
||||
|
||||
self.os_installer = self.adapter_info.setdefault(
|
||||
const.OS_INSTALLER, {}
|
||||
)
|
||||
self.os_installer.setdefault(const.INSTALLER_SETTINGS, {})
|
||||
|
||||
self.package_installer = self.adapter_info.setdefault(
|
||||
const.PK_INSTALLER, {}
|
||||
)
|
||||
self.package_installer.setdefault(const.INSTALLER_SETTINGS, {})
|
||||
|
||||
self.metadata = self.adapter_info.setdefault(const.METADATA, {})
|
||||
self.os_metadata = self.metadata.setdefault(const.OS_CONFIG, {})
|
||||
self.package_metadata = self.metadata.setdefault(const.PK_CONFIG, {})
|
||||
|
||||
self.flavors = dict([(f[const.FLAVOR_NAME], f)
|
||||
for f in self.adapter_info.get(const.FLAVOR, [])])
|
||||
|
||||
@property
|
||||
def flavor_list(self):
|
||||
return self.flavors.values()
|
||||
|
||||
def get_flavor(self, flavor_name):
|
||||
return self.flavors.get(flavor_name)
|
||||
|
||||
|
||||
class ClusterInfo(object):
|
||||
def __init__(self, cluster_info):
|
||||
self.cluster_info = cluster_info
|
||||
self.hosts_info = hosts_info
|
||||
|
||||
def get_cluster_id(self):
|
||||
return self.__get_cluster_item(const.ID)
|
||||
|
||||
def get_clustername(self):
|
||||
return self.__get_cluster_item(const.NAME)
|
||||
|
||||
def get_os_version(self):
|
||||
return self.__get_cluster_item(const.OS_VERSION)
|
||||
|
||||
def get_cluster_baseinfo(self):
|
||||
"""Get cluster base information.
|
||||
|
||||
Including cluster_id, os_version and cluster_name.
|
||||
"""
|
||||
attr_names = [const.ID, const.NAME, const.OS_VERSION]
|
||||
|
||||
base_info = {}
|
||||
for name in attr_names:
|
||||
base_info[name] = self.__get_cluster_item(name)
|
||||
|
||||
return base_info
|
||||
|
||||
def get_host_id_list(self):
|
||||
if not self.hosts_info:
|
||||
logging.info("hosts config is None or {}")
|
||||
return []
|
||||
|
||||
return self.hosts_info.keys()
|
||||
|
||||
def get_hosts_id_list_for_os_installation(self):
|
||||
"""Get info of hosts which need to install/reinstall OS."""
|
||||
result = []
|
||||
all_host_ids = self.get_host_id_list()
|
||||
for host_id in all_host_ids:
|
||||
if self.hosts_info[host_id][const.REINSTALL_OS_FLAG]:
|
||||
result.append(host_id)
|
||||
return result
|
||||
|
||||
def get_cluster_flavor_info(self):
|
||||
return self.__get_cluster_item(const.FLAVOR, {})
|
||||
|
||||
def get_cluster_flavor_name(self):
|
||||
flavor_info = self.get_cluster_flavor_info()
|
||||
return flavor_info.setdefault(const.FLAVOR_NAME, None)
|
||||
|
||||
def get_cluster_flavor_roles(self):
|
||||
flavor_info = self.get_cluster_flavor_info()
|
||||
return flavor_info.setdefault(const.ROLES, [])
|
||||
|
||||
def get_cluster_flavor_template(self):
|
||||
flavor_info = self.get_cluster_flavor_info()
|
||||
return flavor_info.setdefault(const.TMPL, None)
|
||||
|
||||
def get_cluster_os_config(self):
|
||||
return deepcopy(self.__get_cluster_item(const.OS_CONFIG, {}))
|
||||
|
||||
def get_server_credentials(self):
|
||||
cluster_os_config = self.get_cluster_os_config()
|
||||
if not cluster_os_config:
|
||||
logging.info("cluster os_config is None!")
|
||||
return ()
|
||||
|
||||
username = cluster_os_config[const.SERVER_CREDS][const.USERNAME]
|
||||
password = cluster_os_config[const.SERVER_CREDS][const.PASSWORD]
|
||||
return (username, password)
|
||||
|
||||
def get_cluster_package_config(self):
|
||||
return deepcopy(self.__get_cluster_item(const.PK_CONFIG, {}))
|
||||
|
||||
def get_cluster_network_mapping(self):
|
||||
package_config = self.get_cluster_package_config()
|
||||
if not package_config:
|
||||
logging.info("cluster package_config is None or {}.")
|
||||
return {}
|
||||
|
||||
mapping = package_config.setdefault(const.NETWORK_MAPPING, {})
|
||||
logging.info("Network mapping in the config is '%s'!", mapping)
|
||||
|
||||
return mapping
|
||||
|
||||
def get_cluster_deployed_os_config(self):
|
||||
return deepcopy(self.__get_cluster_item(const.DEPLOYED_OS_CONFIG, {}))
|
||||
|
||||
def get_cluster_deployed_package_config(self):
|
||||
return deepcopy(self.__get_cluster_item(const.DEPLOYED_PK_CONFIG, {}))
|
||||
|
||||
def __get_cluster_item(self, item, default_value=None):
|
||||
if not self.cluster_info:
|
||||
logging.info("cluster config is None or {}")
|
||||
return None
|
||||
|
||||
return self.cluster_info.setdefault(item, default_value)
|
||||
|
||||
def get_cluster_roles_mapping(self):
|
||||
if not self.cluster_info:
|
||||
logging.info("cluster config is None or {}")
|
||||
return {}
|
||||
|
||||
deploy_config = self.get_cluster_deployed_package_config()
|
||||
mapping = deploy_config.setdefault(const.ROLES_MAPPING, {})
|
||||
|
||||
if not mapping:
|
||||
mapping = self._get_cluster_roles_mapping_helper()
|
||||
deploy_config[const.ROLES_MAPPING] = mapping
|
||||
|
||||
return mapping
|
||||
|
||||
def _get_host_info(self, host_id):
|
||||
if not self.hosts_info:
|
||||
logging.info("hosts config is None or {}")
|
||||
return {}
|
||||
|
||||
if host_id not in self.hosts_info:
|
||||
logging.info("Cannot find host, ID is '%s'", host_id)
|
||||
return {}
|
||||
|
||||
return self.hosts_info[host_id]
|
||||
|
||||
def __get_host_item(self, host_id, item, default_value=None):
|
||||
host_info = self._get_host_info(host_id)
|
||||
if not host_info:
|
||||
return {}
|
||||
|
||||
return deepcopy(host_info.setdefault(item, default_value))
|
||||
|
||||
def get_host_baseinfo(self, host_id):
|
||||
"""Get host base information."""
|
||||
host_info = self._get_host_info(host_id)
|
||||
if not host_info:
|
||||
return {}
|
||||
|
||||
attr_names = [const.REINSTALL_OS_FLAG, const.MAC_ADDR, const.NAME,
|
||||
const.HOSTNAME, const.NETWORKS]
|
||||
base_info = {}
|
||||
for attr in attr_names:
|
||||
temp = host_info[attr]
|
||||
if isinstance(temp, dict) or isinstance(temp, list):
|
||||
base_info[attr] = deepcopy(temp)
|
||||
else:
|
||||
base_info[attr] = temp
|
||||
|
||||
base_info[const.DNS] = self.get_host_dns(host_id)
|
||||
|
||||
return base_info
|
||||
|
||||
def get_host_fullname(self, host_id):
|
||||
return self.__get_host_item(host_id, const.NAME, None)
|
||||
|
||||
def get_host_dns(self, host_id):
|
||||
host_info = self._get_host_info(host_id)
|
||||
if not host_info:
|
||||
return None
|
||||
|
||||
if const.DNS not in host_info:
|
||||
hostname = host_info[const.HOSTNAME]
|
||||
domain = self.get_host_domain(host_id)
|
||||
host_info[const.DNS] = '.'.join((hostname, domain))
|
||||
|
||||
return host_info[const.DNS]
|
||||
|
||||
def get_host_mac_address(self, host_id):
|
||||
return self.__get_host_item(host_id, const.MAC_ADDR, None)
|
||||
|
||||
def get_hostname(self, host_id):
|
||||
return self.__get_host_item(host_id, const.HOSTNAME, None)
|
||||
|
||||
def get_host_networks(self, host_id):
|
||||
return self.__get_host_item(host_id, const.NETWORKS, {})
|
||||
|
||||
def get_host_interfaces(self, host_id):
|
||||
networks = self.get_host_networks(host_id)
|
||||
return networks.keys()
|
||||
|
||||
def get_host_interface_config(self, host_id, interface):
|
||||
networks = self.get_host_networks(host_id)
|
||||
return networks.setdefault(interface, {})
|
||||
|
||||
def get_host_interface_ip(self, host_id, interface):
|
||||
interface_config = self._get_host_interface_config(host_id, interface)
|
||||
return interface_config.setdefault(const.IP_ADDR, None)
|
||||
|
||||
def get_host_interface_netmask(self, host_id, interface):
|
||||
interface_config = self.get_host_interface_config(host_id, interface)
|
||||
return interface_config.setdefault(const.NETMASK, None)
|
||||
|
||||
def get_host_interface_subnet(self, host_id, interface):
|
||||
nic_config = self.get_host_interface_config(host_id, interface)
|
||||
return nic_config.setdefault(const.SUBNET, None)
|
||||
|
||||
def is_interface_promiscuous(self, host_id, interface):
|
||||
nic_config = self.get_host_interface_config(host_id, interface)
|
||||
if not nic_config:
|
||||
raise Exception("Cannot find interface '%s'", interface)
|
||||
|
||||
return nic_config[const.PROMISCUOUS_FLAG]
|
||||
|
||||
def is_interface_mgmt(self, host_id, interface):
|
||||
nic_config = self.get_host_interface_config(host_id, interface)
|
||||
if not nic_config:
|
||||
raise Exception("Cannot find interface '%s'", interface)
|
||||
|
||||
return nic_config[const.MGMT_NIC_FLAG]
|
||||
|
||||
def get_host_os_config(self, host_id):
|
||||
return self.__get_host_item(host_id, const.OS_CONFIG, {})
|
||||
|
||||
def get_host_domain(self, host_id):
|
||||
os_config = self.get_host_os_config(host_id)
|
||||
os_general_config = os_config.setdefault(const.OS_CONFIG_GENERAL, {})
|
||||
domain = os_general_config.setdefault(const.DOMAIN, None)
|
||||
if domain is None:
|
||||
global_config = self.get_cluster_os_config()
|
||||
global_general = global_config.setdefault(const.OS_CONFIG_GENERAL,
|
||||
{})
|
||||
domain = global_general.setdefault(const.DOMAIN, None)
|
||||
|
||||
return domain
|
||||
|
||||
def get_host_network_mapping(self, host_id):
|
||||
package_config = self.get_host_package_config(host_id)
|
||||
if const.NETWORK_MAPPING not in package_config:
|
||||
network_mapping = self.get_cluster_network_mapping()
|
||||
else:
|
||||
network_mapping = package_config[const.NETWORK_MAPPING]
|
||||
|
||||
return network_mapping
|
||||
|
||||
def get_host_package_config(self, host_id):
|
||||
return self.__get_host_item(host_id, const.PK_CONFIG, {})
|
||||
|
||||
def get_host_deployed_os_config(self, host_id):
|
||||
host_info = self._get_host_info(host_id)
|
||||
return host_info.setdefault(const.DEPLOYED_OS_CONFIG, {})
|
||||
|
||||
def get_host_deployed_package_config(self, host_id):
|
||||
host_info = self._get_host_info(host_id)
|
||||
return host_info.setdefault(const.DEPLOYED_PK_CONFIG, {})
|
||||
|
||||
def get_host_roles(self, host_id):
|
||||
return self.__get_host_item(host_id, const.ROLES, [])
|
||||
|
||||
def get_all_hosts_roles(self, hosts_id_list=None):
|
||||
roles = []
|
||||
if hosts_id_list is None:
|
||||
hosts_id_list = self.get_host_id_list()
|
||||
|
||||
for host_id in hosts_id_list:
|
||||
host_roles = self.get_host_roles(host_id)
|
||||
roles.extend([role for role in host_roles if role not in roles])
|
||||
|
||||
return roles
|
||||
|
||||
def get_host_roles_mapping(self, host_id):
|
||||
roles_mapping = {}
|
||||
deployed_pk_config = self.get_host_package_config(host_id)
|
||||
|
||||
if const.ROLES_MAPPING not in deployed_pk_config:
|
||||
roles_mapping = self._get_host_roles_mapping_helper(host_id)
|
||||
deployed_pk_config[const.ROLES_MAPPING] = roles_mapping
|
||||
else:
|
||||
roles_mapping = deployed_pk_config[const.ROLES_MAPPING]
|
||||
|
||||
return deepcopy(roles_mapping)
|
||||
|
||||
def get_host_ipmi_info(self, host_id):
|
||||
ipmi_info = self.__get_host_item(host_id, const.IPMI, {})
|
||||
|
||||
if not ipmi_info:
|
||||
return (None, None, None)
|
||||
|
||||
ipmi_ip = ipmi_info[const.IP_ADDR]
|
||||
ipmi_user = ipmi_info[const.IPMI_CREDS][const.USERNAME]
|
||||
ipmi_pass = ipmi_info[const.IPMI_CREDS][const.PASSWORD]
|
||||
|
||||
return (ipmi_ip, ipmi_user, ipmi_pass)
|
||||
|
||||
def __get_adapter_item(self, item, default_value=None):
|
||||
if not self.adapter_info:
|
||||
logging.info("Adapter Info is None!")
|
||||
return None
|
||||
|
||||
return deepcopy(self.adapter_info.setdefault(item, default_value))
|
||||
|
||||
def get_adapter_name(self):
|
||||
return self.__get_adapter_item(const.NAME, None)
|
||||
|
||||
def get_dist_system_name(self):
|
||||
return self.__get_adapter_item(const.NAME, None)
|
||||
|
||||
def get_adapter_health_check_cmd(self):
|
||||
return self.__get_adapter_item(const.HEALTH_CHECK_CMD)
|
||||
|
||||
def get_os_installer_settings(self):
|
||||
installer_info = self.__get_adapter_item(const.OS_INSTALLER, {})
|
||||
return installer_info.setdefault(const.INSTALLER_SETTINGS, {})
|
||||
|
||||
def get_pk_installer_settings(self):
|
||||
installer_info = self.__get_adapter_item(const.PK_INSTALLER, {})
|
||||
return installer_info.setdefault(const.INSTALLER_SETTINGS, {})
|
||||
|
||||
def get_os_config_metadata(self):
|
||||
metadata = self.__get_adapter_item(const.METADATA, {})
|
||||
return metadata.setdefault(const.OS_CONFIG, {})
|
||||
|
||||
def get_pk_config_meatadata(self):
|
||||
metadata = self.__get_adapter_item(const.METADATA, {})
|
||||
return metadata.setdefault(const.PK_CONFIG, {})
|
||||
|
||||
def get_adapter_all_flavors(self):
|
||||
return self.__get_adapter_item(const.FLAVORS, [])
|
||||
|
||||
def get_adapter_flavor(self, flavor_name):
|
||||
flavors = self.__get_adapter_item(const.FLAVORS, [])
|
||||
for flavor in flavors:
|
||||
if flavor[const.FLAVOR_NAME] == flavor_name:
|
||||
return flavor
|
||||
|
||||
return None
|
||||
|
||||
def _get_cluster_roles_mapping_helper(self):
|
||||
self.id = self.cluster_info.get(const.ID)
|
||||
self.name = self.cluster_info.get(const.NAME)
|
||||
self.os_version = self.cluster_info.get(const.OS_VERSION)
|
||||
self.flavor = self.cluster_info.setdefault(
|
||||
const.FLAVOR, {}
|
||||
)
|
||||
self.os_config = self.cluster_info.setdefault(
|
||||
const.OS_CONFIG, {}
|
||||
)
|
||||
self.package_config = self.cluster_info.setdefault(
|
||||
const.PK_CONFIG, {}
|
||||
)
|
||||
self.deployed_os_config = self.cluster_info.setdefault(
|
||||
const.DEPLOYED_OS_CONFIG, {}
|
||||
)
|
||||
self.deployed_package_config = self.cluster_info.setdefault(
|
||||
const.DEPLOYED_PK_CONFIG, {}
|
||||
)
|
||||
self.network_mapping = self.package_config.setdefault(
|
||||
const.NETWORK_MAPPING, {}
|
||||
)
|
||||
|
||||
os_config_general = self.os_config.setdefault(
|
||||
const.OS_CONFIG_GENERAL, {}
|
||||
)
|
||||
self.domain = os_config_general.setdefault(const.DOMAIN, None)
|
||||
self.hosts = []
|
||||
|
||||
def add_host(self, host):
|
||||
self.hosts.append(host)
|
||||
|
||||
@property
|
||||
def roles_mapping(self):
|
||||
deploy_config = self.deployed_package_config
|
||||
return deploy_config.setdefault(
|
||||
const.ROLES_MAPPING, self._get_cluster_roles_mapping()
|
||||
)
|
||||
|
||||
def _get_cluster_roles_mapping(self):
|
||||
"""The ouput format will be as below, for example:
|
||||
|
||||
{
|
||||
@ -369,37 +131,364 @@ class BaseConfigManager(object):
|
||||
...
|
||||
}
|
||||
"""
|
||||
mapping = {}
|
||||
hosts_id_list = self.get_host_id_list()
|
||||
network_mapping = self.get_cluster_network_mapping()
|
||||
if not network_mapping:
|
||||
mapping = defaultdict(list)
|
||||
for host in self.hosts:
|
||||
for role, value in host.roles_mapping.iteritems():
|
||||
mapping[role].append(value)
|
||||
|
||||
return dict(mapping)
|
||||
|
||||
@property
|
||||
def base_info(self):
|
||||
return {
|
||||
const.ID: self.id,
|
||||
const.NAME: self.name,
|
||||
const.OS_VERSION: self.os_version
|
||||
}
|
||||
|
||||
|
||||
class HostInfo(object):
|
||||
def __init__(self, host_info, cluster_info):
|
||||
self.host_info = host_info
|
||||
self.cluster_info = cluster_info
|
||||
self.id = self.host_info.get(const.ID)
|
||||
self.name = self.host_info.get(const.NAME)
|
||||
self.mac = self.host_info.get(const.MAC_ADDR)
|
||||
self.hostname = self.host_info.get(const.HOSTNAME)
|
||||
self.networks = self.host_info.setdefault(const.NETWORKS, {})
|
||||
self.os_config = self.host_info.setdefault(const.OS_CONFIG, {})
|
||||
|
||||
self.package_config = self.host_info.setdefault(const.PK_CONFIG, {})
|
||||
self.roles = self.host_info.setdefault(const.ROLES, [])
|
||||
self.ipmi = deepcopy(self.host_info.setdefault(const.IPMI, {}))
|
||||
self.reinstall_os_flag = self.host_info.get(const.REINSTALL_OS_FLAG)
|
||||
self.deployed_os_config = self.host_info.setdefault(
|
||||
const.DEPLOYED_OS_CONFIG, {}
|
||||
)
|
||||
self.deployed_package_config = self.host_info.setdefault(
|
||||
const.DEPLOYED_PK_CONFIG, {}
|
||||
)
|
||||
|
||||
os_general_config = self.os_config.setdefault(
|
||||
const.OS_CONFIG_GENERAL, {}
|
||||
)
|
||||
domain = os_general_config.setdefault(const.DOMAIN, None)
|
||||
if domain is None:
|
||||
self.domain = self.cluster_info.domain
|
||||
else:
|
||||
self.domain = domain
|
||||
|
||||
if const.DNS in host_info:
|
||||
self.dns = host_info[const.DNS]
|
||||
else:
|
||||
self.dns = '.'.join((self.hostname, self.domain))
|
||||
|
||||
if const.NETWORK_MAPPING not in self.package_config:
|
||||
self.network_mapping = self.cluster_info.network_mapping
|
||||
else:
|
||||
self.network_mapping = self.package_config[const.NETWORK_MAPPING]
|
||||
|
||||
if const.ROLES_MAPPING not in self.deployed_package_config:
|
||||
self.roles_mapping = self._get_host_roles_mapping()
|
||||
self.deployed_package_config[
|
||||
const.ROLES_MAPPING
|
||||
] = self.roles_mapping
|
||||
else:
|
||||
self.roles_mapping = \
|
||||
self.deployed_package_config[const.ROLES_MAPPING]
|
||||
|
||||
self.cluster_info.add_host(self)
|
||||
|
||||
def valid_interface(self, interface):
|
||||
if interface not in self.networks:
|
||||
raise RuntimeError("interface %s is invalid" % interface)
|
||||
|
||||
def get_interface(self, interface):
|
||||
self.valid_interface(interface)
|
||||
return self.networks[interface]
|
||||
|
||||
def get_interface_ip(self, interface):
|
||||
return self.get_interface(interface).get(const.IP_ADDR)
|
||||
|
||||
def get_interface_netmask(self, interface):
|
||||
return self.get_interface(interface).get(const.NETMASK)
|
||||
|
||||
def get_interface_subnet(self, interface):
|
||||
return self.get_interface(interface).get(const.SUBNET)
|
||||
|
||||
def is_interface_promiscuous(self, interface):
|
||||
return self.get_interface(interface).get(const.PROMISCUOUS_FLAG)
|
||||
|
||||
def is_interface_mgmt(self, interface):
|
||||
return self.get_interface(interface).get(const.MGMT_NIC_FLAG)
|
||||
|
||||
def _get_host_roles_mapping(self):
|
||||
if not self.network_mapping:
|
||||
return {}
|
||||
|
||||
for host_id in hosts_id_list:
|
||||
roles_mapping = self.get_host_roles_mapping(host_id)
|
||||
for role, value in roles_mapping.items():
|
||||
mapping.setdefault(role, []).append(value)
|
||||
return mapping
|
||||
|
||||
def _get_host_roles_mapping_helper(self, host_id):
|
||||
"""The format will be the same as cluster roles mapping."""
|
||||
network_mapping = self.get_host_network_mapping(host_id)
|
||||
if not network_mapping:
|
||||
return {}
|
||||
|
||||
hostname = self.get_hostname(host_id)
|
||||
roles = self.get_host_roles(host_id)
|
||||
interfaces = self.get_host_interfaces(host_id)
|
||||
net_info = {const.HOSTNAME: self.hostname}
|
||||
for k, v in self.network_mapping.items():
|
||||
try:
|
||||
net_info[k] = self.networks[v[const.NIC]]
|
||||
net_info[k][const.NIC] = v[const.NIC]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
mapping = {}
|
||||
temp = {const.HOSTNAME: hostname}
|
||||
for key in network_mapping:
|
||||
nic = network_mapping[key][const.NIC]
|
||||
if nic in interfaces:
|
||||
temp[key] = self.get_host_interface_config(host_id, nic)
|
||||
temp[key][const.NIC] = nic
|
||||
|
||||
for role in roles:
|
||||
for role in self.roles:
|
||||
role = role.replace("-", "_")
|
||||
mapping[role] = temp
|
||||
mapping[role] = net_info
|
||||
|
||||
return mapping
|
||||
|
||||
@property
|
||||
def baseinfo(self):
|
||||
return {
|
||||
const.REINSTALL_OS_FLAG: self.reinstall_os_flag,
|
||||
const.MAC_ADDR: self.mac,
|
||||
const.NAME: self.name,
|
||||
const.HOSTNAME: self.hostname,
|
||||
const.DNS: self.dns,
|
||||
const.NETWORKS: deepcopy(self.networks)
|
||||
}
|
||||
|
||||
|
||||
class BaseConfigManager(object):
|
||||
def __init__(self, adapter_info={}, cluster_info={}, hosts_info={}):
|
||||
assert(adapter_info and isinstance(adapter_info, dict))
|
||||
assert(cluster_info and isinstance(adapter_info, dict))
|
||||
assert(hosts_info and isinstance(adapter_info, dict))
|
||||
|
||||
self.adapter_info = AdapterInfo(adapter_info)
|
||||
self.cluster_info = ClusterInfo(cluster_info)
|
||||
self.hosts_info = dict([(k, HostInfo(v, self.cluster_info))
|
||||
for k, v in hosts_info.iteritems()])
|
||||
|
||||
def get_adapter_name(self):
|
||||
return self.adapter_info.name
|
||||
|
||||
def get_dist_system_name(self):
|
||||
return self.adapter_info.dist_system_name
|
||||
|
||||
def get_adapter_health_check_cmd(self):
|
||||
return self.adapter_info.health_check_cmd
|
||||
|
||||
def get_os_installer_settings(self):
|
||||
return self.adapter_info.os_installer[const.INSTALLER_SETTINGS]
|
||||
|
||||
def get_pk_installer_settings(self):
|
||||
return self.adapter_info.package_installer[const.INSTALLER_SETTINGS]
|
||||
|
||||
def get_os_config_metadata(self):
|
||||
return self.adapter_info.metadata[const.OS_CONFIG]
|
||||
|
||||
def get_pk_config_meatadata(self):
|
||||
return self.adapter_info.metadata[const.PK_CONFIG]
|
||||
|
||||
def get_adapter_all_flavors(self):
|
||||
return self.adapter_info.flavor_list
|
||||
|
||||
def get_adapter_flavor(self, flavor_name):
|
||||
return self.adapter_info.get_flavor(flavor_name)
|
||||
|
||||
def get_cluster_id(self):
|
||||
return self.cluster_info.id
|
||||
|
||||
def get_clustername(self):
|
||||
return self.cluster_info.name
|
||||
|
||||
def get_os_version(self):
|
||||
return self.cluster_info.os_version
|
||||
|
||||
def get_cluster_os_config(self):
|
||||
return self.cluster_info.os_config
|
||||
|
||||
def get_cluster_baseinfo(self):
|
||||
return self.cluster_info.base_info
|
||||
|
||||
def get_cluster_flavor_name(self):
|
||||
return self.cluster_info.flavor.get(const.FLAVOR_NAME)
|
||||
|
||||
def get_cluster_flavor_roles(self):
|
||||
return self.cluster_info.flavor.get(const.ROLES, [])
|
||||
|
||||
def get_cluster_flavor_template(self):
|
||||
return self.cluster_info.flavor.get(const.TMPL)
|
||||
|
||||
def get_cluster_package_config(self):
|
||||
return self.cluster_info.package_config
|
||||
|
||||
def get_cluster_network_mapping(self):
|
||||
mapping = self.cluster_info.network_mapping
|
||||
logging.info("Network mapping in the config is '%s'!", mapping)
|
||||
return mapping
|
||||
|
||||
def get_cluster_deployed_os_config(self):
|
||||
return self.cluster_info.deployed_os_config
|
||||
|
||||
def get_cluster_deployed_package_config(self):
|
||||
return self.cluster_info.deployed_package_config
|
||||
|
||||
def get_cluster_roles_mapping(self):
|
||||
return self.cluster_info.roles_mapping
|
||||
|
||||
def validate_host(self, host_id):
|
||||
if host_id not in self.hosts_info:
|
||||
raise RuntimeError("host_id %s is invalid" % host_id)
|
||||
|
||||
def get_host_id_list(self):
|
||||
return self.hosts_info.keys()
|
||||
|
||||
def get_hosts_id_list_for_os_installation(self):
|
||||
"""Get info of hosts which need to install/reinstall OS."""
|
||||
return [
|
||||
id for id, info in self.hosts_info.items()
|
||||
if info.reinstall_os_flag
|
||||
]
|
||||
|
||||
def get_server_credentials(self):
|
||||
cluster_os_config = self.get_cluster_os_config()
|
||||
if not cluster_os_config:
|
||||
logging.info("cluster os_config is None!")
|
||||
return ()
|
||||
|
||||
username = cluster_os_config[const.SERVER_CREDS][const.USERNAME]
|
||||
password = cluster_os_config[const.SERVER_CREDS][const.PASSWORD]
|
||||
return (username, password)
|
||||
|
||||
def _get_host_info(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id]
|
||||
|
||||
def get_host_baseinfo(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
host_info = self.hosts_info[host_id]
|
||||
return host_info.baseinfo
|
||||
|
||||
def get_host_fullname(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].name
|
||||
|
||||
def get_host_dns(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].dns
|
||||
|
||||
def get_host_mac_address(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].mac
|
||||
|
||||
def get_hostname(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].hostname
|
||||
|
||||
def get_host_networks(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].networks
|
||||
|
||||
def get_host_interfaces(self, host_id):
|
||||
# get interface names
|
||||
return self.get_host_networks(host_id).keys()
|
||||
|
||||
def get_host_interface_ip(self, host_id, interface):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].get_interface_ip(interface)
|
||||
|
||||
def get_host_interface_netmask(self, host_id, interface):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].get_interface_netmask(interface)
|
||||
|
||||
def get_host_interface_subnet(self, host_id, interface):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].get_interface_subnet(interface)
|
||||
|
||||
def is_interface_promiscuous(self, host_id, interface):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].is_interface_promiscuous(interface)
|
||||
|
||||
def is_interface_mgmt(self, host_id, interface):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].is_interface_mgmt(interface)
|
||||
|
||||
def get_host_os_config(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].os_config
|
||||
|
||||
def get_host_domain(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].domain
|
||||
|
||||
def get_host_network_mapping(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].network_mapping
|
||||
|
||||
def get_host_package_config(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].package_config
|
||||
|
||||
def get_host_deployed_os_config(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].deployed_os_config
|
||||
|
||||
def get_host_deployed_package_config(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].deployed_package_config
|
||||
|
||||
def get_host_roles(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].roles
|
||||
|
||||
def get_all_hosts_roles(self, hosts_id_list=None):
|
||||
roles = []
|
||||
for host_id, host_info in self.hosts_info.iteritems():
|
||||
roles.extend(host_info.roles)
|
||||
|
||||
return list(set(roles))
|
||||
|
||||
def get_hosts_ip_settings(self, ip_settings, sys_intf_mappings):
|
||||
logging.info(
|
||||
"get_hosts_ip_settings:ip_settings=%s, sys_intf_mappings=%s" %
|
||||
(ip_settings, sys_intf_mappings)
|
||||
)
|
||||
|
||||
intf_alias = {}
|
||||
for m in sys_intf_mappings:
|
||||
if "vlan_tag" in m:
|
||||
intf_alias[m["name"]] = m["name"]
|
||||
else:
|
||||
intf_alias[m["name"]] = m["interface"]
|
||||
|
||||
mappings = {}
|
||||
hosts_id_list = self.get_host_id_list()
|
||||
for host_id in hosts_id_list:
|
||||
hostname = self.get_hostname(host_id)
|
||||
mappings[hostname] = []
|
||||
for ip_info in ip_settings:
|
||||
logging.info("ip_info=%s" % ip_info)
|
||||
new_ip_info = deepcopy(ip_info)
|
||||
del new_ip_info["ip_ranges"]
|
||||
|
||||
ip_ranges = ip_info["ip_ranges"]
|
||||
new_ip_info["netmask"] = netaddr.IPNetwork(
|
||||
ip_info["cidr"]
|
||||
).netmask.bin.count("1")
|
||||
new_ip_info["ip"] = get_ip_addr(ip_ranges).next()
|
||||
new_ip_info["alias"] = intf_alias[ip_info["name"]]
|
||||
mappings[hostname].append(new_ip_info)
|
||||
|
||||
return {"ip_settings": mappings}
|
||||
|
||||
def get_host_roles_mapping(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
return self.hosts_info[host_id].roles_mapping
|
||||
|
||||
def get_host_ipmi_info(self, host_id):
|
||||
self.validate_host(host_id)
|
||||
if self.hosts_info[host_id].ipmi:
|
||||
return (
|
||||
self.hosts_info[host_id].ipmi[const.IP_ADDR],
|
||||
self.hosts_info[host_id].ipmi
|
||||
[const.IPMI_CREDS][const.USERNAME],
|
||||
self.hosts_info[host_id].ipmi
|
||||
[const.IPMI_CREDS][const.USERNAME])
|
||||
else:
|
||||
return (None, None, None)
|
||||
|
@ -39,6 +39,7 @@ class CobblerInstaller(OSInstaller):
|
||||
TMPL_DIR = 'tmpl_dir'
|
||||
SYS_TMPL = 'system.tmpl'
|
||||
SYS_TMPL_NAME = 'system.tmpl'
|
||||
SYS_PROFILE_NAME = 'profile.tmpl'
|
||||
PROFILE = 'profile'
|
||||
|
||||
POWER_TYPE = 'power_type'
|
||||
@ -130,6 +131,8 @@ class CobblerInstaller(OSInstaller):
|
||||
|
||||
global_vars_dict = self._get_cluster_tmpl_vars_dict()
|
||||
|
||||
self.update_profile_config_to_cobbler(profile, global_vars_dict)
|
||||
|
||||
hosts_deploy_config = {}
|
||||
|
||||
for host_id in host_ids:
|
||||
@ -242,6 +245,14 @@ class CobblerInstaller(OSInstaller):
|
||||
|
||||
return system_config
|
||||
|
||||
def _generate_profile_config(self, cluster_vars_dict):
|
||||
os_version = self.config_manager.get_os_version()
|
||||
tmpl_path = os.path.join(
|
||||
os.path.join(self.tmpl_dir, os_version), self.SYS_PROFILE_NAME
|
||||
)
|
||||
|
||||
return self.get_config_from_template(tmpl_path, cluster_vars_dict)
|
||||
|
||||
def _get_profile_from_server(self, os_version):
|
||||
"""Get profile from cobbler server."""
|
||||
result = self.remote.find_profile({'name': os_version})
|
||||
@ -267,6 +278,10 @@ class CobblerInstaller(OSInstaller):
|
||||
|
||||
return sys_id
|
||||
|
||||
def _get_profile_id(self, profilename):
|
||||
"""get profile reference id for the cluster."""
|
||||
return self.remote.get_profile_handle(profilename, self.token)
|
||||
|
||||
def _clean_system(self, hostname):
|
||||
"""clean system."""
|
||||
sys_name = hostname
|
||||
@ -283,6 +298,12 @@ class CobblerInstaller(OSInstaller):
|
||||
|
||||
self.remote.save_system(sys_id, self.token)
|
||||
|
||||
def _update_profile_config(self, profile_id, profile_config):
|
||||
for key, value in profile_config.iteritems():
|
||||
self.remote.modify_profile(profile_id, str(key), value, self.token)
|
||||
|
||||
self.remote.save_profile(profile_id, self.token)
|
||||
|
||||
def _netboot_enabled(self, sys_id):
|
||||
"""enable netboot."""
|
||||
self.remote.modify_system(sys_id, 'netboot_enabled', True, self.token)
|
||||
@ -303,6 +324,18 @@ class CobblerInstaller(OSInstaller):
|
||||
self._update_system_config(sys_id, system_config)
|
||||
self._netboot_enabled(sys_id)
|
||||
|
||||
def update_profile_config_to_cobbler(self, profilename, cluster_vars_dict):
|
||||
"""update profile config and upload to cobbler server."""
|
||||
|
||||
profile_id = self._get_profile_id(profilename)
|
||||
|
||||
profile_config = self._generate_profile_config(cluster_vars_dict)
|
||||
logging.debug(
|
||||
'%s profile config to update: %s', profilename, profile_config
|
||||
)
|
||||
|
||||
self._update_profile_config(profile_id, profile_config)
|
||||
|
||||
def delete_hosts(self):
|
||||
hosts_id_list = self.config_manager.get_host_id_list()
|
||||
logging.debug('delete hosts %s', hosts_id_list)
|
||||
|
@ -33,6 +33,18 @@ from compass.utils import util
|
||||
NAME = "AnsibleInstaller"
|
||||
|
||||
|
||||
def byteify(input):
|
||||
if isinstance(input, dict):
|
||||
return dict([(byteify(key), byteify(value))
|
||||
for key, value in input.iteritems()])
|
||||
elif isinstance(input, list):
|
||||
return [byteify(element) for element in input]
|
||||
elif isinstance(input, unicode):
|
||||
return input.encode('utf-8')
|
||||
else:
|
||||
return input
|
||||
|
||||
|
||||
class AnsibleInstaller(PKInstaller):
|
||||
INVENTORY_TMPL_DIR = 'inventories'
|
||||
GROUPVARS_TMPL_DIR = 'vars'
|
||||
@ -161,7 +173,15 @@ class AnsibleInstaller(PKInstaller):
|
||||
logging.info("cluster role mapping is %s", mapping)
|
||||
cluster_vars_dict[const.ROLES_MAPPING] = mapping
|
||||
|
||||
return cluster_vars_dict
|
||||
# get ip settings to vars_dict
|
||||
hosts_ip_settings = self.config_manager.get_hosts_ip_settings(
|
||||
pk_meta_dict["network_cfg"]["ip_settings"],
|
||||
pk_meta_dict["network_cfg"]["sys_intf_mappings"]
|
||||
)
|
||||
logging.info("hosts_ip_settings is %s", hosts_ip_settings)
|
||||
cluster_vars_dict["ip_settings"] = hosts_ip_settings
|
||||
|
||||
return byteify(cluster_vars_dict)
|
||||
|
||||
def _generate_inventory_attributes(self, global_vars_dict):
|
||||
inventory_tmpl_path = os.path.join(
|
||||
@ -244,11 +264,16 @@ class AnsibleInstaller(PKInstaller):
|
||||
dirs = self.runner_dirs
|
||||
files = self.runner_files
|
||||
for dir in dirs:
|
||||
items = dir.split(':')
|
||||
src, dst = items[0], items[-1]
|
||||
if not os.path.exists(os.path.join(self.ansible_dir, src)):
|
||||
continue
|
||||
|
||||
shutil.copytree(
|
||||
os.path.join(self.ansible_dir, dir),
|
||||
os.path.join(self.ansible_dir, src),
|
||||
os.path.join(
|
||||
ansible_run_destination,
|
||||
dir
|
||||
dst
|
||||
)
|
||||
)
|
||||
for file in files:
|
||||
|
@ -23,6 +23,7 @@ from copy import deepcopy
|
||||
from mock import Mock
|
||||
import os
|
||||
import unittest2
|
||||
import xmlrpclib
|
||||
|
||||
|
||||
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
|
||||
@ -59,14 +60,16 @@ class TestCobblerInstaller(unittest2.TestCase):
|
||||
"netmask": "255.255.255.0",
|
||||
"is_mgmt": True,
|
||||
"is_promiscuous": False,
|
||||
"subnet": "12.234.32.0/24"
|
||||
"subnet": "12.234.32.0/24",
|
||||
"interface": "vnet0"
|
||||
},
|
||||
"vnet1": {
|
||||
"ip": "172.16.1.1",
|
||||
"netmask": "255.255.255.0",
|
||||
"is_mgmt": False,
|
||||
"is_promiscuous": False,
|
||||
"subnet": "172.16.1.0/24"
|
||||
"subnet": "172.16.1.0/24",
|
||||
"interface": "vnet1"
|
||||
}
|
||||
}
|
||||
},
|
||||
@ -115,7 +118,8 @@ class TestCobblerInstaller(unittest2.TestCase):
|
||||
hosts_info)
|
||||
|
||||
CobblerInstaller._get_cobbler_server = Mock()
|
||||
CobblerInstaller._get_cobbler_server.return_value = "mock_server"
|
||||
CobblerInstaller._get_cobbler_server.return_value = \
|
||||
DummyCobblerRemote()
|
||||
CobblerInstaller._get_token = Mock()
|
||||
CobblerInstaller._get_token.return_value = "mock_token"
|
||||
|
||||
@ -284,18 +288,17 @@ class TestCobblerInstaller(unittest2.TestCase):
|
||||
self.maxDiff = None
|
||||
self.assertDictEqual(expected_output, output)
|
||||
|
||||
def test_check_and_set_system_impi(self):
|
||||
self.test_cobbler._update_system_config = Mock()
|
||||
self.test_cobbler.dump_system_info = Mock()
|
||||
self.test_cobbler.dump_system_info.return_value = {
|
||||
'power_type': 'ipmilan',
|
||||
'power_address': '',
|
||||
'power_user': '',
|
||||
'power_pass': ''
|
||||
}
|
||||
output = self.test_cobbler._check_and_set_system_impi(3, "test_sys_id")
|
||||
self.assertTrue(output)
|
||||
|
||||
class DummyCobblerRemote:
|
||||
|
||||
def __init__(self):
|
||||
return
|
||||
|
||||
def get_profile_handle(self, profilename, token):
|
||||
return "dummyprofilehandle"
|
||||
|
||||
def save_profile(self, profile_id, token):
|
||||
return "dummysaveprofile"
|
||||
|
||||
if __name__ == '__main__':
|
||||
flags.init()
|
||||
|
@ -61,11 +61,6 @@ class TestConfigManager(unittest2.TestCase):
|
||||
output = self.test_config_manager.get_host_id_list()
|
||||
self.assertEqual(expected_output, output)
|
||||
|
||||
def test_get_cluster_flavor_info(self):
|
||||
expected_output = self.cluster_test_info[const.FLAVOR]
|
||||
output = self.test_config_manager.get_cluster_flavor_info()
|
||||
self.assertDictEqual(expected_output, output)
|
||||
|
||||
def test_get_cluster_roles_mapping(self):
|
||||
expected_output = {
|
||||
"os_controller": [{
|
||||
|
@ -1,7 +0,0 @@
|
||||
NAME = 'openstack_juno'
|
||||
DISPLAY_NAME = 'Openstack Juno'
|
||||
PARENT = 'openstack'
|
||||
PACKAGE_INSTALLER = 'ansible_installer'
|
||||
OS_INSTALLER = 'cobbler'
|
||||
SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-14\.04.*']
|
||||
DEPLOYABLE = True
|
7
conf/adapter/ansible_openstack_juno.conf
Normal file
7
conf/adapter/ansible_openstack_juno.conf
Normal file
@ -0,0 +1,7 @@
|
||||
NAME = 'openstack_juno'
|
||||
DISPLAY_NAME = 'Openstack Juno'
|
||||
PARENT = 'openstack'
|
||||
PACKAGE_INSTALLER = 'ansible_installer_juno'
|
||||
OS_INSTALLER = 'cobbler'
|
||||
SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-14\.04.*', '(?i)ubuntu-14\.04\.3.*', '(?i)CentOS-7.*1503-01.*']
|
||||
DEPLOYABLE = True
|
7
conf/adapter/ansible_openstack_kilo.conf
Normal file
7
conf/adapter/ansible_openstack_kilo.conf
Normal file
@ -0,0 +1,7 @@
|
||||
NAME = 'openstack_kilo'
|
||||
DISPLAY_NAME = 'Openstack kilo'
|
||||
PARENT = 'openstack'
|
||||
PACKAGE_INSTALLER = 'ansible_installer_kilo'
|
||||
OS_INSTALLER = 'cobbler'
|
||||
SUPPORTED_OS_PATTERNS = ['(?i)ubuntu-14\.04.*', '(?i)ubuntu-14\.04\.3.*', '(?i)CentOS-7.*1503-01.*']
|
||||
DEPLOYABLE = True
|
@ -9,7 +9,7 @@ FLAVORS = [{
|
||||
'display_name': 'Single Controller',
|
||||
'template': 'single-controller.tmpl',
|
||||
'roles': [
|
||||
'controller', 'compute', 'network', 'storage'
|
||||
'controller', 'compute', 'network', 'storage', 'odl', 'onos'
|
||||
],
|
||||
}, {
|
||||
'flavor': 'multinodes',
|
||||
@ -17,7 +17,16 @@ FLAVORS = [{
|
||||
'template': 'multinodes.tmpl',
|
||||
'roles': [
|
||||
'compute-controller', 'compute-worker', 'network-server',
|
||||
'network-worker', 'database', 'messaging', 'image',
|
||||
'network-worker', 'database', 'messaging', 'image', 'odl',
|
||||
'dashboard', 'identity', 'storage-controller', 'storage-volume'
|
||||
],
|
||||
}, {
|
||||
'flavor': 'HA-ansible-multinodes-juno',
|
||||
'display_name': 'HA-ansible-multinodes-juno',
|
||||
'template': 'HA-ansible-multinodes.tmpl',
|
||||
'roles': [
|
||||
'controller', 'compute', 'ha', 'odl', 'onos', 'ceph'
|
||||
],
|
||||
}]
|
||||
|
||||
|
32
conf/flavor/openstack_kilo_ansible.conf
Normal file
32
conf/flavor/openstack_kilo_ansible.conf
Normal file
@ -0,0 +1,32 @@
|
||||
ADAPTER_NAME = 'openstack_kilo'
|
||||
FLAVORS = [{
|
||||
'flavor': 'allinone',
|
||||
'display_name': 'All-In-One',
|
||||
'template': 'allinone.tmpl',
|
||||
'roles': ['allinone-compute'],
|
||||
}, {
|
||||
'flavor': 'single-controller',
|
||||
'display_name': 'Single Controller',
|
||||
'template': 'single-controller.tmpl',
|
||||
'roles': [
|
||||
'controller', 'compute', 'network', 'storage', 'odl', 'onos'
|
||||
],
|
||||
}, {
|
||||
'flavor': 'multinodes',
|
||||
'display_name': 'Multi-nodes',
|
||||
'template': 'multinodes.tmpl',
|
||||
'roles': [
|
||||
'compute-controller', 'compute-worker', 'network-server',
|
||||
'network-worker', 'database', 'messaging', 'image', 'odl',
|
||||
'dashboard', 'identity', 'storage-controller', 'storage-volume'
|
||||
],
|
||||
}, {
|
||||
'flavor': 'HA-ansible-multinodes-kilo',
|
||||
'display_name': 'HA-ansible-multinodes',
|
||||
'template': 'HA-ansible-multinodes.tmpl',
|
||||
'roles': [
|
||||
'controller', 'compute', 'ha', 'odl', 'onos', 'ceph'
|
||||
],
|
||||
}]
|
||||
|
||||
|
19
conf/flavor_metadata/HA-ansible-multinodes-juno.conf
Normal file
19
conf/flavor_metadata/HA-ansible-multinodes-juno.conf
Normal file
@ -0,0 +1,19 @@
|
||||
ADAPTER = 'openstack_juno'
|
||||
FLAVOR = 'HA-ansible-multinodes-juno'
|
||||
METADATA = {
|
||||
'ha_proxy': {
|
||||
'_self': {
|
||||
},
|
||||
'vip': {
|
||||
'_self': {
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
'mapping_to': 'ha_vip'
|
||||
}
|
||||
},
|
||||
'test': {
|
||||
'_self': {
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
19
conf/flavor_metadata/HA-ansible-multinodes-kilo.conf
Normal file
19
conf/flavor_metadata/HA-ansible-multinodes-kilo.conf
Normal file
@ -0,0 +1,19 @@
|
||||
ADAPTER = 'openstack_kilo'
|
||||
FLAVOR = 'HA-ansible-multinodes-kilo'
|
||||
METADATA = {
|
||||
'ha_proxy': {
|
||||
'_self': {
|
||||
},
|
||||
'vip': {
|
||||
'_self': {
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
'mapping_to': 'ha_vip'
|
||||
}
|
||||
},
|
||||
'test': {
|
||||
'_self': {
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
3
conf/os/centos7.1.conf
Normal file
3
conf/os/centos7.1.conf
Normal file
@ -0,0 +1,3 @@
|
||||
NAME = 'CentOS-7-Minimal-1503-01-x86_64'
|
||||
PARENT = 'CentOS'
|
||||
DEPLOYABLE = True
|
3
conf/os/ubuntu14.04.3.conf
Normal file
3
conf/os/ubuntu14.04.3.conf
Normal file
@ -0,0 +1,3 @@
|
||||
NAME = 'ubuntu-14.04.3-server-x86_64'
|
||||
PARENT = 'Ubuntu'
|
||||
DEPLOYABLE = True
|
@ -102,7 +102,21 @@ METADATA = {
|
||||
'default_callback': default_localrepo,
|
||||
'mapping_to': 'local_repo'
|
||||
}
|
||||
}
|
||||
},
|
||||
'repo_name': {
|
||||
'_self': {
|
||||
'field': 'general',
|
||||
'is_required': True,
|
||||
'mapping_to': 'repo_name'
|
||||
}
|
||||
},
|
||||
'deploy_type': {
|
||||
'_self': {
|
||||
'field': 'general',
|
||||
'is_required': True,
|
||||
'mapping_to': 'deploy_type'
|
||||
}
|
||||
},
|
||||
},
|
||||
'server_credentials': {
|
||||
'_self': {
|
||||
|
@ -1,5 +1,5 @@
|
||||
NAME = 'ansible_installer'
|
||||
INSTANCE_NAME = 'ansible_installer'
|
||||
INSTANCE_NAME = 'ansible_installer_juno'
|
||||
SETTINGS = {
|
||||
'ansible_dir': '/var/ansible',
|
||||
'ansible_run_dir': '/var/ansible/run',
|
||||
@ -8,6 +8,6 @@ SETTINGS = {
|
||||
'inventory_file': 'inventory.yml',
|
||||
'group_variable': 'all',
|
||||
'etc_hosts_path': 'roles/common/templates/hosts',
|
||||
'runner_dirs': ['roles']
|
||||
'runner_dirs': ['roles','openstack_juno/templates:templates']
|
||||
}
|
||||
|
||||
|
13
conf/package_installer/ansible-kilo.conf
Normal file
13
conf/package_installer/ansible-kilo.conf
Normal file
@ -0,0 +1,13 @@
|
||||
NAME = 'ansible_installer'
|
||||
INSTANCE_NAME = 'ansible_installer_kilo'
|
||||
SETTINGS = {
|
||||
'ansible_dir': '/var/ansible',
|
||||
'ansible_run_dir': '/var/ansible/run',
|
||||
'ansible_config': 'ansible.cfg',
|
||||
'playbook_file': 'site.yml',
|
||||
'inventory_file': 'inventory.yml',
|
||||
'group_variable': 'all',
|
||||
'etc_hosts_path': 'roles/common/templates/hosts',
|
||||
'runner_dirs': ['roles','openstack_kilo/templates:templates']
|
||||
}
|
||||
|
@ -6,17 +6,17 @@ METADATA = {
|
||||
},
|
||||
'service_credentials': {
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'key_extensions': {
|
||||
'$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql']
|
||||
},
|
||||
'required_in_whole_config': True,
|
||||
'key_extensions': {
|
||||
'$service': ['image', 'compute', 'dashboard', 'identity', 'metering', 'rabbitmq', 'volume', 'mysql']
|
||||
},
|
||||
'mapping_to': 'service_credentials'
|
||||
},
|
||||
'$service': {
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'mapping_to': '$service'
|
||||
},
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'mapping_to': '$service'
|
||||
},
|
||||
'username': {
|
||||
'_self': {
|
||||
'is_required': True,
|
||||
@ -34,18 +34,18 @@ METADATA = {
|
||||
}
|
||||
},
|
||||
'console_credentials': {
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'key_extensions': {
|
||||
'$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume']
|
||||
},
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'key_extensions': {
|
||||
'$console': ['admin', 'compute', 'dashboard', 'image', 'metering', 'network', 'object-store', 'volume']
|
||||
},
|
||||
'mapping_to': 'console_credentials'
|
||||
},
|
||||
},
|
||||
'$console': {
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'mapping_to': '$console'
|
||||
},
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'mapping_to': '$console'
|
||||
},
|
||||
'username': {
|
||||
'_self': {
|
||||
'is_required': True,
|
||||
@ -63,6 +63,256 @@ METADATA = {
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
'enable_secgroup': {
|
||||
'_self': {
|
||||
'mapping_to': 'enable_secgroup',
|
||||
'field': 'anytype',
|
||||
'is_required':False,
|
||||
'default_value': True
|
||||
}
|
||||
},
|
||||
|
||||
'enable_fwaas': {
|
||||
'_self': {
|
||||
'mapping_to': 'enable_fwaas',
|
||||
'field': 'anytype',
|
||||
'is_required':False,
|
||||
'default_value': True
|
||||
}
|
||||
},
|
||||
|
||||
'enable_vpnaas': {
|
||||
'_self': {
|
||||
'mapping_to': 'enable_vpnaas',
|
||||
'field': 'anytype',
|
||||
'is_required':False,
|
||||
'default_value': True
|
||||
}
|
||||
},
|
||||
'network_cfg': {
|
||||
'_self': {
|
||||
'mapping_to': 'network_cfg'
|
||||
},
|
||||
|
||||
'nic_mappings': {
|
||||
'_self': {
|
||||
'mapping_to': 'nic_mappings',
|
||||
'field': 'general_list'
|
||||
}
|
||||
},
|
||||
|
||||
'bond_mappings': {
|
||||
'_self': {
|
||||
'mapping_to': 'bond_mappings',
|
||||
'field': 'general_list'
|
||||
}
|
||||
},
|
||||
|
||||
'sys_intf_mappings': {
|
||||
'_self': {
|
||||
'mapping_to': 'sys_intf_mappings',
|
||||
'field': 'general_list'
|
||||
}
|
||||
},
|
||||
|
||||
'ip_settings': {
|
||||
'_self': {
|
||||
'mapping_to': 'ip_settings',
|
||||
'field': 'general_list'
|
||||
}
|
||||
},
|
||||
|
||||
'provider_net_mappings': {
|
||||
'_self': {
|
||||
'mapping_to': 'provider_net_mappings',
|
||||
'field': 'general_list'
|
||||
}
|
||||
},
|
||||
|
||||
'ceph_disk': {
|
||||
'_self': {
|
||||
'mapping_to': 'ceph_disk',
|
||||
'field': 'general',
|
||||
'is_required':False
|
||||
}
|
||||
},
|
||||
|
||||
'public_vip': {
|
||||
'_self': {
|
||||
'mapping_to': 'public_vip',
|
||||
'is_required': False
|
||||
},
|
||||
|
||||
'ip': {
|
||||
'_self': {
|
||||
'mapping_to': 'ip',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
}
|
||||
},
|
||||
'netmask': {
|
||||
'_self': {
|
||||
'mapping_to': 'netmask',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
}
|
||||
},
|
||||
'interface': {
|
||||
'_self': {
|
||||
'mapping_to': 'interface',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
'internal_vip': {
|
||||
'_self': {
|
||||
'mapping_to': 'internal_vip',
|
||||
'is_required': False
|
||||
},
|
||||
|
||||
'ip': {
|
||||
'_self': {
|
||||
'mapping_to': 'ip',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
}
|
||||
},
|
||||
'netmask': {
|
||||
'_self': {
|
||||
'mapping_to': 'netmask',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
}
|
||||
},
|
||||
'interface': {
|
||||
'_self': {
|
||||
'mapping_to': 'interface',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
'public_net_info': {
|
||||
'_self': {
|
||||
'mapping_to': 'public_net_info'
|
||||
},
|
||||
|
||||
'enable': {
|
||||
'_self': {
|
||||
'mapping_to': 'enable',
|
||||
'is_required': False,
|
||||
'field': 'anytype',
|
||||
'default_value': True
|
||||
}
|
||||
},
|
||||
|
||||
'network': {
|
||||
'_self': {
|
||||
'mapping_to': 'network',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
'default_value': 'ext-net'
|
||||
}
|
||||
},
|
||||
|
||||
'type': {
|
||||
'_self': {
|
||||
'mapping_to': 'type',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
'options': ['flat', 'vlan'],
|
||||
'default_value': 'vlan'
|
||||
}
|
||||
},
|
||||
|
||||
'segment_id': {
|
||||
'_self': {
|
||||
'mapping_to': 'segment_id',
|
||||
'is_required': False,
|
||||
'field': 'anytype'
|
||||
}
|
||||
},
|
||||
|
||||
'subnet': {
|
||||
'_self': {
|
||||
'mapping_to': 'subnet',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
'default_value': 'ext-subnet'
|
||||
}
|
||||
},
|
||||
|
||||
'provider_network': {
|
||||
'_self': {
|
||||
'mapping_to': 'provider_network',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
'default_value': 'physnet'
|
||||
}
|
||||
},
|
||||
|
||||
'router': {
|
||||
'_self': {
|
||||
'mapping_to': 'router',
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
'default_value': 'ext-router'
|
||||
}
|
||||
},
|
||||
|
||||
'enable_dhcp': {
|
||||
'_self': {
|
||||
'mapping_to': 'enable_dhcp',
|
||||
'is_required': True,
|
||||
'field': 'anytype'
|
||||
}
|
||||
},
|
||||
|
||||
'no_gateway': {
|
||||
'_self': {
|
||||
'mapping_to': 'no_gateway',
|
||||
'is_required': True,
|
||||
'field': 'anytype'
|
||||
}
|
||||
},
|
||||
|
||||
'external_gw': {
|
||||
'_self': {
|
||||
'mapping_to': 'external_gw',
|
||||
'is_required': False,
|
||||
'field': 'general'
|
||||
}
|
||||
},
|
||||
|
||||
'floating_ip_cidr': {
|
||||
'_self': {
|
||||
'mapping_to': 'floating_ip_cidr',
|
||||
'is_required': True,
|
||||
'field': 'general'
|
||||
}
|
||||
},
|
||||
|
||||
'floating_ip_start': {
|
||||
'_self': {
|
||||
'mapping_to': 'floating_ip_start',
|
||||
'is_required': True,
|
||||
'field': 'general'
|
||||
}
|
||||
},
|
||||
|
||||
'floating_ip_end': {
|
||||
'_self': {
|
||||
'mapping_to': 'floating_ip_end',
|
||||
'is_required': True,
|
||||
'field': 'general'
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
'neutron_config': {
|
||||
'_self': {
|
||||
'mapping_to': 'neutron_config'
|
||||
@ -110,29 +360,30 @@ METADATA = {
|
||||
'network_mapping': {
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'key_extensions': {
|
||||
'$interface_type': ['management', 'external', 'storage', 'tenant']
|
||||
}
|
||||
'key_extensions': {
|
||||
'$interface_type': ['install']
|
||||
}
|
||||
},
|
||||
'$interface_type': {
|
||||
'_self': {
|
||||
'required_in_whole_config': True,
|
||||
'field': 'anytype',
|
||||
'autofill_callback': autofill_network_mapping,
|
||||
'mapping_to': '$interface_type'
|
||||
'autofill_callback': autofill_network_mapping,
|
||||
'mapping_to': '$interface_type'
|
||||
},
|
||||
'interface': {
|
||||
'interface': {
|
||||
'_self': {
|
||||
'is_required': True,
|
||||
'is_required': True,
|
||||
'field': 'general',
|
||||
}
|
||||
},
|
||||
'subnet': {
|
||||
'_self': {
|
||||
'is_required': False,
|
||||
'is_required': False,
|
||||
'field': 'general'
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
}
|
||||
|
@ -63,4 +63,23 @@ ROLES = [{
|
||||
'role': 'network-worker',
|
||||
'display': 'Network worker node',
|
||||
'description': 'Network worker node'
|
||||
}, {
|
||||
'role': 'odl',
|
||||
'display': 'open day light',
|
||||
'description': 'odl node',
|
||||
'optional': True
|
||||
}, {
|
||||
'role': 'onos',
|
||||
'display': 'open network operating system',
|
||||
'description': 'onos node',
|
||||
'optional': True
|
||||
}, {
|
||||
'role': 'ha',
|
||||
'display': 'Cluster with HA',
|
||||
'description': 'Cluster with HA node'
|
||||
}, {
|
||||
'role': 'ceph',
|
||||
'display': 'Ceph storage',
|
||||
'description': 'Ceph storage',
|
||||
'optional': True
|
||||
}]
|
85
conf/role/openstack_kilo_ansible.conf
Normal file
85
conf/role/openstack_kilo_ansible.conf
Normal file
@ -0,0 +1,85 @@
|
||||
ADAPTER_NAME = 'openstack_kilo'
|
||||
ROLES = [{
|
||||
'role': 'allinone-compute',
|
||||
'display_name': 'all in one',
|
||||
'description': 'All in One'
|
||||
}, {
|
||||
'role': 'controller',
|
||||
'display_name': 'controller node',
|
||||
'description': 'Controller Node'
|
||||
}, {
|
||||
'role': 'compute',
|
||||
'display_name': 'compute node',
|
||||
'description': 'Compute Node'
|
||||
}, {
|
||||
'role': 'storage',
|
||||
'display_name': 'storage node',
|
||||
'description': 'Storage Node'
|
||||
}, {
|
||||
'role': 'network',
|
||||
'display_name': 'network node',
|
||||
'description': 'Network Node'
|
||||
}, {
|
||||
'role': 'compute-worker',
|
||||
'display_name': 'Compute worker node',
|
||||
'description': 'Compute worker node'
|
||||
}, {
|
||||
'role': 'compute-controller',
|
||||
'display_name': 'Compute controller node',
|
||||
'description': 'Compute controller node'
|
||||
}, {
|
||||
'role': 'network-server',
|
||||
'display_name': 'Network server node',
|
||||
'description': 'Network server node'
|
||||
}, {
|
||||
'role': 'database',
|
||||
'display_name': 'Database node',
|
||||
'description': 'Database node'
|
||||
}, {
|
||||
'role': 'messaging',
|
||||
'display_name': 'Messaging queue node',
|
||||
'description': 'Messaging queue node'
|
||||
}, {
|
||||
'role': 'image',
|
||||
'display': 'Image node',
|
||||
'description': 'Image node'
|
||||
}, {
|
||||
'role': 'dashboard',
|
||||
'display': 'Dashboard node',
|
||||
'description': 'Dashboard node'
|
||||
}, {
|
||||
'role': 'identity',
|
||||
'display': 'Identity node',
|
||||
'description': 'Identity node'
|
||||
}, {
|
||||
'role': 'storage-controller',
|
||||
'display': 'Storage controller node',
|
||||
'description': 'Storage controller node'
|
||||
}, {
|
||||
'role': 'storage-volume',
|
||||
'display': 'Storage volume node',
|
||||
'description': 'Storage volume node'
|
||||
}, {
|
||||
'role': 'network-worker',
|
||||
'display': 'Network worker node',
|
||||
'description': 'Network worker node'
|
||||
}, {
|
||||
'role': 'odl',
|
||||
'display': 'open day light',
|
||||
'description': 'odl node',
|
||||
'optional': True
|
||||
}, {
|
||||
'role': 'onos',
|
||||
'display': 'open network operating system',
|
||||
'description': 'onos node',
|
||||
'optional': True
|
||||
}, {
|
||||
'role': 'ha',
|
||||
'display': 'Cluster with HA',
|
||||
'description': 'Cluster with HA node'
|
||||
}, {
|
||||
'role': 'ceph',
|
||||
'display': 'Ceph storage',
|
||||
'description': 'Ceph storage',
|
||||
'optional': True
|
||||
}]
|
@ -0,0 +1,7 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
[defaults]
|
||||
log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
||||
library = /opt/openstack-ansible-modules
|
@ -3,3 +3,4 @@
|
||||
log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
||||
|
@ -3,3 +3,4 @@
|
||||
log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
||||
|
@ -3,3 +3,4 @@
|
||||
log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
||||
|
@ -0,0 +1,22 @@
|
||||
# localhost
|
||||
127.0.0.1 localhost
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computes = $getVar('compute', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($compute, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
# controller
|
||||
#for worker in $controllers
|
||||
#set worker_ip = $worker.install.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# compute
|
||||
#for worker in $computes
|
||||
#set worker_ip = $worker.install.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
@ -0,0 +1,63 @@
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computes = $getVar('compute', [])
|
||||
#set has = $getVar('ha', [])
|
||||
#set odls = $getVar('odl', [])
|
||||
#set onoss = $getVar('onos', [])
|
||||
#set cephs = $getVar('ceph',[])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($computes, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
#if not $isinstance(has, list)
|
||||
#set has = [has]
|
||||
#end if
|
||||
#if not $isinstance(odls, list)
|
||||
#set odls = [odls]
|
||||
#end if
|
||||
#if not $isinstance(onoss, list)
|
||||
#set onoss = [onoss]
|
||||
#end if
|
||||
#if not $isinstance(cephs, list)
|
||||
#set cephs = [cephs]
|
||||
#end if
|
||||
#set credentials = $getVar('server_credentials', {})
|
||||
#set username = $credentials.get('username', 'root')
|
||||
#set password = $credentials.get('password', 'root')
|
||||
[controller]
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.install.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[compute]
|
||||
#for compute in $computes
|
||||
#set compute_ip = $compute.install.ip
|
||||
#set compute_hostname = $compute.hostname
|
||||
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[ha]
|
||||
#for ha in $has
|
||||
#set ha_ip = $ha.install.ip
|
||||
#set ha_hostname = $ha.hostname
|
||||
$ha_hostname ansible_ssh_host=$ha_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[odl]
|
||||
#for odl in $odls
|
||||
#set odl_ip = $odl.install.ip
|
||||
#set odl_hostname = $odl.hostname
|
||||
$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[onos]
|
||||
#for onos in $onoss
|
||||
#set onos_ip = $onos.install.ip
|
||||
#set onos_hostname = $onos.hostname
|
||||
$onos_hostname ansible_ssh_host=$onos_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[ceph]
|
||||
#for ceph in $cephs
|
||||
#set ceph_ip = $ceph.install.ip
|
||||
#set ceph_hostname = $ceph.hostname
|
||||
$ceph_hostname ansible_ssh_host=$ceph_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
@ -2,6 +2,8 @@
|
||||
#set computes = $getVar('compute', [])
|
||||
#set storages = $getVar('storage', [])
|
||||
#set networks = $getVar('network', [])
|
||||
#set odls = $getVar('odl', [])
|
||||
#set onoss = $getVar('onos', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
@ -14,6 +16,12 @@
|
||||
#if not $isinstance($networks, list)
|
||||
#set networks = [$networks]
|
||||
#end if
|
||||
#if not $isinstance($odls, list)
|
||||
#set odls = [$odls]
|
||||
#end if
|
||||
#if not $isinstance($onoss, list)
|
||||
#set onoss = [$onoss]
|
||||
#end if
|
||||
|
||||
#set credentials = $getVar('server_credentials', {})
|
||||
#set username = $credentials.get('username', 'root')
|
||||
@ -45,3 +53,15 @@ $network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansibl
|
||||
#set storage_hostname = $storage.hostname
|
||||
$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[odl]
|
||||
#for odl in $odls
|
||||
#set odl_ip = $odl.management.ip
|
||||
#set odl_hostname = $odl.hostname
|
||||
$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[storage]
|
||||
#for storage in $storages
|
||||
#set storage_ip = $storage.management.ip
|
||||
#set storage_hostname = $storage.hostname
|
||||
$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
@ -0,0 +1,184 @@
|
||||
#from random import randint
|
||||
#set cluster_name = $getVar('name', '')
|
||||
#set network_cfg = $getVar('network_cfg', {})
|
||||
#set ntp_server = $getVar('ntp_server', "")
|
||||
#set ceph_disk = $getVar('ceph_disk',"")
|
||||
#set $sys_intf_mappings= {}
|
||||
#for $intf_info in $network_cfg.sys_intf_mappings
|
||||
#set $sys_intf_mappings[$intf_info["name"]] = $intf_info
|
||||
#end for
|
||||
|
||||
#set ip_settings={}
|
||||
#for k,v in $getVar('ip_settings', {}).items()
|
||||
#set host_ip_settings={}
|
||||
#for intf in v
|
||||
#set $host_ip_settings[$intf["alias"]]=intf
|
||||
#end for
|
||||
#set $ip_settings[$k]=$host_ip_settings
|
||||
#end for
|
||||
|
||||
#set neutron_cfg = $getVar('neutron_config', {})
|
||||
#set ovs_config = $neutron_cfg.openvswitch
|
||||
|
||||
#set has = $getVar('ha', [])
|
||||
#set ha_vip = $getVar('ha_vip', [])
|
||||
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computers = $getVar('compute', [])
|
||||
|
||||
enable_secgroup: $getVar('enable_secgroup', True)
|
||||
enable_fwaas: $getVar('enable_fwaas', True)
|
||||
enable_vpnaas: $getVar('enable_vpnaas', True)
|
||||
ip_settings: $ip_settings
|
||||
network_cfg: $network_cfg
|
||||
sys_intf_mappings: $sys_intf_mappings
|
||||
deploy_type: $getVar('deploy_type', 'virtual')
|
||||
|
||||
public_net_info: "{{ network_cfg.public_net_info }}"
|
||||
host_ip_settings: "{{ ip_settings[inventory_hostname] }}"
|
||||
|
||||
ntp_server: $ntp_server
|
||||
internal_vip:
|
||||
ip: $network_cfg["internal_vip"]["ip"]
|
||||
netmask: $network_cfg["internal_vip"]["netmask"]
|
||||
#if "vlan_tag" in $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]
|
||||
interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["name"]
|
||||
#else
|
||||
interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["interface"]
|
||||
#end if
|
||||
|
||||
public_vip:
|
||||
ip: $network_cfg["public_vip"]["ip"]
|
||||
netmask: $network_cfg["public_vip"]["netmask"]
|
||||
#if "vlan_tag" in $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]
|
||||
interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["name"]
|
||||
#else
|
||||
interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["interface"]
|
||||
#end if
|
||||
|
||||
db_host: "{{ internal_vip.ip }}"
|
||||
rabbit_host: "{{ internal_vip.ip }}"
|
||||
|
||||
internal_ip: "{{ ip_settings[inventory_hostname]['mgmt']['ip'] }}"
|
||||
internal_nic: mgmt
|
||||
|
||||
#set random_id = randint(1, 255)
|
||||
vrouter_id_internal: $random_id
|
||||
vrouter_id_public: $random_id
|
||||
|
||||
identity_host: "{{ internal_ip }}"
|
||||
controllers_host: "{{ internal_ip }}"
|
||||
storage_controller_host: "{{ internal_ip }}"
|
||||
compute_controller_host: "{{ internal_ip }}"
|
||||
image_host: "{{ internal_ip }}"
|
||||
network_server_host: "{{ internal_ip }}"
|
||||
dashboard_host: "{{ internal_ip }}"
|
||||
|
||||
haproxy_hosts:
|
||||
#for $item in $has
|
||||
#set $hostname=$item["hostname"]
|
||||
$hostname: $ip_settings[$hostname]["mgmt"]["ip"]
|
||||
#end for
|
||||
|
||||
ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
|
||||
#set credentials = $getVar('service_credentials', {})
|
||||
#set console_credentials = $getVar('console_credentials', {})
|
||||
#set rabbit_username = $credentials.rabbitmq.username
|
||||
#set rabbit_password = $credentials.rabbitmq.password
|
||||
#set rabbit_username = $credentials.rabbitmq.username
|
||||
#set rabbit_password = $credentials.rabbitmq.password
|
||||
#set keystone_dbpass = $credentials.identity.password
|
||||
#set glance_dbpass = $credentials.image.password
|
||||
#set glance_pass = $console_credentials.image.password
|
||||
#set nova_dbpass = $credentials.compute.password
|
||||
#set nova_pass = $console_credentials.compute.password
|
||||
#set dash_dbpass = $credentials.dashboard.password
|
||||
#set cinder_dbpass = $credentials.volume.password
|
||||
#set cinder_pass = $console_credentials.volume.password
|
||||
#set admin_pass = $console_credentials.admin.password
|
||||
#set neutron_pass = $console_credentials.network.password
|
||||
|
||||
cluster_name: $cluster_name
|
||||
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ controllers_host }}"
|
||||
DB_HOST: "{{ db_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
|
||||
OPENSTACK_REPO: cloudarchive-juno.list
|
||||
juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
erlang.cookie: DJJVECFMCJPVYQTJTDWG
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
CEILOMETER_DBPASS: service
|
||||
CEILOMETER_PASS: console
|
||||
DEMO_PASS: demo_secret
|
||||
ADMIN_PASS: $admin_pass
|
||||
GLANCE_DBPASS: $glance_dbpass
|
||||
GLANCE_PASS: $glance_pass
|
||||
NOVA_DBPASS: $nova_dbpass
|
||||
NOVA_PASS: $nova_pass
|
||||
DASH_DBPASS: $dash_dbpass
|
||||
CINDER_DBPASS: $cinder_dbpass
|
||||
CINDER_PASS: $cinder_pass
|
||||
NEUTRON_DBPASS: $neutron_pass
|
||||
NEUTRON_PASS: $neutron_pass
|
||||
|
||||
#set neutron_service_plugins=['router']
|
||||
|
||||
#if $getVar('enable_fwaas', True)
|
||||
#neutron_service_plugins.append('firewall')
|
||||
#end if
|
||||
|
||||
#if $getVar('enable_vpnaas', True)
|
||||
#neutron_service_plugins.append('vpnaas')
|
||||
#end if
|
||||
|
||||
NEUTRON_SERVICE_PLUGINS: $neutron_service_plugins
|
||||
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan', 'vlan']
|
||||
NEUTRON_TENANT_NETWORK_TYPES: ['$ovs_config["tenant_network_type"]']
|
||||
NEUTRON_OVS_BRIDGE_MAPPINGS: $ovs_config['bridge_mappings']
|
||||
#if 'vlan_ranges' in $ovs_config
|
||||
NEUTRON_VLAN_RANGES: $ovs_config['vlan_ranges']
|
||||
#else
|
||||
NEUTRON_VLAN_RANGES: []
|
||||
#end if
|
||||
#if 'tunnel_id_ranges' in $ovs_config
|
||||
NEUTRON_TUNNEL_ID_RANGES: $ovs_config['tunnel_id_ranges']
|
||||
#else
|
||||
NEUTRON_TUNNEL_ID_RANGES: []
|
||||
#end if
|
||||
|
||||
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
|
||||
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
|
||||
NEUTRON_TUNNEL_TYPES: ['vxlan']
|
||||
METADATA_SECRET: metadata_secret
|
||||
WSREP_SST_USER: wsrep_sst
|
||||
WSREP_SST_PASS: wsrep_sst_sercet
|
||||
|
||||
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: "{{ internal_ip }}"
|
||||
|
||||
#build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
|
||||
build_in_image: http://192.168.121.12:9999/img/cirros-0.3.3-x86_64-disk.img
|
||||
build_in_image_name: cirros-0.3.3-x86_64-disk.img
|
||||
|
||||
physical_device: /dev/sdb
|
||||
|
||||
odl_username: admin
|
||||
odl_password: admin
|
||||
odl_api_port: 8080
|
||||
|
||||
odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
|
||||
odl_pkg_name: karaf.tar.gz
|
||||
odl_home: "/opt/opendaylight-0.2.2/"
|
||||
odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
|
||||
odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
|
||||
odl_features: "{{ odl_base_features + odl_extra_features }}"
|
||||
odl_api_port: 8080
|
@ -44,8 +44,8 @@ dashboard_host: "{{ controller_host }}"
|
||||
cluster_name: $cluster_name
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: False
|
||||
VERBOSE: False
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ controller_host }}"
|
||||
DB_HOST: "{{ controller_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
@ -55,6 +55,7 @@ juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
DEMO_PASS: demo_secret
|
||||
@ -88,6 +89,7 @@ physical_device: /dev/sdb
|
||||
|
||||
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
|
||||
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
|
||||
HA_VIP: "{{ internal_ip }}"
|
||||
|
||||
odl_username: admin
|
||||
odl_password: admin
|
||||
|
@ -105,8 +105,8 @@ cluster_name: $cluster_name
|
||||
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: False
|
||||
VERBOSE: False
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ compute_controller_host }}"
|
||||
DB_HOST: "{{ db_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
@ -116,6 +116,7 @@ juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
DEMO_PASS: demo_secret
|
||||
@ -149,7 +150,16 @@ physical_device: /dev/sdb
|
||||
|
||||
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
|
||||
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
|
||||
|
||||
HA_VIP: "{{ internal_ip }}"
|
||||
odl_username: admin
|
||||
odl_password: admin
|
||||
odl_api_port: 8080
|
||||
|
||||
odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
|
||||
odl_pkg_name: karaf.tar.gz
|
||||
odl_home: "/opt/opendaylight-0.2.2/"
|
||||
odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
|
||||
odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
|
||||
odl_features: "{{ odl_base_features + odl_extra_features }}"
|
||||
odl_api_port: 8080
|
||||
|
||||
|
@ -45,7 +45,7 @@ INTERNAL_INTERFACE: $network_internal_nic
|
||||
#set neutron_pass = $console_credentials.network.password
|
||||
|
||||
cluster_name: $cluster_name
|
||||
|
||||
deploy_type: $getVar('deploy_type', 'virtual')
|
||||
compute_controller_host: "{{ controller_host }}"
|
||||
db_host: "{{ controller_host }}"
|
||||
rabbit_host: "{{ controller_host }}"
|
||||
@ -56,8 +56,8 @@ network_server_host: "{{ controller_host }}"
|
||||
dashboard_host: "{{ controller_host }}"
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: False
|
||||
VERBOSE: False
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ controller_host }}"
|
||||
DB_HOST: "{{ controller_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
@ -67,6 +67,7 @@ juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
DEMO_PASS: demo_secret
|
||||
@ -101,7 +102,7 @@ physical_device: /dev/sdb
|
||||
|
||||
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
|
||||
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
|
||||
|
||||
HA_VIP: "{{ internal_ip }}"
|
||||
odl_username: admin
|
||||
odl_password: admin
|
||||
odl_api_port: 8080
|
||||
|
@ -0,0 +1,7 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
[defaults]
|
||||
log_path = /var/ansible/run/openstack_kilo-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
||||
library = /opt/openstack-ansible-modules
|
@ -0,0 +1,6 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
[defaults]
|
||||
log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
@ -0,0 +1,6 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
[defaults]
|
||||
log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
@ -0,0 +1,6 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
[defaults]
|
||||
log_path = /var/ansible/run/openstack_juno-$cluster_name/ansible.log
|
||||
host_key_checking = False
|
||||
callback_plugins = /opt/compass/bin/ansible_callbacks
|
||||
pipelining=True
|
@ -0,0 +1,22 @@
|
||||
# localhost
|
||||
127.0.0.1 localhost
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computes = $getVar('compute', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($computes, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
# controller
|
||||
#for worker in $controllers
|
||||
#set worker_ip = $worker.install.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# compute
|
||||
#for worker in $computes
|
||||
#set worker_ip = $worker.install.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
@ -0,0 +1,10 @@
|
||||
#set controllers = $getVar('allinone_compute', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
# allinone
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.management.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
$controller_ip $controller_hostname
|
||||
#end for
|
@ -0,0 +1,110 @@
|
||||
#set compute_controllers = $getVar('compute_controller', [])
|
||||
#set compute_workers = $getVar('compute_worker', [])
|
||||
#set network_servers = $getVar('network_server', [])
|
||||
#set network_workers = $getVar('network_worker', [])
|
||||
#set databases = $getVar('database', [])
|
||||
#set messagings = $getVar('messaging', [])
|
||||
#set images = $getVar('image', [])
|
||||
#set dashboards = $getVar('dashboard', [])
|
||||
#set identities = $getVar('identity', [])
|
||||
#set storage_controllers = $getVar('storage_controller', [])
|
||||
#set storage_volumes = $getVar('storage_volume', [])
|
||||
#if not $isinstance($compute_controllers, list)
|
||||
#set compute_controllers = [$compute_controllers]
|
||||
#end if
|
||||
#if not $isinstance($compute_workers, list)
|
||||
#set compute_workers = [$compute_workers]
|
||||
#end if
|
||||
#if not $isinstance($network_servers, list)
|
||||
#set network_servers = [$network_servers]
|
||||
#end if
|
||||
#if not $isinstance($network_workers, list)
|
||||
#set network_workers = [$network_workers]
|
||||
#end if
|
||||
#if not $isinstance($databases, list)
|
||||
#set databases = [$databases]
|
||||
#end if
|
||||
#if not $isinstance($messagings, list)
|
||||
#set messagings = [$messagings]
|
||||
#end if
|
||||
#if not $isinstance($images, list)
|
||||
#set images = [$images]
|
||||
#end if
|
||||
#if not $isinstance($dashboards, list)
|
||||
#set dashboards = [$dashboards]
|
||||
#end if
|
||||
#if not $isinstance($identities, list)
|
||||
#set identities = [$identities]
|
||||
#end if
|
||||
#if not $isinstance($storage_controllers, list)
|
||||
#set storage_controllers = [$storage_controllers]
|
||||
#end if
|
||||
#if not $isinstance($storage_volumes, list)
|
||||
#set storage_volumes = [$storage_volumes]
|
||||
#end if
|
||||
# compute-controller
|
||||
#for worker in $compute_controllers
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# database
|
||||
#for worker in $databases
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# messaging
|
||||
#for worker in $messagings
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# storage-controller
|
||||
#for worker in $storage_controllers
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# image
|
||||
#for worker in $images
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# identity
|
||||
#for worker in $identities
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# network-server
|
||||
#for worker in $network_servers
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# dashboard
|
||||
#for worker in $dashboards
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# storage-volume
|
||||
#for worker in $storage_volumes
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# network-worker
|
||||
#for worker in $network_workers
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# compute-worker
|
||||
#for worker in $compute_workers
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
@ -0,0 +1,40 @@
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computes = $getVar('compute', [])
|
||||
#set storages = $getVar('storage', [])
|
||||
#set networks = $getVar('network', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($computes, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
#if not $isinstance($storages, list)
|
||||
#set storages = [$storages]
|
||||
#end if
|
||||
#if not $isinstance($networks, list)
|
||||
#set networks = [$networks]
|
||||
#end if
|
||||
# controller
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.management.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
$controller_ip $controller_hostname
|
||||
#end for
|
||||
# compute
|
||||
#for worker in $computes
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# storage
|
||||
#for worker in $storages
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
||||
# network
|
||||
#for worker in $networks
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_ip $worker_hostname
|
||||
#end for
|
@ -0,0 +1,63 @@
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computes = $getVar('compute', [])
|
||||
#set has = $getVar('ha', [])
|
||||
#set odls = $getVar('odl', [])
|
||||
#set onoss = $getVar('onos', [])
|
||||
#set cephs = $getVar('ceph',[])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($computes, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
#if not $isinstance(has, list)
|
||||
#set has = [has]
|
||||
#end if
|
||||
#if not $isinstance(odls, list)
|
||||
#set odls = [odls]
|
||||
#end if
|
||||
#if not $isinstance(onoss, list)
|
||||
#set onoss = [onoss]
|
||||
#end if
|
||||
#if not $isinstance(cephs, list)
|
||||
#set cephs = [cephs]
|
||||
#end if
|
||||
#set credentials = $getVar('server_credentials', {})
|
||||
#set username = $credentials.get('username', 'root')
|
||||
#set password = $credentials.get('password', 'root')
|
||||
[controller]
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.install.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[compute]
|
||||
#for compute in $computes
|
||||
#set compute_ip = $compute.install.ip
|
||||
#set compute_hostname = $compute.hostname
|
||||
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[ha]
|
||||
#for ha in $has
|
||||
#set ha_ip = $ha.install.ip
|
||||
#set ha_hostname = $ha.hostname
|
||||
$ha_hostname ansible_ssh_host=$ha_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[odl]
|
||||
#for odl in $odls
|
||||
#set odl_ip = $odl.install.ip
|
||||
#set odl_hostname = $odl.hostname
|
||||
$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[onos]
|
||||
#for onos in $onoss
|
||||
#set onos_ip = $onos.install.ip
|
||||
#set onos_hostname = $onos.hostname
|
||||
$onos_hostname ansible_ssh_host=$onos_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
[ceph]
|
||||
#for ceph in $cephs
|
||||
#set ceph_ip = $ceph.install.ip
|
||||
#set ceph_hostname = $ceph.hostname
|
||||
$ceph_hostname ansible_ssh_host=$ceph_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
@ -0,0 +1,47 @@
|
||||
#set controllers = $getVar('allinone_compute', [])
|
||||
#set computes = $getVar('allinone_compute', [])
|
||||
#set storages = $getVar('allinone_compute', [])
|
||||
#set networks = $getVar('allinone_compute', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($computes, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
#if not $isinstance($storages, list)
|
||||
#set storages = [$storages]
|
||||
#end if
|
||||
#if not $isinstance($networks, list)
|
||||
#set networks = [$networks]
|
||||
#end if
|
||||
|
||||
#set credentials = $getVar('server_credentials', {})
|
||||
#set username = $credentials.get('username', 'root')
|
||||
#set password = $credentials.get('password', 'root')
|
||||
[controller]
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.management.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[compute]
|
||||
#for compute in $computes
|
||||
#set compute_ip = $compute.management.ip
|
||||
#set compute_hostname = $compute.hostname
|
||||
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[network]
|
||||
#for network in $networks
|
||||
#set network_ip = $network.management.ip
|
||||
#set network_hostname = $network.hostname
|
||||
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[storage]
|
||||
#for storage in $storages
|
||||
#set storage_ip = $storage.management.ip
|
||||
#set storage_hostname = $storage.hostname
|
||||
$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
@ -0,0 +1,123 @@
|
||||
#set compute_controllers = $getVar('compute_controller', [])
|
||||
#set compute_workers = $getVar('compute_worker', [])
|
||||
#set network_servers = $getVar('network_server', [])
|
||||
#set network_workers = $getVar('network_worker', [])
|
||||
#set databases = $getVar('database', [])
|
||||
#set messagings = $getVar('messaging', [])
|
||||
#set images = $getVar('image', [])
|
||||
#set dashboards = $getVar('dashboard', [])
|
||||
#set identities = $getVar('identity', [])
|
||||
#set storage_controllers = $getVar('storage_controller', [])
|
||||
#set storage_volumes = $getVar('storage_volume', [])
|
||||
#if not $isinstance($compute_controllers, list)
|
||||
#set compute_controllers = [$compute_controllers]
|
||||
#end if
|
||||
#if not $isinstance($compute_workers, list)
|
||||
#set compute_workers = [$compute_workers]
|
||||
#end if
|
||||
#if not $isinstance($network_servers, list)
|
||||
#set network_servers = [$network_servers]
|
||||
#end if
|
||||
#if not $isinstance($network_workers, list)
|
||||
#set network_workers = [$network_workers]
|
||||
#end if
|
||||
#if not $isinstance($databases, list)
|
||||
#set databases = [$databases]
|
||||
#end if
|
||||
#if not $isinstance($messagings, list)
|
||||
#set messagings = [$messagings]
|
||||
#end if
|
||||
#if not $isinstance($images, list)
|
||||
#set images = [$images]
|
||||
#end if
|
||||
#if not $isinstance($dashboards, list)
|
||||
#set dashboards = [$dashboards]
|
||||
#end if
|
||||
#if not $isinstance($identities, list)
|
||||
#set identities = [$identities]
|
||||
#end if
|
||||
#if not $isinstance($storage_controllers, list)
|
||||
#set storage_controllers = [$storage_controllers]
|
||||
#end if
|
||||
#if not $isinstance($storage_volumes, list)
|
||||
#set storage_volumes = [$storage_volumes]
|
||||
#end if
|
||||
#set credentials = $getVar('server_credentials', {})
|
||||
#set username = $credentials.get('username', 'root')
|
||||
#set password = $credentials.get('password', 'root')
|
||||
[compute-controller]
|
||||
#for controller in $compute_controllers
|
||||
#set controller_ip = $controller.management.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[compute-worker]
|
||||
#for compute in $compute_workers
|
||||
#set compute_ip = $compute.management.ip
|
||||
#set compute_hostname = $compute.hostname
|
||||
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[network-server]
|
||||
#for network in $network_servers
|
||||
#set network_ip = $network.management.ip
|
||||
#set network_hostname = $network.hostname
|
||||
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[network-worker]
|
||||
#for network in $network_workers
|
||||
#set network_ip = $network.management.ip
|
||||
#set network_hostname = $network.hostname
|
||||
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[database]
|
||||
#for worker in $databases
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[messaging]
|
||||
#for worker in $messagings
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[image]
|
||||
#for worker in $images
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[dashboard]
|
||||
#for worker in $dashboards
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[identity]
|
||||
#for worker in $identities
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[storage-controller]
|
||||
#for worker in $storage_controllers
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[storage-volume]
|
||||
#for worker in $storage_volumes
|
||||
#set worker_ip = $worker.management.ip
|
||||
#set worker_hostname = $worker.hostname
|
||||
$worker_hostname ansible_ssh_host=$worker_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
@ -0,0 +1,67 @@
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computes = $getVar('compute', [])
|
||||
#set storages = $getVar('storage', [])
|
||||
#set networks = $getVar('network', [])
|
||||
#set odls = $getVar('odl', [])
|
||||
#set onoss = $getVar('onos', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($computes, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
#if not $isinstance($storages, list)
|
||||
#set storages = [$storages]
|
||||
#end if
|
||||
#if not $isinstance($networks, list)
|
||||
#set networks = [$networks]
|
||||
#end if
|
||||
#if not $isinstance($odls, list)
|
||||
#set odls = [$odls]
|
||||
#end if
|
||||
#if not $isinstance($onoss, list)
|
||||
#set onoss = [$onoss]
|
||||
#end if
|
||||
|
||||
#set credentials = $getVar('server_credentials', {})
|
||||
#set username = $credentials.get('username', 'root')
|
||||
#set password = $credentials.get('password', 'root')
|
||||
[controller]
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.management.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
$controller_hostname ansible_ssh_host=$controller_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[compute]
|
||||
#for compute in $computes
|
||||
#set compute_ip = $compute.management.ip
|
||||
#set compute_hostname = $compute.hostname
|
||||
$compute_hostname ansible_ssh_host=$compute_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
[network]
|
||||
#for network in $networks
|
||||
#set network_ip = $network.management.ip
|
||||
#set network_hostname = $network.hostname
|
||||
$network_hostname ansible_ssh_host=$network_ip ansible_ssh_user=$username ansible_ssh_password=$password
|
||||
#end for
|
||||
|
||||
## Ansible inventory sections for storage and ODL nodes.
## Fixes: missing '$' on the loop iterables ($storages / $odls) for
## consistency with every other loop in this template, and removal of the
## duplicated second [storage] section that emitted the same hosts twice.
[storage]
#for storage in $storages
#set storage_ip = $storage.management.ip
#set storage_hostname = $storage.hostname
$storage_hostname ansible_ssh_host=$storage_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for

[odl]
#for odl in $odls
#set odl_ip = $odl.management.ip
#set odl_hostname = $odl.hostname
$odl_hostname ansible_ssh_host=$odl_ip ansible_ssh_user=$username ansible_ssh_password=$password
#end for
|
@ -0,0 +1,173 @@
|
||||
#from random import randint
|
||||
#set cluster_name = $getVar('name', '')
|
||||
#set network_cfg = $getVar('network_cfg', {})
|
||||
#set ntp_server = $getVar('ntp_server', "")
|
||||
#set ceph_disk = $getVar('ceph_disk',"")
|
||||
#set $sys_intf_mappings= {}
|
||||
#for $intf_info in $network_cfg.sys_intf_mappings
|
||||
#set $sys_intf_mappings[$intf_info["name"]] = $intf_info
|
||||
#end for
|
||||
|
||||
#set ip_settings={}
|
||||
#for k,v in $getVar('ip_settings', {}).items()
|
||||
#set host_ip_settings={}
|
||||
#for intf in v
|
||||
#set $host_ip_settings[$intf["alias"]]=intf
|
||||
#end for
|
||||
#set $ip_settings[$k]=$host_ip_settings
|
||||
#end for
|
||||
|
||||
#set neutron_cfg = $getVar('neutron_config', {})
|
||||
#set ovs_config = $neutron_cfg.openvswitch
|
||||
|
||||
#set has = $getVar('ha', [])
|
||||
#set ha_vip = $getVar('ha_vip', [])
|
||||
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computers = $getVar('compute', [])
|
||||
|
||||
enable_secgroup: $getVar('enable_secgroup', True)
|
||||
enable_fwaas: $getVar('enable_fwaas', True)
|
||||
enable_vpnaas: $getVar('enable_vpnaas', True)
|
||||
ip_settings: $ip_settings
|
||||
network_cfg: $network_cfg
|
||||
sys_intf_mappings: $sys_intf_mappings
|
||||
deploy_type: $getVar('deploy_type', 'virtual')
|
||||
|
||||
public_net_info: "{{ network_cfg.public_net_info }}"
|
||||
host_ip_settings: "{{ ip_settings[inventory_hostname] }}"
|
||||
|
||||
ntp_server: $ntp_server
|
||||
internal_vip:
|
||||
ip: $network_cfg["internal_vip"]["ip"]
|
||||
netmask: $network_cfg["internal_vip"]["netmask"]
|
||||
#if "vlan_tag" in $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]
|
||||
interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["name"]
|
||||
#else
|
||||
interface: $sys_intf_mappings[$network_cfg["internal_vip"]["interface"]]["interface"]
|
||||
#end if
|
||||
|
||||
public_vip:
|
||||
ip: $network_cfg["public_vip"]["ip"]
|
||||
netmask: $network_cfg["public_vip"]["netmask"]
|
||||
#if "vlan_tag" in $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]
|
||||
interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["name"]
|
||||
#else
|
||||
interface: $sys_intf_mappings[$network_cfg["public_vip"]["interface"]]["interface"]
|
||||
#end if
|
||||
|
||||
db_host: "{{ internal_vip.ip }}"
|
||||
rabbit_host: "{{ internal_vip.ip }}"
|
||||
|
||||
internal_ip: "{{ ip_settings[inventory_hostname]['mgmt']['ip'] }}"
|
||||
internal_nic: mgmt
|
||||
|
||||
#set random_id = randint(1, 255)
|
||||
vrouter_id_internal: $random_id
|
||||
vrouter_id_public: $random_id
|
||||
|
||||
identity_host: "{{ internal_ip }}"
|
||||
controllers_host: "{{ internal_ip }}"
|
||||
storage_controller_host: "{{ internal_ip }}"
|
||||
compute_controller_host: "{{ internal_ip }}"
|
||||
image_host: "{{ internal_ip }}"
|
||||
network_server_host: "{{ internal_ip }}"
|
||||
dashboard_host: "{{ internal_ip }}"
|
||||
|
||||
haproxy_hosts:
|
||||
#for $item in $has
|
||||
#set $hostname=$item["hostname"]
|
||||
$hostname: $ip_settings[$hostname]["mgmt"]["ip"]
|
||||
#end for
|
||||
|
||||
ERLANG_TOKEN: YOWSJSJIGGAUFZTIBRAD
|
||||
## Extract service/console credentials supplied by Compass into local
## template variables for the HA group-vars file.
## Fix: the rabbitmq username/password #set lines were duplicated verbatim.
#set credentials = $getVar('service_credentials', {})
#set console_credentials = $getVar('console_credentials', {})
#set rabbit_username = $credentials.rabbitmq.username
#set rabbit_password = $credentials.rabbitmq.password
#set keystone_dbpass = $credentials.identity.password
#set glance_dbpass = $credentials.image.password
#set glance_pass = $console_credentials.image.password
#set nova_dbpass = $credentials.compute.password
#set nova_pass = $console_credentials.compute.password
#set dash_dbpass = $credentials.dashboard.password
#set cinder_dbpass = $credentials.volume.password
#set cinder_pass = $console_credentials.volume.password
#set admin_pass = $console_credentials.admin.password
#set neutron_pass = $console_credentials.network.password
|
||||
|
||||
cluster_name: $cluster_name
|
||||
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ controllers_host }}"
|
||||
DB_HOST: "{{ db_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
|
||||
OPENSTACK_REPO: cloudarchive-juno.list
|
||||
juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
erlang.cookie: DJJVECFMCJPVYQTJTDWG
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
CEILOMETER_DBPASS: service
|
||||
CEILOMETER_PASS: console
|
||||
DEMO_PASS: demo_secret
|
||||
ADMIN_PASS: $admin_pass
|
||||
GLANCE_DBPASS: $glance_dbpass
|
||||
GLANCE_PASS: $glance_pass
|
||||
NOVA_DBPASS: $nova_dbpass
|
||||
NOVA_PASS: $nova_pass
|
||||
DASH_DBPASS: $dash_dbpass
|
||||
CINDER_DBPASS: $cinder_dbpass
|
||||
CINDER_PASS: $cinder_pass
|
||||
NEUTRON_DBPASS: $neutron_pass
|
||||
NEUTRON_PASS: $neutron_pass
|
||||
|
||||
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan', 'vlan']
|
||||
NEUTRON_TENANT_NETWORK_TYPES: ['$ovs_config["tenant_network_type"]']
|
||||
NEUTRON_OVS_BRIDGE_MAPPINGS: $ovs_config['bridge_mappings']
|
||||
#if 'vlan_ranges' in $ovs_config
|
||||
NEUTRON_VLAN_RANGES: $ovs_config['vlan_ranges']
|
||||
#else
|
||||
NEUTRON_VLAN_RANGES: []
|
||||
#end if
|
||||
#if 'tunnel_id_ranges' in $ovs_config
|
||||
NEUTRON_TUNNEL_ID_RANGES: $ovs_config['tunnel_id_ranges']
|
||||
#else
|
||||
NEUTRON_TUNNEL_ID_RANGES: []
|
||||
#end if
|
||||
|
||||
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
|
||||
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
|
||||
NEUTRON_TUNNEL_TYPES: ['vxlan']
|
||||
METADATA_SECRET: metadata_secret
|
||||
WSREP_SST_USER: wsrep_sst
|
||||
WSREP_SST_PASS: wsrep_sst_sercet
|
||||
|
||||
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: "{{ internal_ip }}"
|
||||
|
||||
#build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
|
||||
build_in_image: http://192.168.121.12:9999/img/cirros-0.3.3-x86_64-disk.img
|
||||
build_in_image_name: cirros-0.3.3-x86_64-disk.img
|
||||
|
||||
physical_device: /dev/sdb
|
||||
|
||||
## OpenDaylight controller access and package settings.
## Fix: 'odl_api_port' was emitted twice (duplicate YAML key); the
## trailing duplicate is removed.
odl_username: admin
odl_password: admin
odl_api_port: 8080

odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
odl_pkg_name: karaf.tar.gz
odl_home: "/opt/opendaylight-0.2.2/"
odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
odl_features: "{{ odl_base_features + odl_extra_features }}"
|
@ -0,0 +1,96 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
#set controllers = $getVar('allinone_compute', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.management.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
controller_host: $controller_ip
|
||||
#end for
|
||||
#for network in $controllers
|
||||
#set network_external_nic = $network.external.interface
|
||||
#set network_external_subnet = $network.external.subnet
|
||||
#set network_internal_nic = $network.management.interface
|
||||
INTERFACE_NAME: $network_external_nic
|
||||
INTERNAL_INTERFACE: $network_internal_nic
|
||||
#end for
|
||||
|
||||
#set credentials = $getVar('service_credentials', {})
|
||||
#set console_credentials = $getVar('console_credentials', {})
|
||||
#set rabbit_username = $credentials.rabbitmq.username
|
||||
#set rabbit_password = $credentials.rabbitmq.password
|
||||
#set keystone_dbpass = $credentials.identity.password
|
||||
#set glance_dbpass = $credentials.image.password
|
||||
#set glance_pass = $console_credentials.image.password
|
||||
#set nova_dbpass = $credentials.compute.password
|
||||
#set nova_pass = $console_credentials.compute.password
|
||||
#set dash_dbpass = $credentials.dashboard.password
|
||||
#set cinder_dbpass = $credentials.volume.password
|
||||
#set cinder_pass = $console_credentials.volume.password
|
||||
#set admin_pass = $console_credentials.admin.password
|
||||
#set neutron_pass = $console_credentials.network.password
|
||||
|
||||
compute_controller_host: "{{ controller_host }}"
|
||||
db_host: "{{ controller_host }}"
|
||||
rabbit_host: "{{ controller_host }}"
|
||||
storage_controller_host: "{{ controller_host }}"
|
||||
image_host: "{{ controller_host }}"
|
||||
identity_host: "{{ controller_host }}"
|
||||
network_server_host: "{{ controller_host }}"
|
||||
dashboard_host: "{{ controller_host }}"
|
||||
|
||||
cluster_name: $cluster_name
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ controller_host }}"
|
||||
DB_HOST: "{{ controller_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
|
||||
OPENSTACK_REPO: cloudarchive-juno.list
|
||||
juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
DEMO_PASS: demo_secret
|
||||
ADMIN_PASS: $admin_pass
|
||||
GLANCE_DBPASS: $glance_dbpass
|
||||
GLANCE_PASS: $glance_pass
|
||||
NOVA_DBPASS: $nova_dbpass
|
||||
NOVA_PASS: $nova_pass
|
||||
DASH_DBPASS: $dash_dbpass
|
||||
CINDER_DBPASS: $cinder_dbpass
|
||||
CINDER_PASS: $cinder_pass
|
||||
NEUTRON_DBPASS: $neutron_pass
|
||||
NEUTRON_PASS: $neutron_pass
|
||||
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
|
||||
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
|
||||
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
|
||||
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
|
||||
NEUTRON_TUNNEL_TYPES: ['vxlan']
|
||||
METADATA_SECRET: metadata_secret
|
||||
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
|
||||
|
||||
EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
|
||||
EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
|
||||
FLOATING_IP_START: 203.0.113.101
|
||||
FLOATING_IP_END: 203.0.113.200
|
||||
|
||||
build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
|
||||
build_in_image_name: cirros-0.3.3-x86_64-disk.img
|
||||
|
||||
physical_device: /dev/sdb
|
||||
|
||||
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
|
||||
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
|
||||
HA_VIP: "{{ internal_ip }}"
|
||||
|
||||
odl_username: admin
|
||||
odl_password: admin
|
||||
odl_api_port: 8080
|
@ -0,0 +1,165 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
#set compute_controllers = $getVar('compute_controller', [])
|
||||
#set compute_workers = $getVar('compute_worker', [])
|
||||
#set network_servers = $getVar('network_server', [])
|
||||
#set network_workers = $getVar('network_worker', [])
|
||||
#set databases = $getVar('database', [])
|
||||
#set messagings = $getVar('messaging', [])
|
||||
#set images = $getVar('image', [])
|
||||
#set dashboards = $getVar('dashboard', [])
|
||||
#set identities = $getVar('identity', [])
|
||||
#set storage_controllers = $getVar('storage_controller', [])
|
||||
#set storage_volumes = $getVar('storage_volume', [])
|
||||
#if not $isinstance($compute_controllers, list)
|
||||
#set compute_controllers = [$compute_controllers]
|
||||
#end if
|
||||
#if not $isinstance($compute_workers, list)
|
||||
#set compute_workers = [$compute_workers]
|
||||
#end if
|
||||
#if not $isinstance($network_servers, list)
|
||||
#set network_servers = [$network_servers]
|
||||
#end if
|
||||
#if not $isinstance($network_workers, list)
|
||||
#set network_workers = [$network_workers]
|
||||
#end if
|
||||
#if not $isinstance($databases, list)
|
||||
#set databases = [$databases]
|
||||
#end if
|
||||
#if not $isinstance($messagings, list)
|
||||
#set messagings = [$messagings]
|
||||
#end if
|
||||
#if not $isinstance($images, list)
|
||||
#set images = [$images]
|
||||
#end if
|
||||
#if not $isinstance($dashboards, list)
|
||||
#set dashboards = [$dashboards]
|
||||
#end if
|
||||
#if not $isinstance($identities, list)
|
||||
#set identities = [$identities]
|
||||
#end if
|
||||
#if not $isinstance($storage_controllers, list)
|
||||
#set storage_controllers = [$storage_controllers]
|
||||
#end if
|
||||
#if not $isinstance($storage_volumes, list)
|
||||
#set storage_volumes = [$storage_volumes]
|
||||
#end if
|
||||
#for worker in $compute_controllers
|
||||
#set worker_ip = $worker.management.ip
|
||||
compute_controller_host: $worker_ip
|
||||
#end for
|
||||
#for worker in $databases
|
||||
#set worker_ip = $worker.management.ip
|
||||
db_host: $worker_ip
|
||||
#end for
|
||||
#for worker in $messagings
|
||||
#set worker_ip = $worker.management.ip
|
||||
rabbit_host: $worker_ip
|
||||
#end for
|
||||
#for worker in $storage_controllers
|
||||
#set worker_ip = $worker.management.ip
|
||||
storage_controller_host: $worker_ip
|
||||
#end for
|
||||
#for worker in $images
|
||||
#set worker_ip = $worker.management.ip
|
||||
image_host: $worker_ip
|
||||
#end for
|
||||
## Emit identity and network-server host addresses for the multinodes
## group-vars file.
## Fix: a second, verbatim copy of the compute_controller_host loop
## (already emitted earlier in this template) sat between these two loops
## and produced a duplicate YAML key; it is removed.
#for worker in $identities
#set worker_ip = $worker.management.ip
identity_host: $worker_ip
#end for
#for worker in $network_servers
#set worker_ip = $worker.management.ip
network_server_host: $worker_ip
#end for
|
||||
#for worker in $dashboards
|
||||
#set worker_ip = $worker.management.ip
|
||||
dashboard_host: $worker_ip
|
||||
#end for
|
||||
#for network in $network_workers
|
||||
#set network_external_nic = $network.external.interface
|
||||
#set network_internal_nic = $network.management.interface
|
||||
INTERFACE_NAME: $network_external_nic
|
||||
INTERNAL_INTERFACE: $network_internal_nic
|
||||
#end for
|
||||
|
||||
#set credentials = $getVar('service_credentials', {})
|
||||
#set console_credentials = $getVar('console_credentials', {})
|
||||
#set rabbit_username = $credentials.rabbitmq.username
|
||||
#set rabbit_password = $credentials.rabbitmq.password
|
||||
#set keystone_dbpass = $credentials.identity.password
|
||||
#set glance_dbpass = $credentials.image.password
|
||||
#set glance_pass = $console_credentials.image.password
|
||||
#set nova_dbpass = $credentials.compute.password
|
||||
#set nova_pass = $console_credentials.compute.password
|
||||
#set dash_dbpass = $credentials.dashboard.password
|
||||
#set cinder_dbpass = $credentials.volume.password
|
||||
#set cinder_pass = $console_credentials.volume.password
|
||||
#set admin_pass = $console_credentials.admin.password
|
||||
#set neutron_pass = $console_credentials.network.password
|
||||
|
||||
cluster_name: $cluster_name
|
||||
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ compute_controller_host }}"
|
||||
DB_HOST: "{{ db_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
|
||||
OPENSTACK_REPO: cloudarchive-juno.list
|
||||
juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
DEMO_PASS: demo_secret
|
||||
ADMIN_PASS: $admin_pass
|
||||
GLANCE_DBPASS: $glance_dbpass
|
||||
GLANCE_PASS: $glance_pass
|
||||
NOVA_DBPASS: $nova_dbpass
|
||||
NOVA_PASS: $nova_pass
|
||||
DASH_DBPASS: $dash_dbpass
|
||||
CINDER_DBPASS: $cinder_dbpass
|
||||
CINDER_PASS: $cinder_pass
|
||||
NEUTRON_DBPASS: $neutron_pass
|
||||
NEUTRON_PASS: $neutron_pass
|
||||
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
|
||||
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
|
||||
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
|
||||
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
|
||||
NEUTRON_TUNNEL_TYPES: ['vxlan']
|
||||
METADATA_SECRET: metadata_secret
|
||||
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
|
||||
|
||||
EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
|
||||
EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
|
||||
FLOATING_IP_START: 203.0.113.101
|
||||
FLOATING_IP_END: 203.0.113.200
|
||||
|
||||
build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
|
||||
build_in_image_name: cirros-0.3.3-x86_64-disk.img
|
||||
|
||||
physical_device: /dev/sdb
|
||||
|
||||
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
|
||||
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
|
||||
HA_VIP: "{{ internal_ip }}"
|
||||
## OpenDaylight controller access and package settings (multinodes).
## Fix: 'odl_api_port' was emitted twice (duplicate YAML key); the
## trailing duplicate is removed.
odl_username: admin
odl_password: admin
odl_api_port: 8080

odl_pkg_url: https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.2-Helium-SR2/distribution-karaf-0.2.2-Helium-SR2.tar.gz
odl_pkg_name: karaf.tar.gz
odl_home: "/opt/opendaylight-0.2.2/"
odl_base_features: ['config', 'standard', 'region', 'package', 'kar', 'ssh', 'management', 'http', 'odl-base-all','odl-aaa-authn','odl-restconf','odl-nsf-all','odl-adsal-northbound','odl-mdsal-apidocs', 'odl-openflowplugin-all']
odl_extra_features: ['odl-l2switch-switch', 'odl-ovsdb-plugin', 'odl-ovsdb-openstack', 'odl-ovsdb-northbound','odl-dlux-core', 'odl-restconf-all', 'odl-mdsal-clustering', 'odl-openflowplugin-flow-services', 'odl-netconf-connector', 'odl-netconf-connector-ssh', 'jolokia-osgi']
odl_features: "{{ odl_base_features + odl_extra_features }}"
|
||||
|
@ -0,0 +1,108 @@
|
||||
#set cluster_name = $getVar('name', '')
|
||||
#set controllers = $getVar('controller', [])
|
||||
#set computes = $getVar('compute', [])
|
||||
#set networks = $getVar('network', [])
|
||||
#set storages = $getVar('storage', [])
|
||||
#if not $isinstance($controllers, list)
|
||||
#set controllers = [$controllers]
|
||||
#end if
|
||||
#if not $isinstance($computes, list)
|
||||
#set computes = [$computes]
|
||||
#end if
|
||||
#if not $isinstance($networks, list)
|
||||
#set networks = [$networks]
|
||||
#end if
|
||||
#if not $isinstance($storages, list)
|
||||
#set storages = [$storages]
|
||||
#end if
|
||||
|
||||
#for controller in $controllers
|
||||
#set controller_ip = $controller.management.ip
|
||||
#set controller_hostname = $controller.hostname
|
||||
controller_host: $controller_ip
|
||||
#end for
|
||||
#for network in $networks
|
||||
#set network_external_nic = $network.external.interface
|
||||
#set network_external_subnet = $network.external.subnet
|
||||
#set network_internal_nic = $network.management.interface
|
||||
INTERFACE_NAME: $network_external_nic
|
||||
INTERNAL_INTERFACE: $network_internal_nic
|
||||
#end for
|
||||
|
||||
#set credentials = $getVar('service_credentials', {})
|
||||
#set console_credentials = $getVar('console_credentials', {})
|
||||
#set rabbit_username = $credentials.rabbitmq.username
|
||||
#set rabbit_password = $credentials.rabbitmq.password
|
||||
#set keystone_dbpass = $credentials.identity.password
|
||||
#set glance_dbpass = $credentials.image.password
|
||||
#set glance_pass = $console_credentials.image.password
|
||||
#set nova_dbpass = $credentials.compute.password
|
||||
#set nova_pass = $console_credentials.compute.password
|
||||
#set dash_dbpass = $credentials.dashboard.password
|
||||
#set cinder_dbpass = $credentials.volume.password
|
||||
#set cinder_pass = $console_credentials.volume.password
|
||||
#set admin_pass = $console_credentials.admin.password
|
||||
#set neutron_pass = $console_credentials.network.password
|
||||
|
||||
cluster_name: $cluster_name
|
||||
deploy_type: $getVar('deploy_type', 'virtual')
|
||||
compute_controller_host: "{{ controller_host }}"
|
||||
db_host: "{{ controller_host }}"
|
||||
rabbit_host: "{{ controller_host }}"
|
||||
storage_controller_host: "{{ controller_host }}"
|
||||
image_host: "{{ controller_host }}"
|
||||
identity_host: "{{ controller_host }}"
|
||||
network_server_host: "{{ controller_host }}"
|
||||
dashboard_host: "{{ controller_host }}"
|
||||
odl_controller: 10.1.0.15
|
||||
|
||||
DEBUG: true
|
||||
VERBOSE: true
|
||||
NTP_SERVER_LOCAL: "{{ controller_host }}"
|
||||
DB_HOST: "{{ controller_host }}"
|
||||
MQ_BROKER: rabbitmq
|
||||
|
||||
OPENSTACK_REPO: cloudarchive-juno.list
|
||||
juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
|
||||
ADMIN_TOKEN: admin
|
||||
CEILOMETER_TOKEN: c095d479023a0fd58a54
|
||||
|
||||
RABBIT_USER: $rabbit_username
|
||||
RABBIT_PASS: $rabbit_password
|
||||
KEYSTONE_DBPASS: $keystone_dbpass
|
||||
DEMO_PASS: demo_secret
|
||||
ADMIN_PASS: $admin_pass
|
||||
GLANCE_DBPASS: $glance_dbpass
|
||||
GLANCE_PASS: $glance_pass
|
||||
NOVA_DBPASS: $nova_dbpass
|
||||
NOVA_PASS: $nova_pass
|
||||
DASH_DBPASS: $dash_dbpass
|
||||
CINDER_DBPASS: $cinder_dbpass
|
||||
CINDER_PASS: $cinder_pass
|
||||
NEUTRON_DBPASS: $neutron_pass
|
||||
NEUTRON_PASS: $neutron_pass
|
||||
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
|
||||
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
|
||||
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
|
||||
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
|
||||
NEUTRON_TUNNEL_TYPES: ['vxlan']
|
||||
METADATA_SECRET: metadata_secret
|
||||
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
|
||||
|
||||
EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
|
||||
# EXTERNAL_NETWORK_CIDR: $network_external_subnet
|
||||
EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
|
||||
FLOATING_IP_START: 203.0.113.101
|
||||
FLOATING_IP_END: 203.0.113.200
|
||||
|
||||
build_in_image: http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
|
||||
build_in_image_name: cirros-0.3.3-x86_64-disk.img
|
||||
|
||||
physical_device: /dev/sdb
|
||||
|
||||
internal_interface: "ansible_{{ INTERNAL_INTERFACE }}"
|
||||
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
|
||||
HA_VIP: "{{ internal_ip }}"
|
||||
odl_username: admin
|
||||
odl_password: admin
|
||||
odl_api_port: 8080
|
@ -0,0 +1,3 @@
|
||||
{
|
||||
"repos": "$getVar('repo_name', '')"
|
||||
}
|
@ -0,0 +1,76 @@
|
||||
{
|
||||
"name": "$hostname",
|
||||
"hostname": "$hostname",
|
||||
"profile": "$profile",
|
||||
"gateway": "$gateway",
|
||||
#import simplejson as json
|
||||
#set nameservers = json.dumps($nameservers, encoding='utf-8')
|
||||
"name_servers": $nameservers,
|
||||
#set search_path = ' '.join($search_path)
|
||||
"name_servers_search": "$search_path",
|
||||
"proxy": "$getVar('http_proxy', '')",
|
||||
"modify_interface":
|
||||
#set networks = $networks
|
||||
#set rekeyed = {}
|
||||
#set promic_nic = ""
|
||||
#for $nic, $val in $networks.iteritems():
|
||||
#set ip_key = '-'.join(('ipaddress', $nic))
|
||||
#set netmask_key = '-'.join(('netmask', $nic))
|
||||
#set mgmt_key = '-'.join(('management', $nic))
|
||||
#set static_key = '-'.join(('static', $nic))
|
||||
#set $rekeyed[$ip_key] = $val.ip
|
||||
#set $rekeyed[$netmask_key] = $val.netmask
|
||||
#set $rekeyed[$mgmt_key] = $val.is_mgmt
|
||||
#set $rekeyed[$static_key] = True
|
||||
|
||||
#set dns_key = '-'.join(('dnsname', $nic))
|
||||
#if $val.is_mgmt
|
||||
#set $rekeyed[$dns_key] = $dns
|
||||
#else
|
||||
#if '.' in $dns
|
||||
#set $dns_name, $dns_domain = $dns.split('.', 1)
|
||||
#set $dns_nic = '%s-%s.%s' % ($dns_name, $nic, $dns_domain)
|
||||
#else
|
||||
#set $dns_nic = '%s-%s' % ($dns, $nic)
|
||||
#end if
|
||||
#set $rekeyed[$dns_key] = $dns_nic
|
||||
#end if
|
||||
|
||||
#if $val.is_promiscuous:
|
||||
#set promic_nic = $nic
|
||||
#end if
|
||||
#if $val.is_mgmt:
|
||||
#set mac_key = '-'.join(('macaddress', $nic))
|
||||
#set $rekeyed[$mac_key] = $mac
|
||||
#end if
|
||||
#end for
|
||||
#set nic_info = json.dumps($rekeyed, encoding='utf-8')
|
||||
$nic_info
|
||||
,
|
||||
"ksmeta":{
|
||||
#set partition_config = ''
|
||||
#for k, v in $partition.iteritems():
|
||||
#set path = ''
|
||||
#if v['vol_percentage']:
|
||||
#set $path = k + ' ' + str(v['vol_percentage']) + '%'
|
||||
#else:
|
||||
#set $path = k + ' ' + str(v['vol_size'])
|
||||
#end if
|
||||
#set partition_config = ';'.join((partition_config, $path))
|
||||
#end for
|
||||
#set partition_config = partition_config[1:]
|
||||
#import crypt
|
||||
#set $password = crypt.crypt($server_credentials.password, "az")
|
||||
#set no_proxy = ','.join($getVar('no_proxy', []))
|
||||
"username": "$server_credentials.username",
|
||||
"password": "$password",
|
||||
"promisc_nics": "$promic_nic",
|
||||
"partition": "$partition_config",
|
||||
"https_proxy": "$getVar('https_proxy', '')",
|
||||
"ntp_server": "$ntp_server",
|
||||
"timezone": "$timezone",
|
||||
"ignore_proxy": "$no_proxy",
|
||||
"local_repo": "$getVar('local_repo', '')",
|
||||
"disk_num": "1"
|
||||
}
|
||||
}
|
@ -0,0 +1,3 @@
|
||||
{
|
||||
"repos": "$getVar('repo_name', '')"
|
||||
}
|
@ -0,0 +1,75 @@
|
||||
{
|
||||
"name": "$hostname",
|
||||
"hostname": "$hostname",
|
||||
"profile": "$profile",
|
||||
"gateway": "$gateway",
|
||||
#import simplejson as json
|
||||
#set nameservers = json.dumps($nameservers, encoding='utf-8')
|
||||
"name_servers": $nameservers,
|
||||
#set search_path = ' '.join($search_path)
|
||||
"name_servers_search": "$search_path",
|
||||
"proxy": "$getVar('http_proxy', '')",
|
||||
"modify_interface":
|
||||
#set networks = $networks
|
||||
#set rekeyed = {}
|
||||
#set promic_nic = ""
|
||||
## Build the cobbler 'modify_interface' key/value pairs for each NIC.
## NOTE(review): $mgmt_key was computed but never stored into $rekeyed,
## unlike the sibling system.tmpl which sets 'management-<nic>'.
## Restored here for parity — confirm this omission was not intentional
## for this OS's installer.
#for $nic, $val in $networks.iteritems():
#set ip_key = '-'.join(('ipaddress', $nic))
#set netmask_key = '-'.join(('netmask', $nic))
#set mgmt_key = '-'.join(('management', $nic))
#set static_key = '-'.join(('static', $nic))
#set $rekeyed[$ip_key] = $val.ip
#set $rekeyed[$netmask_key] = $val.netmask
#set $rekeyed[$mgmt_key] = $val.is_mgmt
#set $rekeyed[$static_key] = True
|
||||
|
||||
#set dns_key = '-'.join(('dnsname', $nic))
|
||||
#if $val.is_mgmt
|
||||
#set $rekeyed[$dns_key] = $dns
|
||||
#else
|
||||
#if '.' in $dns
|
||||
#set $dns_name, $dns_domain = $dns.split('.', 1)
|
||||
#set $dns_nic = '%s-%s.%s' % ($dns_name, $nic, $dns_domain)
|
||||
#else
|
||||
#set $dns_nic = '%s-%s' % ($dns, $nic)
|
||||
#end if
|
||||
#set $rekeyed[$dns_key] = $dns_nic
|
||||
#end if
|
||||
|
||||
#if $val.is_promiscuous:
|
||||
#set promic_nic = $nic
|
||||
#end if
|
||||
#if $val.is_mgmt:
|
||||
#set mac_key = '-'.join(('macaddress', $nic))
|
||||
#set $rekeyed[$mac_key] = $mac
|
||||
#end if
|
||||
#end for
|
||||
#set nic_info = json.dumps($rekeyed, encoding='utf-8')
|
||||
$nic_info
|
||||
,
|
||||
"ksmeta":{
|
||||
#set partition_config = ''
|
||||
#for k, v in $partition.iteritems():
|
||||
#set path = ''
|
||||
#if v['vol_percentage']:
|
||||
#set $path = k + ' ' + str(v['vol_percentage']) + '%'
|
||||
#else:
|
||||
#set $path = k + ' ' + str(v['vol_size'])
|
||||
#end if
|
||||
#set partition_config = ';'.join((partition_config, $path))
|
||||
#end for
|
||||
#set partition_config = partition_config[1:]
|
||||
#import crypt
|
||||
#set $password = crypt.crypt($server_credentials.password, "az")
|
||||
#set no_proxy = ','.join($getVar('no_proxy', []))
|
||||
"username": "$server_credentials.username",
|
||||
"password": "$password",
|
||||
"promisc_nics": "$promic_nic",
|
||||
"partition": "$partition_config",
|
||||
"https_proxy": "$getVar('https_proxy', '')",
|
||||
"ntp_server": "$ntp_server",
|
||||
"timezone": "$timezone",
|
||||
"ignore_proxy": "$no_proxy",
|
||||
"local_repo": "$getVar('local_repo', '')",
|
||||
"disk_num": "1"
|
||||
}
|
||||
}
|
Loading…
x
Reference in New Issue
Block a user