add regtest framework

Change-Id: Iee78d5445f1c8e2687e527d3505a191074d69776
xiaodongwang 2014-03-27 11:44:40 -07:00
parent 21cdb772ef
commit 74f1bc08a4
23 changed files with 1168 additions and 42 deletions

584
bin/client.py Executable file
View File

@ -0,0 +1,584 @@
#!/usr/bin/python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""binary to deploy a cluster by compass client api."""
import logging
import re
import requests
import time
from compass.apiclient.restful import Client
from compass.utils import flags
from compass.utils import logsetting
flags.add('compass_server',
help='compass server url',
default='http://127.0.0.1/api')
flags.add('switch_ips',
help='comma separated switch IPs',
default='')
flags.add('switch_credential',
help='comma separated <credential key>=<credential value>',
default='version=v2c,community=public')
flags.add('switch_max_retries', type='int',
help='max retries of poll switch',
default=-1)
flags.add('machines',
help='comma separated mac addresses of machines',
default='')
flags.add('adapter_os_name',
help='adapter os name',
default=r'(?i)centos.*')
flags.add('adapter_target_system',
help='adapter target system name',
default='openstack')
flags.add('cluster_name',
help='cluster name',
default='cluster1')
flags.add('credentials',
help=(
'comma separated credentials formatted as '
'<credential_name>:<username>=<password>'
),
default=(
'server:root=root,service:service=service,'
'console:console=console'
))
flags.add('networking',
help=(
'semicolon separated network properties, each formatted as '
'<network_property_name>=<value>'
),
default='')
flags.add('partitions',
help=(
'comma separated partitions '
'<partition name>:<partition_type>=<partition_value>'
),
default='tmp:percentage=10,var:percentage=20,home:percentage=40')
flags.add('host_roles',
help=(
'semicolon separated host roles '
'<hostname>=<comma separated roles>'
),
default='')
flags.add('deployment_timeout',
type='int',
help='deployment timeout',
default=60 * 60)
flags.add('progress_update_check_interval',
type='int',
help='progress update status check interval',
default=30)
flags.add('dashboard_role',
help='dashboard role name',
default='os-dashboard')
flags.add('dashboard_link_pattern',
help='dashboard link pattern',
default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)')
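# Illustrative flag values (examples only, pieced together from the defaults
# above and the regtest configs added later in this commit):
#   --switch_ips=10.145.81.219
#   --machines=00:00:01:02:03:04
#   --credentials=server:root=root,service:service=service,console:console=console
#   --networking=nameservers=172.16.0.1;gateway=172.16.0.1;management_nic=eth0;management_ip_start=172.16.1.1;management_ip_end=172.16.1.254
#   --partitions=tmp:percentage=10,var:percentage=20,home:percentage=40
#   --host_roles=host1=os-single-controller,os-network;host2=os-compute-worker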
def _get_client():
"""get apiclient object."""
return Client(flags.OPTIONS.compass_server)
def _get_machines(client):
"""get machines connected to the switch."""
status, resp = client.get_machines()
logging.info(
'get all machines status: %s, resp: %s', status, resp)
if status >= 400:
msg = 'failed to get machines'
raise Exception(msg)
machines_to_add = set([
machine for machine in flags.OPTIONS.machines.split(',')
if machine
])
logging.info('machines to add: %s', list(machines_to_add))
machines = {}
for machine in resp['machines']:
mac = machine['mac']
if mac in machines_to_add:
machines[machine['id']] = mac
logging.info('found machines: %s', machines.values())
if set(machines.values()) != machines_to_add:
msg = 'machines %s are missing' % (
list(machines_to_add - set(machines.values()))
)
raise Exception(msg)
return machines
def _poll_switches(client):
"""get all switches."""
status, resp = client.get_switches()
logging.info('get all switches status: %s resp: %s', status, resp)
if status >= 400:
msg = 'failed to get switches'
raise Exception(msg)
all_switches = {}
for switch in resp['switches']:
all_switches[switch['ip']] = switch
# add a switch.
switch_ips = [
switch_ip for switch_ip in flags.OPTIONS.switch_ips.split(',')
if switch_ip
]
switch_credential = dict([
credential.split('=', 1)
for credential in flags.OPTIONS.switch_credential.split(',')
if '=' in credential
])
for switch_ip in switch_ips:
if switch_ip not in all_switches:
status, resp = client.add_switch(switch_ip, **switch_credential)
logging.info('add switch %s status: %s resp: %s',
switch_ip, status, resp)
if status >= 400:
msg = 'failed to add switch %s' % switch_ip
raise Exception(msg)
all_switches[switch_ip] = resp['switch']
else:
logging.info('switch %s is already added', switch_ip)
for switch_ip, switch in all_switches.items():
switch_id = switch['id']
# if the switch is not under_monitoring, wait for the
# poll-switch task to update the switch information and
# change the switch state.
remain_retries = flags.OPTIONS.switch_max_retries
while True:
if remain_retries != 0:
logging.info(
'waiting for switch %s to enter under_monitoring',
switch_ip)
status, resp = client.get_switch(switch_id)
logging.info('get switch %s status: %s, resp: %s',
switch_ip, status, resp)
if status >= 400:
msg = 'failed to get switch %s' % switch_ip
raise Exception(msg)
switch = resp['switch']
all_switches[switch_ip] = switch
if switch['state'] == 'notsupported':
msg = 'switch %s is not supported' % switch_ip
raise Exception(msg)
elif switch['state'] in ['initialized', 'repolling']:
logging.info('switch %s is not updated', switch_ip)
else:
if switch['state'] == 'under_monitoring':
logging.info('switch %s is ready', switch_ip)
try:
return _get_machines(client)
except Exception as error:
logging.exception(error)
status, resp = client.update_switch(
switch_id, switch_ip, **switch_credential)
if status >= 400:
msg = 'failed to update switch %s' % switch_ip
raise Exception(msg)
time.sleep(10)
remain_retries -= 1
else:
msg = 'max retries reached for switch %s' % switch_ip
raise Exception(msg)
def _get_adapter(client):
"""get adapter."""
status, resp = client.get_adapters()
logging.info('get all adapters status: %s, resp: %s', status, resp)
if status >= 400:
msg = 'failed to get adapters'
raise Exception(msg)
os_name_pattern = flags.OPTIONS.adapter_os_name
os_name_re = re.compile(os_name_pattern)
target_system = flags.OPTIONS.adapter_target_system
adapter_id = None
for adapter in resp['adapters']:
if (
os_name_re.match(adapter['os']) and
target_system == adapter['target_system']
):
adapter_id = adapter['id']
if not adapter_id:
msg = 'no adapter found for %s and %s' % (
os_name_pattern, target_system)
raise Exception(msg)
logging.info('adapter for deploying a cluster: %s', adapter_id)
return adapter_id
def _add_cluster(client, adapter_id, machines):
"""add a cluster."""
cluster_name = flags.OPTIONS.cluster_name
status, resp = client.add_cluster(
cluster_name=cluster_name, adapter_id=adapter_id)
logging.info('add cluster %s status: %s, resp: %s',
cluster_name, status, resp)
if status >= 400:
msg = 'failed to add cluster %s with adapter %s' % (
cluster_name, adapter_id)
raise Exception(msg)
cluster = resp['cluster']
cluster_id = cluster['id']
# add hosts to the cluster.
status, resp = client.add_hosts(
cluster_id=cluster_id,
machine_ids=machines.keys())
logging.info('add hosts to cluster %s status: %s, resp: %s',
cluster_id, status, resp)
if status >= 400:
msg = 'failed to add machines %s to cluster %s' % (
machines, cluster_name)
raise Exception(msg)
host_ids = []
for host in resp['cluster_hosts']:
host_ids.append(host['id'])
logging.info('added hosts in cluster %s: %s', cluster_id, host_ids)
if len(host_ids) != len(machines):
msg = 'machines %s added to cluster %s do not match hosts %s' % (
machines, cluster_name, host_ids)
raise Exception(msg)
return {cluster_id: host_ids}
def _set_cluster_security(client, cluster_hosts):
"""set cluster security."""
credentials = [
credential for credential in flags.OPTIONS.credentials.split(',')
if ':' in credential
]
logging.info('set cluster security: %s', credentials)
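# e.g. 'server:root=root' becomes {'server_username': 'root', 'server_password': 'root'}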
credential_mapping = {}
for credential in credentials:
credential_name, username_and_password = credential.split(':', 1)
if not credential_name:
raise Exception('there is no credential name in %s' % credential)
if not username_and_password:
raise Exception('there is no username/password in %s' % credential)
if '=' not in username_and_password:
raise Exception('there is no = in %s' % username_and_password)
username, password = username_and_password.split('=', 1)
if not username or not password:
raise Exception(
'there is no username or password in %s' % (
username_and_password))
credential_mapping['%s_username' % credential_name] = username
credential_mapping['%s_password' % credential_name] = password
for cluster_id, host_ids in cluster_hosts.items():
status, resp = client.set_security(
cluster_id, **credential_mapping)
logging.info(
'set security config to cluster %s status: %s, resp: %s',
cluster_id, status, resp)
if status >= 400:
msg = 'failed to set security %s for cluster %s' % (
credential_mapping, cluster_id)
raise Exception(msg)
def _set_cluster_networking(client, cluster_hosts):
"""set cluster networking."""
networking_map = {}
networkings = [
network for network in flags.OPTIONS.networking.split(';')
if '=' in network
]
logging.info('set cluster networking: %s', networkings)
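# e.g. 'management_nic=eth0;management_promisc=0' becomes {'management_nic': 'eth0', 'management_promisc': 0}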
for networking in networkings:
networking_name, networking_value = networking.split('=', 1)
if not networking_name:
raise Exception(
'there is no networking name in %s' % networking)
if networking_name.endswith('_promisc'):
networking_map[networking_name] = int(networking_value)
else:
networking_map[networking_name] = networking_value
for cluster_id, host_ids in cluster_hosts.items():
status, resp = client.set_networking(
cluster_id, **networking_map)
logging.info(
'set networking config %s to cluster %s status: %s, resp: %s',
networking_map, cluster_id, status, resp)
if status >= 400:
msg = 'failed to set networking config %s to cluster %s' % (
networking_map, cluster_id)
raise Exception(msg)
def _set_cluster_partition(client, cluster_hosts):
"""set partiton of each host in cluster."""
partitions = [
partition for partition in flags.OPTIONS.partitions.split(',')
if ':' in partition
]
logging.info('set cluster partition: %s', partitions)
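# e.g. 'tmp:percentage=10' becomes {'tmp_percentage': 10}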
partiton_mapping = {}
for partition in partitions:
partition_name, partition_pair = partition.split(':', 1)
if not partition_name:
raise Exception(
'there is no partition name in %s' % partition)
if not partition_pair:
raise Exception(
'there is no partition pair in %s' % partition)
if '=' not in partition_pair:
raise Exception(
'there is no = in %s' % partition_pair)
partition_type, partition_value = partition_pair.split('=', 1)
if partition_type == 'percentage':
partition_value = int(partition_value)
elif partition_type == 'mbytes':
partition_value = int(partition_value)
else:
raise Exception(
'unsupported partition type %s' % partition_type)
partiton_mapping[
'%s_%s' % (partition_name, partition_type)
] = partition_value
for cluster_id, host_ids in cluster_hosts.items():
status, resp = client.set_partition(
cluster_id, **partiton_mapping)
logging.info(
'set partition config %s to cluster %s status: %s, resp: %s',
partiton_mapping, cluster_id, status, resp)
if status >= 400:
msg = 'failed to set partition %s to cluster %s' % (
partiton_mapping, cluster_id)
raise Exception(msg)
def _set_host_config(client, cluster_hosts):
host_configs = []
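# e.g. 'host1=os-single-controller,os-network;host2=' gives host1 two roles
# and leaves host2 to the role assignment policy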
for host in flags.OPTIONS.host_roles.split(';'):
if not host:
continue
hostname, roles = host.split('=', 1)
if hostname:
roles = [role for role in roles.split(',') if role]
host_configs.append({
'hostname': hostname,
'roles': roles
})
total_hosts = 0
for cluster_id, host_ids in cluster_hosts.items():
total_hosts += len(host_ids)
if total_hosts != len(host_configs):
msg = '%s hosts to assign but got %s host configs' % (
total_hosts, len(host_configs))
raise Exception(msg)
for cluster_id, host_ids in cluster_hosts.items():
for hostid in host_ids:
host_config = host_configs.pop(0)
status, resp = client.update_host_config(
hostid, **host_config)
logging.info(
'set host %s config %s status: %s, resp: %s',
hostid, host_config, status, resp
)
if status >= 400:
msg = 'failed to set host %s config %s' % (
hostid, host_config)
raise Exception(msg)
def _deploy_clusters(client, cluster_hosts):
"""deploy cluster."""
for cluster_id, host_ids in cluster_hosts.items():
status, resp = client.deploy_hosts(cluster_id)
logging.info(
'deploy cluster %s status: %s, resp: %s',
cluster_id, status, resp)
if status >= 400:
msg = 'failed to deploy cluster %s' % cluster_id
raise Exception(msg)
def _get_installing_progress(client, cluster_hosts):
"""get intalling progress."""
timeout = time.time() + flags.OPTIONS.deployment_timeout
clusters_progress = {}
hosts_progress = {}
install_finished = False
failed_hosts = {}
failed_clusters = {}
while time.time() < timeout:
found_installing_clusters = False
found_installing_hosts = False
for cluster_id, host_ids in cluster_hosts.items():
for hostid in host_ids:
if hostid in hosts_progress:
continue
status, resp = client.get_host_installing_progress(hostid)
logging.info(
'get host %s installing progress status: %s, resp: %s',
hostid, status, resp)
if status >= 400:
msg = 'failed to get host %s progress' % hostid
raise Exception(msg)
progress = resp['progress']
if (
progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
progress['percentage'] >= 1.0
):
hosts_progress[hostid] = progress
if progress['state'] in ['ERROR']:
failed_hosts[hostid] = progress
else:
found_installing_hosts = True
if cluster_id in clusters_progress:
continue
status, resp = client.get_cluster_installing_progress(cluster_id)
logging.info(
'get cluster %s installing progress status: %s, resp: %s',
cluster_id, status, resp)
if status >= 400:
msg = 'failed to get cluster %s installing progress' % (
cluster_id)
raise Exception(msg)
progress = resp['progress']
if (
progress['state'] not in ['UNINITIALIZED', 'INSTALLING'] or
progress['percentage'] >= 1.0
):
clusters_progress[cluster_id] = progress
if progress['state'] in ['ERROR']:
failed_clusters[cluster_id] = progress
else:
found_installing_clusters = True
if found_installing_clusters or found_installing_hosts:
logging.info(
'there are some clusters/hosts still installing. '
'sleep %s seconds and retry',
flags.OPTIONS.progress_update_check_interval)
time.sleep(flags.OPTIONS.progress_update_check_interval)
else:
install_finished = True
logging.info('all clusters/hosts are installed.')
break
if not install_finished:
msg = 'installing %s is not all finished: hosts %s clusters %s' % (
cluster_hosts, hosts_progress, clusters_progress)
raise Exception(msg)
if failed_hosts:
msg = 'installing hosts failed: %s' % failed_hosts
raise Exception(msg)
if failed_clusters:
msg = 'installing clusters failed: %s' % failed_clusters
raise Exception(msg)
def _check_dashboard_links(client, cluster_hosts):
dashboard_role = flags.OPTIONS.dashboard_role
dashboard_link_pattern = re.compile(
flags.OPTIONS.dashboard_link_pattern)
for cluster_id, host_ids in cluster_hosts.items():
status, resp = client.get_dashboard_links(cluster_id)
logging.info(
'get cluster %s dashboard links status: %s, resp: %s',
cluster_id, status, resp)
if status >= 400:
msg = 'failed to get cluster %s dashboard links' % cluster_id
raise Exception(msg)
dashboardlinks = resp['dashboardlinks']
if dashboard_role not in dashboardlinks:
msg = 'no dashboard role %s found in %s' % (
dashboard_role, dashboardlinks)
raise Exception(msg)
r = requests.get(dashboardlinks[dashboard_role], verify=False)
r.raise_for_status()
match = dashboard_link_pattern.search(r.text)
if match:
logging.info(
'dashboard login page for cluster %s can be downloaded',
cluster_id)
else:
msg = (
'%s dashboard login page failed to be downloaded\n'
'the content is:\n%s\n'
) % (dashboard_role, r.text)
raise Exception(msg)
def main():
flags.init()
logsetting.init()
client = _get_client()
machines = _poll_switches(client)
adapter_id = _get_adapter(client)
cluster_hosts = _add_cluster(client, adapter_id, machines)
_set_cluster_security(client, cluster_hosts)
_set_cluster_networking(client, cluster_hosts)
_set_cluster_partition(client, cluster_hosts)
_set_host_config(client, cluster_hosts)
_deploy_clusters(client, cluster_hosts)
_get_installing_progress(client, cluster_hosts)
_check_dashboard_links(client, cluster_hosts)
if __name__ == "__main__":
main()

View File

@ -32,7 +32,11 @@ def clean_deployment(cluster_hosts):
.. note::
The function should be called out of database session.
"""
with util.lock('serialized_action'):
with util.lock('serialized_action') as lock:
if not lock:
raise Exception(
'failed to acquire lock to clean deployment')
logging.debug('clean cluster_hosts: %s', cluster_hosts)
with database.session():
cluster_hosts, os_versions, target_systems = (

View File

@ -32,8 +32,12 @@ def clean_installing_progress(cluster_hosts):
.. note::
The function should be called out of database session.
"""
with util.lock('serialized_action'):
logging.debug(
with util.lock('serialized_action') as lock:
if not lock:
raise Exception(
'failed to acquire lock to clean installation progress')
logging.info(
'clean installing progress of cluster_hosts: %s',
cluster_hosts)
with database.session():

View File

@ -32,7 +32,10 @@ def deploy(cluster_hosts):
.. note::
The function should be called out of database session.
"""
with util.lock('serialized_action'):
with util.lock('serialized_action') as lock:
if not lock:
raise Exception('failed to acquire lock to deploy')
logging.debug('deploy cluster_hosts: %s', cluster_hosts)
with database.session():
cluster_hosts, os_versions, target_systems = (

View File

@ -32,7 +32,11 @@ def reinstall(cluster_hosts):
.. note::
The function should be called out of database session.
"""
with util.lock('serialized_action'):
with util.lock('serialized_action') as lock:
if not lock:
raise Exception(
'failed to acquire lock to reinstall')
logging.debug('reinstall cluster_hosts: %s', cluster_hosts)
with database.session():
cluster_hosts, os_versions, target_systems = (

View File

@ -72,8 +72,13 @@ def update_progress(cluster_hosts):
After the progress is updated, this information will be stored back
to the log_progressing_history for next time run.
"""
with util.lock('log_progressing', blocking=False):
logging.debug('update installing progress of cluster_hosts: %s',
with util.lock('log_progressing', blocking=False) as lock:
if not lock:
logging.error(
'failed to acquire lock to calculate installation progress')
return
logging.info('update installing progress of cluster_hosts: %s',
cluster_hosts)
os_versions = {}
target_systems = {}

View File

@ -37,11 +37,13 @@ def lock(lock_name, blocking=True, timeout=10):
yield instance_lock
else:
logging.info('lock %s is already held', lock_name)
yield None
except Exception as error:
logging.info(
'redis fails to acquire the lock %s', lock_name)
logging.exception(error)
yield None
finally:
instance_lock.acquired_until = 0
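The hunk above makes util.lock yield None when the lock cannot be taken instead of raising, which is what every "with util.lock(...) as lock:" caller earlier in this commit tests for. A minimal, self-contained sketch of that contract (a threading.Lock stands in for the redis-backed lock Compass actually uses):

import logging
import threading
from contextlib import contextmanager

_demo_lock = threading.Lock()  # stand-in for the redis-backed lock

@contextmanager
def lock(lock_name, blocking=True):
    """yield a truthy lock object on success, or None when acquisition fails."""
    acquired = _demo_lock.acquire(blocking)
    try:
        if acquired:
            yield _demo_lock
        else:
            logging.info('lock %s is already held', lock_name)
            yield None
    finally:
        if acquired:
            _demo_lock.release()

# callers follow the same shape as the action-module diffs above:
with lock('serialized_action') as serialized_lock:
    if not serialized_lock:
        raise Exception('failed to acquire lock')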

View File

@ -396,11 +396,11 @@ class Client(object):
def parse_networking(cls, kwargs):
"""parse arguments to network data."""
data = {}
possible_keys = [
global_keys = [
'nameservers', 'search_path', 'gateway',
'proxy', 'ntp_server', 'ha_vip']
for key, value in kwargs.items():
if key in possible_keys:
if key in global_keys:
data.setdefault('global', {})[key] = value
else:
if '_' not in key:
@ -430,6 +430,8 @@ class Client(object):
:type proxy: str.
:param ntp_server: ntp server ip address to sync timestamp.
:type ntp_server: str.
:param ha_vip: ha vip address to run ha proxy.
:type ha_vip: str.
:param <interface>_ip_start: start ip address to host's interface.
:type <interface>_ip_start: str.
:param <interface>_ip_end: end ip address to host's interface.

View File

@ -78,7 +78,14 @@ TO_HOST_TRANSLATORS = {
), KeyTranslator(
translated_keys=[functools.partial(
config_translator_callbacks.get_key_from_pattern,
to_pattern='/node_mapping/%(node_name)s'
to_pattern='/node_mapping/%(node_name)s/roles'
)],
from_keys={'node_name': '/node_name'}
)],
'/networking/interfaces/management/ip': [KeyTranslator(
translated_keys=[functools.partial(
config_translator_callbacks.get_key_from_pattern,
to_pattern='/node_mapping/%(node_name)s/management_ip'
)],
from_keys={'node_name': '/node_name'}
)],
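For orientation, a hedged sketch of the node_mapping entry these two translators build up (the node name and values are illustrative, taken from the test data in this commit):

node_mapping = {
    'openstack.host1.1': {
        'roles': ['os-single-controller'],
        'management_ip': '192.168.20.100',
    },
}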

View File

@ -44,7 +44,7 @@ CLUSTER_HOST_MERGER = ConfigMerger(
'default': 'default'},
to_key='/roles',
value=config_merger_callbacks.assign_roles_by_host_numbers,
override=config_merger_callbacks.override_if_empty
override=True
),
ConfigMapping(
path_list=['/config_mapping']
@ -132,7 +132,8 @@ CLUSTER_HOST_MERGER = ConfigMerger(
to_key='/haproxy/priority',
value=functools.partial(
config_merger_callbacks.assign_by_order,
orders=config_merger_callbacks.generate_order(0, -1)
orders=config_merger_callbacks.generate_order(0, -1),
reverse=True
),
from_upper_keys={'prefix': '/haproxy/default_priority'},
from_lower_keys={'conditions': '/haproxy_roles'},

View File

@ -154,6 +154,9 @@ def _update_assigned_roles(lower_refs, to_key, bundle_mapping,
bundled_role = bundle_mapping[role]
bundled_roles.add(bundled_role)
roles |= set(role_bundles[bundled_role])
else:
roles.add(role)
for bundled_role in bundled_roles:
bundled_maxs[bundled_role] = _dec_max_min(
bundled_maxs[bundled_role])
@ -303,15 +306,21 @@ def assign_roles(_upper_ref, _from_key, lower_refs, to_key,
lower_roles, unassigned_hosts = _update_assigned_roles(
lower_refs, to_key, bundle_mapping, role_bundles,
bundled_maxs, bundled_mins)
_update_exclusive_roles(bundled_exclusives, lower_roles, unassigned_hosts,
if not unassigned_hosts:
logging.debug(
'there are no unassigned hosts; assigned roles by host: %s',
lower_roles)
else:
_update_exclusive_roles(
bundled_exclusives, lower_roles, unassigned_hosts,
bundled_maxs, bundled_mins, role_bundles)
_assign_roles_by_mins(
role_bundles, lower_roles, unassigned_hosts,
bundled_maxs, bundled_mins)
_assign_roles_by_maxs(
role_bundles, lower_roles, unassigned_hosts,
bundled_maxs)
_sort_roles(lower_roles, roles)
return lower_roles
@ -427,12 +436,16 @@ def generate_order(start=0, end=-1):
def assign_by_order(_upper_ref, _from_key, lower_refs, _to_key,
prefix='',
orders=[], default_order=0,
orders=[], default_order=0, reverse=False,
conditions={}, **kwargs):
"""assign to_key by order."""
host_values = {}
orders = iter(orders)
for lower_key, _ in lower_refs.items():
lower_keys = lower_refs.keys()
if reverse:
lower_keys = reversed(lower_keys)
for lower_key in lower_keys:
if lower_key in conditions and conditions[lower_key]:
try:
order = orders.next()
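To illustrate what the new reverse flag changes, a small standalone sketch of the iteration order only (the real assign_by_order also applies prefix and conditions, and generate_order here is an assumed re-implementation):

import itertools

def generate_order(start=0, end=-1):
    # unbounded ascending order when end is -1, as the merger config above uses
    return itertools.count(start) if end == -1 else iter(range(start, end))

hosts = [1, 2, 3]                # host ids in their natural order
orders = generate_order(0, -1)
for host in reversed(hosts):     # reverse=True walks the hosts back to front
    print('host %s -> order %s' % (host, next(orders)))  # 3 -> 0, 2 -> 1, 1 -> 2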

View File

@ -6,7 +6,7 @@ ADAPTERS = [
ROLES = [
{'name': 'os-single-controller', 'target_system': 'openstack'},
{'name': 'os-network', 'target_system': 'openstack'},
{'name': 'os-compute', 'target_system': 'openstack'},
{'name': 'os-compute-worker', 'target_system': 'openstack'},
]
SWITCHES = [
{'ip': '1.2.3.4', 'vendor_info': 'huawei', 'credential_data': json.dumps({'version': 'v2c', 'community': 'public'})},
@ -85,14 +85,14 @@ HOSTS_BY_CLUSTER = {
'mac': '00:00:01:02:03:04',
'mutable': False,
'config_data': json.dumps({
'netwoon.dumps(king': {
'networking': {
'interfaces': {
'management': {
'ip': '192.168.20.100',
},
},
},
'roles': ["os-single-controller", "os-network", "os-compute"],
'roles': ["os-single-controller", "os-network", "os-compute-worker"],
}),
},
],
@ -210,7 +210,7 @@ cobbler_EXPECTED = {
'tool': 'chef',
'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
'proxy': 'http://192.168.20.254:3128',
'run_list': '"role[os-single-controller]","role[os-network]","role[os-compute]"',
'run_list': '"role[os-single-controller]","role[os-compute-worker]","role[os-network]"',
'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100',
'ntp_server': '192.168.20.254',
'chef_client_name': 'openstack.host1.1',

View File

@ -6,7 +6,7 @@ ADAPTERS = [
ROLES = [
{'name': 'os-single-controller', 'target_system': 'openstack'},
{'name': 'os-network', 'target_system': 'openstack'},
{'name': 'os-compute', 'target_system': 'openstack'},
{'name': 'os-compute-worker', 'target_system': 'openstack'},
]
SWITCHES = [
{'ip': '1.2.3.4', 'vendor_info': 'huawei', 'credential_data': json.dumps({'version': 'v2c', 'community': 'public'})},
@ -93,7 +93,7 @@ HOSTS_BY_CLUSTER = {
},
},
},
'roles': ["os-single-controller", "os-network"],
'roles': ["os-single-controller"],
}),
},{
'hostname': 'host2',
@ -107,7 +107,7 @@ HOSTS_BY_CLUSTER = {
},
},
},
'roles': ["os-compute"],
'roles': ["os-network", "os-compute-worker"],
}),
},
@ -226,7 +226,7 @@ cobbler_EXPECTED = {
'tool': 'chef',
'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
'proxy': 'http://192.168.20.254:3128',
'run_list': '"role[os-single-controller]","role[os-network]"',
'run_list': '"role[os-single-controller]"',
'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100,host2.1,192.168.20.101',
'ntp_server': '192.168.20.254',
'chef_client_name': 'openstack.host1.1',
@ -260,7 +260,7 @@ cobbler_EXPECTED = {
'tool': 'chef',
'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
'proxy': 'http://192.168.20.254:3128',
'run_list': '"role[os-compute]"',
'run_list': '"role[os-compute-worker]","role[os-network]"',
'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100,host2.1,192.168.20.101',
'ntp_server': '192.168.20.254',
'chef_client_name': 'openstack.host2.1',

View File

@ -6,7 +6,7 @@ ADAPTERS = [
ROLES = [
{'name': 'os-single-controller', 'target_system': 'openstack'},
{'name': 'os-network', 'target_system': 'openstack'},
{'name': 'os-compute', 'target_system': 'openstack'},
{'name': 'os-compute-worker', 'target_system': 'openstack'},
]
SWITCHES = [
{'ip': '1.2.3.4', 'vendor_info': 'huawei', 'credential_data': json.dumps({'version': 'v2c', 'community': 'public'})},
@ -169,7 +169,7 @@ HOSTS_BY_CLUSTER = {
},
},
},
'roles': ["os-network", "os-compute"],
'roles': ["os-network", "os-compute-worker"],
}),
},
],
@ -200,7 +200,7 @@ HOSTS_BY_CLUSTER = {
},
},
},
'roles': ["os-network", "os-compute"],
'roles': ["os-network", "os-compute-worker"],
}),
},
],
@ -352,7 +352,7 @@ cobbler_EXPECTED = {
'tool': 'chef',
'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
'proxy': 'http://192.168.20.254:3128',
'run_list': '"role[os-network]","role[os-compute]"',
'run_list': '"role[os-compute-worker]","role[os-network]"',
'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100,host2.1,192.168.20.101',
'ntp_server': '192.168.20.254',
'chef_client_name': 'openstack.host2.1',
@ -420,7 +420,7 @@ cobbler_EXPECTED = {
'tool': 'chef',
'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
'proxy': 'http://192.168.20.254:3128',
'run_list': '"role[os-network]","role[os-compute]"',
'run_list': '"role[os-compute-worker]","role[os-network]"',
'ignore_proxy': '127.0.0.1,localhost,host1.2,192.168.20.110,host2.2,192.168.20.111',
'ntp_server': '192.168.20.254',
'chef_client_name': 'openstack.host2.2',

View File

@ -0,0 +1,258 @@
import simplejson as json
ADAPTERS = [
{'name': 'CentOS_openstack', 'os': 'CentOS', 'target_system': 'openstack'},
]
ROLES = [
{'name': 'os-single-controller', 'target_system': 'openstack'},
{'name': 'os-network', 'target_system': 'openstack'},
{'name': 'os-compute-worker', 'target_system': 'openstack'},
]
SWITCHES = [
{'ip': '1.2.3.4', 'vendor_info': 'huawei', 'credential_data': json.dumps({'version': 'v2c', 'community': 'public'})},
]
MACHINES_BY_SWITCH = {
'1.2.3.4': [
{'mac': '00:00:01:02:03:04', 'port': 1, 'vlan': 1},
],
}
CLUSTERS = [
{
'name': 'cluster1',
'adapter': 'CentOS_openstack',
'mutable': False,
'security_config': json.dumps({
'server_credentials': {
'username': 'root', 'password': 'huawei'
},
'service_credentials': {
'username': 'service', 'password': 'huawei'
},
'console_credentials': {
'username': 'admin', 'password': 'huawei'
}
}),
'networking_config': json.dumps({
'interfaces': {
'management': {
'nic': 'eth0',
'promisc': 0,
'netmask': '255.255.255.0',
'ip_end': '192.168.20.200',
'gateway': '',
'ip_start': '192.168.20.100'
},
'storage': {
'nic': 'eth0',
'promisc': 0,
'netmask': '255.255.254.0',
'ip_end': '10.145.88.200',
'gateway': '10.145.88.1',
'ip_start': '10.145.88.100'
},
'public': {
'nic': 'eth2',
'promisc': 1,
'netmask': '255.255.254.0',
'ip_end': '10.145.88.255',
'gateway': '10.145.88.1',
'ip_start': '10.145.88.100'
},
'tenant': {
'nic': 'eth0',
'promisc': 0,
'netmask': '255.255.254.0',
'ip_end': '10.145.88.120',
'gateway': '10.145.88.1',
'ip_start': '10.145.88.100'
}
},
'global': {
'nameservers': '192.168.20.254',
'proxy': 'http://192.168.20.254:3128',
'ntp_server': '192.168.20.254',
'search_path': 'ods.com',
'gateway': '10.145.88.1'
},
}),
'partition_config': json.dumps('/home 20%%;/tmp 10%%;/var 30%%;'),
},
]
HOSTS_BY_CLUSTER = {
'cluster1': [
{
'hostname': 'host1',
'mac': '00:00:01:02:03:04',
'mutable': False,
'config_data': json.dumps({
'networking': {
'interfaces': {
'management': {
'ip': '192.168.20.100',
},
},
},
'roles': ["os-dashboard"],
}),
},
],
}
cobbler_MOCK = {
'host_configs': []
}
chef_MOCK = {
'configs': {
'env_default': {
'all_roles': {
'os-single-controller': 'openstack controller node',
'os-network': 'openstack network node',
'os-compute-worker': 'openstack nova node'
},
'config_mapping': {
'/credential/identity/users/admin': '/security/console_credentials',
'/credential/identity/users/compute': '/security/service_credentials',
'/credential/identity/users/image': '/security/service_credentials',
'/credential/identity/users/metering': '/security/service_credentials',
'/credential/identity/users/network': '/security/service_credentials',
'/credential/identity/users/object-store': '/security/service_credentials',
'/credential/identity/users/volume': '/security/service_credentials',
'/credential/mysql/compute': '/security/service_credentials',
'/credential/mysql/dashboard': '/security/service_credentials',
'/credential/mysql/identity': '/security/service_credentials',
'/credential/mysql/image': '/security/service_credentials',
'/credential/mysql/metering': '/security/service_credentials',
'/credential/mysql/network': '/security/service_credentials',
'/credential/mysql/volume': '/security/service_credentials',
'/credential/mysql/super/password': '/security/service_credentials/password',
'/networking/control/interface': '/networking/interfaces/management/nic',
'/ntp/ntpserver': '/networking/global/ntp_server',
'/networking/storage/interface': '/networking/interfaces/storage/nic',
'/networking/public/interface': '/networking/interfaces/public/nic',
'/networking/tenant/interface': '/networking/interfaces/tenant/nic',
'/networking/plugins/ovs/gre/local_ip_interface': '/networking/interfaces/tenant/nic',
},
'role_mapping': {
'os-single-controller': {
'/db/mysql/bind_address': '/networking/interfaces/management/ip',
'/mq/rabbitmq/bind_address': '/networking/interfaces/management/ip',
'/endpoints/compute/metadata/host': '/networking/interfaces/management/ip',
'/endpoints/compute/novnc/host': '/networking/interfaces/management/ip',
'/endpoints/compute/service/host': '/networking/interfaces/management/ip',
'/endpoints/compute/xvpvnc/host': '/networking/interfaces/management/ip',
'/endpoints/ec2/admin/host': '/networking/interfaces/management/ip',
'/endpoints/ec2/service/host': '/networking/interfaces/management/ip',
'/endpoints/identity/admin/host': '/networking/interfaces/management/ip',
'/endpoints/identity/service/host': '/networking/interfaces/management/ip',
'/endpoints/image/registry/host': '/networking/interfaces/management/ip',
'/endpoints/image/service/host': '/networking/interfaces/management/ip',
'/endpoints/metering/service/host': '/networking/interfaces/management/ip',
'/endpoints/network/service/host': '/networking/interfaces/management/ip',
'/endpoints/volume/service/host': '/networking/interfaces/management/ip'
},
'os-network': {
},
'os-compute-worker': {
}
},
'dashboard_roles': ['os-single-controller', 'os-dashboard'],
'role_assign_policy': {
'default':{
'bundles': [],
'exclusives': ['os-single-controller', 'os-network'],
'roles': ['os-single-controller', 'os-compute-worker', 'os-network'],
'default_min': 1,
'default_max': 1,
'maxs': {'os-compute-worker':-1}
},
'policy_by_host_numbers':{
'1': {
'bundles': [['os-single-controller','os-compute-worker','os-network']],
'exclusives':[]
},
'2': {
'bundles': [['os-compute-worker','os-network']],
'exclusives':['os-single-controller']
},
},
},
},
},
}
cobbler_EXPECTED = {
'expected_host_configs': [{
'profile': 'CentOS',
'name_servers_search': '1.ods.com ods.com',
'name': 'host1.1',
'hostname': 'host1',
'modify_interface': {
'dnsname-eth2': 'floating-host1.1.ods.com',
'dnsname-eth0': u'host1.1.ods.com',
'ipaddress-eth2': '10.145.88.100',
'static-eth2': True,
'static-eth0': True,
'netmask-eth0': '255.255.255.0',
'ipaddress-eth0': u'192.168.20.100',
'netmask-eth2': '255.255.254.0',
'macaddress-eth0': '00:00:01:02:03:04',
'management-eth2': False,
'management-eth0': True
},
'name_servers': '192.168.20.254',
'gateway': '10.145.88.1',
'ksmeta': {
'username': u'root',
'promisc_nics': 'eth2',
'chef_url': 'https://localhost/',
'tool': 'chef',
'partition': '/home 20%%;/tmp 10%%;/var 30%%;',
'proxy': 'http://192.168.20.254:3128',
'run_list': '"role[os-dashboard]"',
'ignore_proxy': '127.0.0.1,localhost,host1.1,192.168.20.100',
'ntp_server': '192.168.20.254',
'chef_client_name': 'openstack.host1.1',
'cluster_databag': 'openstack_1',
'chef_node_name': u'openstack.host1.1'
},
}],
}
chef_EXPECTED = {
'expected_configs': {
'openstack_1': {
'credential': {
'identity': {
'users': {
'compute': {'username': 'service', 'password': 'huawei'},
'network': {'username': 'service', 'password': 'huawei'},
'admin': {'username': 'admin', 'password': 'huawei'},
'image': {'username': 'service', 'password': 'huawei'},
'metering': {'username': 'service', 'password': 'huawei'},
'volume': {'username': 'service', 'password': 'huawei'},
'object-store': {'username': 'service', 'password': 'huawei'}
}
},
'mysql': {
'compute': {'username': 'service', 'password': 'huawei'},
'network': {'username': 'service', 'password': 'huawei'},
'image': {'username': 'service', 'password': 'huawei'},
'metering': {'username': 'service', 'password': 'huawei'},
'volume': {'username': 'service', 'password': 'huawei'},
'dashboard': {'username': 'service', 'password': 'huawei'},
'super': {'password': 'huawei'},
'identity': {'username': 'service', 'password': 'huawei'}
}
},
'networking': {
'control': {'interface': 'eth0'},
'storage': {'interface': 'eth0'},
'public': {'interface': 'eth2'},
'tenant': {'interface': 'eth0'}
},
'ntp': {'ntpserver': '192.168.20.254'},
},
},
}

View File

@ -407,6 +407,10 @@ class TestEndToEnd(unittest2.TestCase):
"""test multi clusters multi hosts."""
self._test('test3')
def test_4(self):
"""test deploy unexist roles."""
self._test('test4')
if __name__ == '__main__':
flags.init()

View File

@ -96,7 +96,7 @@ class TestAssignRoles(unittest2.TestCase):
assigned = config_merger_callbacks.assign_roles(
None, None, lower_refs, 'roles', roles=self.roles_,
maxs=self.maxs_, default_min=self.default_min_)
self.assertEqual(assigned, {1: ['control', 'api', 'compute']})
self.assertEqual(assigned, {1: ['control', 'api', 'compute', 'mysql']})
def test_assign_roles_allinone_roles_set_less_roles(self):
lower_configs = {
@ -105,10 +105,10 @@ class TestAssignRoles(unittest2.TestCase):
lower_refs = {}
for hostid, config in lower_configs.items():
lower_refs[hostid] = config_reference.ConfigReference(config)
self.assertRaises(
ValueError, config_merger_callbacks.assign_roles,
assigned = config_merger_callbacks.assign_roles(
None, None, lower_refs, 'roles', roles=self.roles_,
maxs=self.maxs_, default_min=self.default_min_)
self.assertEqual(assigned, {1: ['control', 'api']})
def test_assign_roles_allinone_exclusives(self):
exclusives = ['control']

View File

@ -257,7 +257,14 @@ else
echo "/mnt/${IMAGE_NAME}-${IMAGE_ARCH} is imported"
fi
else
echo "distro $IMAGE_NAME has already existed"
echo "distro ${IMAGE_NAME}-${IMAGE_ARCH} has already existed"
sudo cobbler distro edit --name=${IMAGE_NAME}-${IMAGE_ARCH} --arch=${IMAGE_ARCH} --breed=redhat
if [[ "$?" != "0" ]]; then
echo "failed to edit distro ${IMAGE_NAME}-${IMAGE_ARCH}"
exit 1
else
echo "distro ${IMAGE_NAME}-${IMAGE_ARCH} is updated"
fi
fi
# add profile
@ -278,7 +285,7 @@ else
fi
else
echo "profile $IMAGE_NAME has already existed."
sudo cobbler profile edit --name="${IMAGE_NAME}-${IMAGE_ARCH}" --repo=ppa_repo
sudo cobbler profile edit --name="${IMAGE_NAME}-${IMAGE_ARCH}" --repo=ppa_repo --distro="${IMAGE_NAME}-${IMAGE_ARCH}" --ksmeta="tree=http://$ipaddr/cobbler/ks_mirror/${IMAGE_NAME}-${IMAGE_ARCH}" --kickstart=/var/lib/cobbler/kickstarts/default.ks
if [[ "$?" != "0" ]]; then
echo "failed to edit profile ${IMAGE_NAME}-${IMAGE_ARCH}"
exit 1

14
regtest/prepare.sh Normal file
View File

@ -0,0 +1,14 @@
#!/bin/bash -x
# create a bridge named 'installation' so that compass and pxeboot vm are in the
# same l2 network.
brctl show |grep installation > /dev/null
if [[ $? -eq 0 ]] ; then
echo "bridge already exists"
else
brctl addbr installation
brctl addif installation eth1
fi
ifconfig installation 172.16.0.1 broadcast 172.16.255.255 netmask 255.255.0.0 up
ifconfig eth1 up

54
regtest/regtest.conf Normal file
View File

@ -0,0 +1,54 @@
# Set test script variables
export VIRT_NUM=${VIRT_NUM:-'1'}
export VIRT_CPUS=${VIRT_CPUS:-'10'}
export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'20G'}
export SWITCH_IPS=${SWITCH_IPS:-'10.145.81.219'}
export SWITCH_VERSION=${SWITCH_VERSION:-'2c'}
export SWITCH_COMMUNITY=${SWITCH_COMMUNITY:-'public'}
export SWITCH_CREDENTIAL=${SWITCH_CREDENTIAL:-"version=${SWITCH_VERSION},community=${SWITCH_COMMUNITY}"}
export HOST_ROLES=${HOST_ROLES:-''}
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'172.16.1.1'}
export MANAGEMENT_IP_END=${MANAGEMENT_IP_END:-'172.16.1.254'}
export MANAGEMENT_NETMASK=${MANAGEMENT_NETMASK:-'255.255.0.0'}
export MANAGEMENT_NIC=${MANAGEMENT_NIC:-'eth0'}
export MANAGEMENT_PROMISC=${MANAGEMENT_PROMISC:-'0'}
export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.1'}
export TENANT_IP_END=${TENANT_IP_END:-'172.16.2.254'}
export TENANT_NETMASK=${TENANT_NETMASK:-'255.255.255.0'}
export TENANT_NIC=${TENANT_NIC:-'eth1'}
export TENANT_PROMISC=${TENANT_PROMISC:-'0'}
export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.1'}
export PUBLIC_IP_END=${PUBLIC_IP_END:-'172.16.3.254'}
export PUBLIC_NETMASK=${PUBLIC_NETMASK:-'255.255.255.0'}
export PUBLIC_NIC=${PUBLIC_NIC:-'eth2'}
export PUBLIC_PROMISC=${PUBLIC_PROMISC:-'1'}
export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.1'}
export STORAGE_IP_END=${STORAGE_IP_END:-'172.16.4.254'}
export STORAGE_NETMASK=${STORAGE_NETMASK:-'255.255.255.0'}
export STORAGE_NIC=${STORAGE_NIC:-'eth3'}
export STORAGE_PROMISC=${STORAGE_PROMISC:-'0'}
export NAMESERVERS=${NAMESERVERS:-'172.16.0.1'}
export NTP_SERVER=${NTP_SERVER:-'172.16.0.1'}
export GATEWAY=${GATEWAY:-'172.16.0.1'}
export PROXY=${PROXY:-"http://172.16.0.1:3128"}
export SEARCH_PATH=${SEARCH_PATH:-'ods.com'}
export HA_VIP=${HA_VIP:-''}
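# NETWORKING is passed straight to bin/client.py --networking: one semicolon-separated list of <network_property_name>=<value> pairs.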
export NETWORKING=${NETWORKING:-"nameservers=$NAMESERVERS;search_path=$SEARCH_PATH;gateway=$GATEWAY;proxy=$PROXY;ntp_server=$NTP_SERVER;ha_vip=$HA_VIP;management_ip_start=$MANAGEMENT_IP_START;management_ip_end=$MANAGEMENT_IP_END;management_netmask=$MANAGEMENT_NETMASK;management_gateway=;management_nic=$MANAGEMENT_NIC;management_promisc=$MANAGEMENT_PROMISC;tenant_ip_start=$TENANT_IP_START;tenant_ip_end=$TENANT_IP_END;tenant_netmask=$TENANT_NETMASK;tenant_gateway=;tenant_nic=$TENANT_NIC;tenant_promisc=$TENANT_PROMISC;public_ip_start=$PUBLIC_IP_START;public_ip_end=$PUBLIC_IP_END;public_netmask=$PUBLIC_NETMASK;public_gateway=;public_nic=$PUBLIC_NIC;public_promisc=$PUBLIC_PROMISC;storage_ip_start=$STORAGE_IP_START;storage_ip_end=$STORAGE_IP_END;storage_netmask=$STORAGE_NETMASK;storage_gateway=;storage_nic=$STORAGE_NIC;storage_promisc=$STORAGE_PROMISC"}
export HOME_PERCENTAGE=${HOME_PERCENTAGE:-'40'}
export TMP_PERCENTAGE=${TMP_PERCENTAGE:-'10'}
export VAR_PERCENTAGE=${VAR_PERCENTAGE:-'15'}
export PARTITION=${PARTITION:-"home:percentage=${HOME_PERCENTAGE},tmp:percentage=${TMP_PERCENTAGE},var:percentage=${VAR_PERCENTAGE}"}
export SERVER_USERNAME=${SERVER_USERNAME:-root}
export SERVER_PASSWORD=${SERVER_PASSWORD:-root}
export SERVICE_USERNAME=${SERVICE_USERNAME:-service}
export SERVICE_PASSWORD=${SERVICE_PASSWORD:-service}
export CONSOLE_USERNAME=${CONSOLE_USERNAME:-console}
export CONSOLE_PASSWORD=${CONSOLE_PASSWORD:-console}
export SECURITY=${SECURITY:-"server:${SERVER_USERNAME}=${SERVER_PASSWORD},service:${SERVICE_USERNAME}=${SERVICE_PASSWORD},console:${CONSOLE_USERNAME}=${CONSOLE_PASSWORD}"}
export DASHBOARD_ROLE=${DASHBOARD_ROLE:-"os-controller"}

141
regtest/regtest.sh Executable file
View File

@ -0,0 +1,141 @@
#!/bin/bash -x
function mac_address_part() {
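# printf may emit up to four hex digits for $RANDOM (0-32767); the lines below keep only the last two.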
hex_number=$(printf '%02x' $RANDOM)
number_length=${#hex_number}
number_start=$(expr $number_length - 2)
echo ${hex_number:$number_start:2}
}
function mac_address() {
echo "00:00:$(mac_address_part):$(mac_address_part):$(mac_address_part):$(mac_address_part)"
}
REGTEST_CONF=${REGTEST_CONF:-"regtest.conf"}
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source ${REGTEST_DIR}/${REGTEST_CONF}
declare -A roles_list
machines=''
for roles in ${HOST_ROLES//;/ }; do
roles_list[${#roles_list[@]}]=${roles}
done
echo "role list: ${roles_list[@]}"
roles_offset=0
host_roles_list=''
virtmachines=$(virsh list --name)
for virtmachine in $virtmachines; do
echo "destroy $virtmachine"
virsh destroy $virtmachine
if [[ "$?" != "0" ]]; then
echo "destroy instance $virtmachine failed"
exit 1
fi
done
virtmachines=$(virsh list --all --name)
for virtmachine in $virtmachines; do
echo "undefine $virtmachine"
virsh undefine $virtmachine
if [[ "$?" != "0" ]]; then
echo "undefine instance $virtmachine failed"
exit 1
fi
done
echo "setup $VIRT_NUM virt machines"
for i in `seq $VIRT_NUM`; do
if [[ ! -e /tmp/pxe${i}.raw ]]; then
echo "create image for instance pxe$i"
qemu-img create -f raw /tmp/pxe${i}.raw ${VIRT_DISK}
if [[ "$?" != "0" ]]; then
echo "create image /tmp/pxe${i}.raw failed"
exit 1
fi
else
echo "recreate image for instance pxe$i"
rm -rf /tmp/pxe${i}.raw
qemu-img create -f raw /tmp/pxe${i}.raw ${VIRT_DISK}
if [[ "$?" != "0" ]]; then
echo "create image /tmp/pxe${i}.raw failed"
exit 1
fi
fi
mac=$(mac_address)
echo "virt-install instance pxe$i on mac ${mac}"
virt-install --accelerate --hvm --connect qemu:///system \
--network=bridge:installation,mac=${mac} --pxe \
--network=bridge:installation \
--network=bridge:installation \
--network=bridge:installation \
--name pxe${i} --ram=${VIRT_MEM} \
--disk /tmp/pxe${i}.raw,format=raw \
--vcpus=${VIRT_CPUS} \
--graphics vnc,listen=0.0.0.0 \
--noautoconsole \
--autostart \
--os-type=linux --os-variant=rhel6
if [[ "$?" != "0" ]]; then
echo "install instance pxe${i} failed"
exit 1
fi
echo "make pxe${i} reboot if installation failing."
sed -i "/<boot dev='hd'\/>/ a\ <bios useserial='yes' rebootTimeout='0'\/>" /etc/libvirt/qemu/pxe${i}.xml
echo "check pxe${i} state"
state=$(virsh domstate pxe${i})
if [[ "$state" == "running" ]]; then
echo "pxe${i} is already running"
virsh destroy pxe${i}
if [[ "$?" != "0" ]]; then
echo "detroy intsance pxe${i} failed"
exit 1
fi
fi
echo "start pxe${i}"
virsh start pxe${i}
if [[ "$?" != "0" ]]; then
echo "start instance pxe${i} failed"
exit 1
fi
if [ -z "$machines" ]; then
machines="${mac}"
else
machines="${machines,${mac}"
fi
if [ $roles_offset -lt ${#roles_list[@]} ]; then
host_roles="host${i}=${roles_list[$roles_offset]}"
roles_offset=$(expr $roles_offset + 1)
else
host_roles="host${i}="
fi
if [ -z "$host_roles_list" ]l then
host_roles_list="$host_roles"
else
host_roles_list="${host_roles_list};$host_roles"
fi
done
echo "machines: $machines"
echo "host roles: $host_roles_list"
virsh list
ln -sf /var/log/cobbler/anamon cobbler_logs
ln -sf /var/log/compass compass_logs
CLIENT_SCRIPT=/opt/compass/bin/client.py
/opt/compass/bin/refresh.sh
if [[ "$?" != "0" ]]; then
echo "failed to refresh"
exit 1
fi
${CLIENT_SCRIPT} --logfile= --loglevel=info --logdir= --networking="${NETWORKING}" --partitions="${PARTITION}" --credentials="${SECURITY}" --host_roles="${host_roles_list}" --dashboard_role="${DASHBOARD_ROLE}" --switch_ips="${SWITCH_IPS}" --machines="${machines}" --switch_credential="${SWITCH_CREDENTIAL}"
if [[ "$?" != "0" ]]; then
echo "deploy cluster failed"
exit 1
fi

9
regtest/regtest2.conf Normal file
View File

@ -0,0 +1,9 @@
# Set test script variables
export VIRT_NUM=${VIRT_NUM:-'2'}
export VIRT_CPUS=${VIRT_CPUS:-'5'}
export VIRT_MEM=${VIRT_MEM:-'8192'}
export VIRT_DISK=${VIRT_DISK:-'20G'}
export HOST_ROLES=${HOST_ROLES:-'os-controller,os-ops-database,os-ops-messaging,os-image'}
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source ${REGTEST_DIR}/regtest.conf

10
regtest/regtest3.conf Normal file
View File

@ -0,0 +1,10 @@
# Set test script variables
export VIRT_NUM=${VIRT_NUM:-'7'}
export VIRT_CPUS=${VIRT_CPUS:-'4'}
export VIRT_MEM=${VIRT_MEM:-'6144'}
export VIRT_DISK=${VIRT_DISK:-'20G'}
export HOST_ROLES=${HOST_ROLES:-'os-controller,os-image;os-ops-database,os-ops-messaging;os-network,os-block-storage-worker;os-ha;os-ha'}
export HA_VIP=${HA_VIP:-'172.16.1.253'}
REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source ${REGTEST_DIR}/regtest.conf