add flavor support

Change-Id: I9dd49c4302c7b1a82a88f08e2d1b439bb26176bf
This commit is contained in:
xiaodongwang 2014-08-14 11:56:36 -07:00
parent f0124ca7b1
commit 359f20489e
114 changed files with 1875 additions and 415 deletions

View File

@ -115,38 +115,6 @@ def dropdb():
database.drop_db()
@app_manager.command
def createtable():
"""Create database table."""
database.init()
if not flags.OPTIONS.table_name:
print 'flag --table_name is missing'
return
table_name = flags.OPTIONS.table_name
if table_name not in TABLE_MAPPING:
print '--table_name should be in %s' % TABLE_MAPPING.keys()
return
database.create_table(TABLE_MAPPING[table_name])
@app_manager.command
def droptable():
"""Drop database table."""
database.init()
if not flags.OPTIONS.table_name:
print 'flag --table_name is missing'
return
table_name = flags.OPTIONS.table_name
if table_name not in TABLE_MAPPING:
print '--table_name should be in %s' % TABLE_MAPPING.keys()
return
database.drop_table(TABLE_MAPPING[table_name])
@app_manager.command
def reinstall_clusters():
"""Reinstall hosts in clusters.

View File

@ -83,6 +83,8 @@ def update_progress(cluster_hosts):
cluster_hosts)
os_names = {}
distributed_systems = {}
os_installers = {}
package_installers = {}
with database.session() as session:
clusters = session.query(models.Cluster).all()
for cluster in clusters:
@ -91,14 +93,14 @@ def update_progress(cluster_hosts):
adapter = cluster.adapter
os_installer = adapter.adapter_os_installer
if os_installer:
os_installer_name = os_installer.name
os_installers[clusterid] = os_installer.name
else:
os_installer_name = None
os_installers[clusterid] = None
package_installer = adapter.adapter_package_installer
if package_installer:
package_installer_name = package_installer.name
package_installers[clusterid] = package_installer.name
else:
package_installer_name = None
package_installers[clusterid] = None
distributed_system_name = cluster.distributed_system_name
os_name = cluster.os_name
@ -111,20 +113,20 @@ def update_progress(cluster_hosts):
logging.info(
'update progress for '
'os_installer_name %s,'
'os_installers %s,'
'os_names %s,'
'package_installer_name %s,'
'package_installers %s,'
'distributed_systems %s,'
'cluster_hosts %s',
os_installer_name,
os_installers,
os_names,
package_installer_name,
package_installers,
distributed_systems,
cluster_hosts
)
progress_calculator.update_progress(
os_installer_name,
os_installers,
os_names,
package_installer_name,
package_installers,
distributed_systems,
cluster_hosts)

View File

@ -29,7 +29,7 @@ app.debug = True
# app.register_blueprint(blueprint, url_prefix='/api')
app.config['SECRET_KEY'] = setting.USER_SECRET_KEY
app.config['SECRET_KEY'] = 'abcd'
app.config['AUTH_HEADER_NAME'] = setting.USER_AUTH_HEADER_NAME
app.config['REMEMBER_COOKIE_DURATION'] = (
datetime.timedelta(

View File

@ -302,11 +302,25 @@ def take_user_action(user_id):
),
200
)
disable_user_func = _wrap_response(
functools.partial(
user_api.update_user, current_user, user_id, active=False
),
200
)
enable_user_func = _wrap_response(
functools.partial(
user_api.update_user, current_user, user_id, active=True
),
200
)
return _group_data_action(
data,
add_permission=update_permissions_func,
remove_permissions=update_permissions_func,
set_permissions=update_permissions_func
set_permissions=update_permissions_func,
enable_user=enable_user_func,
disable_user=disable_user_func
)
@ -851,6 +865,42 @@ def take_switch_action(switch_id):
)
@app.route("/machines/<int:machine_id>/action", methods=['POST'])
@log_user_action
@login_required
def take_machine_action(machine_id):
    """update machine."""
    data = _get_request_data()
    # Map each supported action key to its wrapped handler; the power
    # actions use the default response code of _wrap_response.
    action_funcs = {
        'tag': _wrap_response(
            functools.partial(
                machine_api.update_machine, current_user, machine_id
            ),
            200
        ),
        'poweron': _wrap_response(
            functools.partial(
                machine_api.poweron_machine, current_user, machine_id
            )
        ),
        'poweroff': _wrap_response(
            functools.partial(
                machine_api.poweroff_machine, current_user, machine_id
            )
        ),
        'reset': _wrap_response(
            functools.partial(
                machine_api.reset_machine, current_user, machine_id
            )
        )
    }
    return _group_data_action(data, **action_funcs)
@app.route("/switch-machines", methods=['GET'])
@log_user_action
@login_required
@ -1167,6 +1217,23 @@ def show_os_metadata(os_id):
)
@app.route(
    "/adapters/<int:adapter_id>/oses/<int:os_id>/metadata",
    methods=['GET']
)
@log_user_action
@login_required
def show_adapter_os_metadata(adapter_id, os_id):
    """Get adapter metadata."""
    args = _get_request_args()
    metadata = metadata_api.get_package_os_metadata(
        current_user, adapter_id, os_id, **args
    )
    return utils.make_json_response(200, metadata)
@app.route("/clusters", methods=['GET'])
@log_user_action
@login_required
@ -1417,6 +1484,74 @@ def add_cluster_host(cluster_id):
)
@app.route(
    '/clusters/<int:cluster_id>/hosts/<int:host_id>',
    methods=['PUT']
)
@log_user_action
@login_required
def update_cluster_host(cluster_id, host_id):
    """Update cluster host."""
    request_data = _get_request_data()
    updated = cluster_api.update_cluster_host(
        current_user, cluster_id, host_id, **request_data
    )
    return utils.make_json_response(200, updated)
@app.route(
    '/clusterhosts/<int:clusterhost_id>',
    methods=['PUT']
)
@log_user_action
@login_required
def update_clusterhost(clusterhost_id):
    """Update cluster host."""
    request_data = _get_request_data()
    updated = cluster_api.update_clusterhost(
        current_user, clusterhost_id, **request_data
    )
    return utils.make_json_response(200, updated)
@app.route(
    '/clusters/<int:cluster_id>/hosts/<int:host_id>',
    methods=['PATCH']
)
@log_user_action
@login_required
def patch_cluster_host(cluster_id, host_id):
    """Update cluster host."""
    request_data = _get_request_data()
    patched = cluster_api.patch_cluster_host(
        current_user, cluster_id, host_id, **request_data
    )
    return utils.make_json_response(200, patched)
@app.route(
    '/clusterhosts/<int:clusterhost_id>',
    methods=['PATCH']
)
@log_user_action
@login_required
def patch_clusterhost(clusterhost_id):
    """Update cluster host."""
    request_data = _get_request_data()
    patched = cluster_api.patch_clusterhost(
        current_user, clusterhost_id, **request_data
    )
    return utils.make_json_response(200, patched)
@app.route(
'/clusters/<int:cluster_id>/hosts/<int:host_id>',
methods=['DELETE']

View File

@ -34,7 +34,7 @@ def _add_system(session, model, configs):
)
object = utils.add_db_object(
session, model,
True, config['NAME'],
False, config['NAME'],
deployable=config.get('DEPLOYABLE', False)
)
parents[config['NAME']] = (
@ -73,20 +73,20 @@ def add_adapters_internal(session):
if 'OS_INSTALLER' in config:
os_installer = utils.get_db_object(
session, models.OSInstaller,
instance_name=config['OS_INSTALLER']
alias=config['OS_INSTALLER']
)
else:
os_installer = None
if 'PACKAGE_INSTALLER' in config:
package_installer = utils.get_db_object(
session, models.PackageInstaller,
instance_name=config['PACKAGE_INSTALLER']
alias=config['PACKAGE_INSTALLER']
)
else:
package_installer = None
adapter = utils.add_db_object(
session, models.Adapter,
True,
False,
config['NAME'],
display_name=config.get('DISPLAY_NAME', None),
distributed_system=distributed_system,
@ -127,7 +127,7 @@ def add_roles_internal(session):
configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
for config in configs:
logging.info(
'add config to role', config
'add config %s to role', config
)
adapter = utils.get_db_object(
session, models.Adapter,
@ -136,13 +136,40 @@ def add_roles_internal(session):
for role_dict in config['ROLES']:
utils.add_db_object(
session, models.AdapterRole,
True, role_dict['role'], adapter.id,
False, role_dict['role'], adapter.id,
display_name=role_dict.get('display_name', None),
description=role_dict.get('description', None),
optional=role_dict.get('optional', False)
)
def add_flavors_internal(session):
    """Load flavor configs and populate adapter flavor tables.

    For each config file, the adapter is looked up by name, each
    flavor is added (skipping existing rows), and the flavor's role
    names are linked through AdapterFlavorRole.
    """
    for config in util.load_configs(setting.ADAPTER_FLAVOR_DIR):
        logging.info('add config %s to flavor', config)
        adapter = utils.get_db_object(
            session, models.Adapter,
            name=config['ADAPTER_NAME']
        )
        for flavor_dict in config['FLAVORS']:
            flavor = utils.add_db_object(
                session, models.AdapterFlavor,
                False, flavor_dict['flavor'], adapter.id,
                display_name=flavor_dict.get('display_name', None),
                template=flavor_dict.get('template', None)
            )
            for role_name in flavor_dict.get('roles', []):
                role = utils.get_db_object(
                    session, models.AdapterRole,
                    name=role_name, adapter_id=adapter.id
                )
                utils.add_db_object(
                    session, models.AdapterFlavorRole,
                    False, flavor.id, role.id
                )
def get_adapters_internal(session):
adapter_mapping = {}
adapters = utils.list_db_objects(

View File

@ -28,7 +28,7 @@ SUPPORTED_FIELDS = [
'distributed_system_name',
]
RESP_FIELDS = [
'id', 'name', 'roles',
'id', 'name', 'roles', 'flavors',
'os_installer', 'package_installer',
'distributed_system_id',
'distributed_system_name',
@ -40,6 +40,9 @@ RESP_OS_FIELDS = [
RESP_ROLES_FIELDS = [
'id', 'name', 'display_name', 'description', 'optional'
]
RESP_FLAVORS_FIELDS = [
'id', 'name', 'display_name', 'template', 'roles'
]
@database.run_in_session()
@ -84,13 +87,24 @@ def _filter_adapters(adapter_config, filter_name, filter_value):
)
@utils.wrap_to_dict(
RESP_FIELDS,
supported_oses=RESP_OS_FIELDS
supported_oses=RESP_OS_FIELDS,
roles=RESP_ROLES_FIELDS,
flavors=RESP_FLAVORS_FIELDS
)
def list_adapters(session, lister, **filters):
"""list adapters."""
return ADAPTER_MAPPING.values()
def get_adapter_internal(adapter_id):
    """Return the cached adapter dict for adapter_id.

    :raises: exception.RecordNotExists if adapter_id is unknown.
    """
    if adapter_id not in ADAPTER_MAPPING:
        # Bug fix: corrected typo 'adpater' -> 'adapter' in the message.
        raise exception.RecordNotExists(
            'adapter %s does not exist' % adapter_id
        )
    return ADAPTER_MAPPING[adapter_id]
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission_in_session(
@ -98,15 +112,13 @@ def list_adapters(session, lister, **filters):
)
@utils.wrap_to_dict(
RESP_FIELDS,
supported_oses=RESP_OS_FIELDS
supported_oses=RESP_OS_FIELDS,
roles=RESP_ROLES_FIELDS,
flavors=RESP_FLAVORS_FIELDS
)
def get_adapter(session, getter, adapter_id, **kwargs):
"""get adapter."""
if adapter_id not in ADAPTER_MAPPING:
raise exception.RecordNotExists(
'adpater %s does not exist' % adapter_id
)
return ADAPTER_MAPPING[adapter_id]
return get_adapter_internal(adapter_id)
@utils.supported_filters([])

View File

@ -40,7 +40,7 @@ RESP_FIELDS = [
]
RESP_CLUSTERHOST_FIELDS = [
'id', 'host_id', 'clusterhost_id', 'machine_id',
'name', 'hostname',
'name', 'hostname', 'roles', 'os_installer',
'cluster_id', 'clustername', 'location', 'tag',
'networks', 'mac', 'switch_ip', 'port', 'switches',
'os_installed', 'distributed_system_installed',
@ -64,7 +64,7 @@ RESP_DEPLOYED_CONFIG_FIELDS = [
'updated_at'
]
RESP_METADATA_FIELDS = [
'metadata'
'os_config', 'package_config'
]
RESP_CLUSTERHOST_CONFIG_FIELDS = [
'package_config',
@ -97,10 +97,12 @@ RESP_DEPLOY_FIELDS = [
'status', 'cluster', 'clusterhosts'
]
ADDED_FIELDS = ['name', 'adapter_id', 'os_id']
OPTIONAL_ADDED_FIELDS = ['flavor']
UPDATED_FIELDS = ['name', 'reinstall_distributed_system', 'flavor']
OPTIONAL_ADDED_FIELDS = ['flavor_id']
UPDATED_FIELDS = ['name', 'reinstall_distributed_system']
ADDED_HOST_FIELDS = ['machine_id']
UPDATED_HOST_FIELDS = ['name', 'reinstall_os']
UPDATED_CLUSTERHOST_FIELDS = ['roles']
PATCHED_CLUSTERHOST_FIELDS = ['patched_roles']
UPDATED_CONFIG_FIELDS = [
'put_os_config', 'put_package_config', 'config_step'
]
@ -211,7 +213,8 @@ def is_cluster_editable(
@utils.supported_filters(
ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS
ADDED_FIELDS,
optional_support_keys=OPTIONAL_ADDED_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
@ -314,7 +317,7 @@ def get_cluster_metadata(session, getter, cluster_id, **kwargs):
metadatas['package_config'] = (
metadata_api.get_package_metadata_internal(adapter.id)
)
return {'metadata': metadatas}
return metadatas
@user_api.check_user_permission_in_session(
@ -446,6 +449,13 @@ def add_clusterhost_internal(
machine_id=None, **kwargs
):
from compass.db.api import host as host_api
clusterhost_dict = {}
host_dict = {}
for key, value in kwargs.items():
if key in UPDATED_CLUSTERHOST_FIELDS:
clusterhost_dict[key] = value
else:
host_dict[key] = value
host = utils.get_db_object(
session, models.Host, False, id=machine_id
)
@ -457,20 +467,21 @@ def add_clusterhost_internal(
):
utils.update_db_object(
session, host,
**kwargs
**host_dict
)
else:
logging.info('host %s is not editable', host.name)
else:
utils.add_db_object(
host = utils.add_db_object(
session, models.Host, False, machine_id,
os=cluster.os,
os_installer=cluster.adapter.adapter_os_installer,
creator=cluster.creator,
**kwargs
**host_dict
)
utils.add_db_object(
return utils.add_db_object(
session, models.ClusterHost, exception_when_existing,
cluster.id, machine_id
cluster.id, host.id, **clusterhost_dict
)
@ -562,10 +573,6 @@ def get_clusterhost(
)
@utils.supported_filters(
ADDED_HOST_FIELDS,
optional_support_keys=UPDATED_HOST_FIELDS
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_UPDATE_CLUSTER_HOSTS
@ -579,12 +586,121 @@ def add_cluster_host(
cluster = utils.get_db_object(
session, models.Cluster, id=cluster_id
)
is_cluster_editable(session, cluster, creator)
return add_clusterhost_internal(
session, cluster, exception_when_existing,
**kwargs
)
@user_api.check_user_permission_in_session(
permission.PERMISSION_UPDATE_CLUSTER_HOSTS
)
@utils.wrap_to_dict(RESP_CLUSTERHOST_FIELDS)
def _update_clusterhost(session, updater, clusterhost, **kwargs):
def roles_validates(roles):
cluster_roles = []
cluster = clusterhost.cluster
flavor = cluster.flavor
if not flavor:
raise exception.InvalidParameter(
'not flavor in cluster %s' % cluster.name
)
for flavor_roles in flavor.flavor_roles:
cluster_roles.append(flavor_roles.role.name)
for role in roles:
if role not in cluster_roles:
raise exception.InvalidParameter(
'role %s is not in cluster roles %s' % (
role, cluster_roles
)
)
@utils.input_validates(
roles=roles_validates,
patched_roles=roles_validates
)
def update_internal(clusterhost, **in_kwargs):
return utils.update_db_object(
session, clusterhost, **in_kwargs
)
return update_internal(
clusterhost, **kwargs
)
is_cluster_editable(session, clusterhost.cluster, updater)
return utils.update_db_object(
session, clusterhost, **kwargs
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_FIELDS
)
@database.run_in_session()
def update_cluster_host(
    session, updater, cluster_id, host_id,
    **kwargs
):
    """Update cluster host."""
    return _update_clusterhost(
        session, updater,
        utils.get_db_object(
            session, models.ClusterHost,
            cluster_id=cluster_id, host_id=host_id
        ),
        **kwargs
    )
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_FIELDS
)
@database.run_in_session()
def update_clusterhost(
    session, updater, clusterhost_id,
    **kwargs
):
    """Update cluster host."""
    return _update_clusterhost(
        session, updater,
        utils.get_db_object(
            session, models.ClusterHost,
            clusterhost_id=clusterhost_id
        ),
        **kwargs
    )
@utils.replace_filters(
    roles='patched_roles'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_FIELDS
)
@database.run_in_session()
def patch_cluster_host(
    session, updater, cluster_id, host_id,
    **kwargs
):
    """Patch cluster host: 'roles' is renamed to 'patched_roles' so
    the update appends to the existing role list.
    """
    # Bug fix: query models.ClusterHost, not models.Cluster -- the
    # cluster_id/host_id filters belong to ClusterHost, matching the
    # sibling update_cluster_host above.
    # NOTE(review): after replace_filters renames 'roles' to
    # 'patched_roles', supported_filters with UPDATED_CLUSTERHOST_FIELDS
    # (['roles']) may drop the key; PATCHED_CLUSTERHOST_FIELDS looks
    # intended here -- confirm against callers.
    clusterhost = utils.get_db_object(
        session, models.ClusterHost,
        cluster_id=cluster_id, host_id=host_id
    )
    return _update_clusterhost(session, updater, clusterhost, **kwargs)
@utils.replace_filters(
    roles='patched_roles'
)
@utils.supported_filters(
    optional_support_keys=UPDATED_CLUSTERHOST_FIELDS
)
@database.run_in_session()
def patch_clusterhost(
    session, updater, clusterhost_id,
    **kwargs
):
    """Patch cluster host: 'roles' is renamed to 'patched_roles' so
    the update appends to the existing role list.
    """
    # Bug fix: query models.ClusterHost, not models.Cluster -- the
    # clusterhost_id filter belongs to ClusterHost, matching the
    # sibling update_clusterhost above.
    clusterhost = utils.get_db_object(
        session, models.ClusterHost,
        clusterhost_id=clusterhost_id
    )
    return _update_clusterhost(session, updater, clusterhost, **kwargs)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission_in_session(

View File

@ -147,7 +147,13 @@ def _setup_switch_table(switch_session):
logging.info('setup switch table')
from compass.db.api import switch
switch.add_switch_internal(
switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP))
switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)),
True, filters=[{
'filter_name': 'deny-all',
'filter_type': 'deny',
'port_prefix': '.*',
'port_suffix': '.*'
}]
)
@ -224,12 +230,35 @@ def _setup_package_metadatas(metadata_session):
def _setup_adapter_roles(role_session):
"""Initialize package adapter role table."""
"""Initialize adapter role table."""
logging.info('setup adapter role table')
from compass.db.api import adapter
adapter.add_roles_internal(role_session)
def _setup_adapter_flavors(session):
    """Populate the adapter flavor table from flavor configs."""
    from compass.db.api import adapter as adapter_api
    logging.info('setup adapter flavor table')
    adapter_api.add_flavors_internal(session)
def _update_others(other_session):
    """Run update()/validate() over rows of the derived tables."""
    logging.info('update other tables')
    from compass.db.api import utils
    from compass.db import models
    # Each model's update_db_objects call refreshes computed columns.
    for model in (models.Cluster, models.Host, models.ClusterHost):
        utils.update_db_objects(other_session, model)
@run_in_session()
def create_db(my_session):
"""Create database."""
@ -243,62 +272,14 @@ def create_db(my_session):
_setup_distributed_systems(my_session)
_setup_adapters(my_session)
_setup_adapter_roles(my_session)
_setup_adapter_flavors(my_session)
_setup_os_fields(my_session)
_setup_package_fields(my_session)
_setup_os_metadatas(my_session)
_setup_package_metadatas(my_session)
_update_others(my_session)
def drop_db():
    """Drop database.

    Drops every table registered on the declarative base from ENGINE.
    """
    models.BASE.metadata.drop_all(bind=ENGINE)
@run_in_session()
def create_table(my_session, table):
    """Create table.

    :param table: Class of the Table defined in the model.
    """
    # checkfirst avoids an error if the table already exists.
    table.__table__.create(bind=ENGINE, checkfirst=True)
    if table == models.User:
        _setup_user_table(my_session)
    elif table == models.Permission:
        _setup_permission_table(my_session)
    elif table == models.Switch:
        _setup_switch_table(my_session)
    elif table in [
        models.OSInstaller,
        models.PackageInstaller,
        models.OperatingSystem,
        models.DistributedSystems,
        models.Adapter
    ]:
        # These tables are interdependent (adapters reference installers
        # and operating systems), so the whole group is seeded together.
        _setup_os_installers(my_session)
        _setup_package_installers(my_session)
        _setup_adapter_roles(my_session)
        _setup_adapters(my_session)
        _setup_os_fields(my_session)
        _setup_os_metadatas(my_session)
        _setup_package_fields(my_session)
        _setup_package_metadatas(my_session)
    elif table == models.AdapterRole:
        _setup_adapter_roles(my_session)
    elif table in [
        models.OSConfigField,
        models.PackageConfigField,
        models.OSConfigMetadata,
        models.PackageConfigMetadata
    ]:
        # Field and metadata tables are seeded as a unit.
        _setup_os_fields(my_session)
        _setup_os_metadatas(my_session)
        _setup_package_fields(my_session)
        _setup_package_metadatas(my_session)
def drop_table(table):
    """Drop table.

    :param table: Class of the Table defined in the model.
    """
    # checkfirst avoids an error when the table does not exist.
    table.__table__.drop(bind=ENGINE, checkfirst=True)

View File

@ -32,7 +32,7 @@ SUPPORTED_NETOWORK_FIELDS = [
]
RESP_FIELDS = [
'id', 'name', 'hostname', 'os_name', 'os_id', 'owner', 'mac',
'switch_ip', 'port', 'switches',
'switch_ip', 'port', 'switches', 'os_installer',
'reinstall_os', 'os_installed', 'tag', 'location', 'networks',
'created_at', 'updated_at'
]
@ -472,6 +472,9 @@ def get_hostnetwork(session, getter, host_network_id, **kwargs):
@utils.supported_filters(
ADDED_NETWORK_FIELDS, optional_support_keys=OPTIONAL_ADDED_NETWORK_FIELDS
)
@utils.input_validates(
ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_ADD_HOST_NETWORK
@ -498,6 +501,9 @@ def add_host_network(
optional_support_keys=UPDATED_NETWORK_FIELDS,
ignore_support_keys=IGNORED_NETWORK_FIELDS
)
@utils.input_validates(
ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_ADD_HOST_NETWORK
@ -525,6 +531,9 @@ def update_host_network(
optional_support_keys=UPDATED_NETWORK_FIELDS,
ignore_support_keys=IGNORED_NETWORK_FIELDS
)
@utils.input_validates(
ip=utils.check_ip
)
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_ADD_HOST_NETWORK

View File

@ -33,9 +33,13 @@ PATCHED_FIELDS = [
'patched_location'
]
RESP_FIELDS = [
'id', 'mac', 'ipmi_credentials',
'id', 'mac', 'ipmi_credentials', 'switches', 'switch_ip',
'port', 'vlans',
'tag', 'location', 'created_at', 'updated_at'
]
RESP_DEPLOY_FIELDS = [
'status', 'machine'
]
@utils.supported_filters([])
@ -118,3 +122,84 @@ def del_machine(session, deleter, machine_id, **kwargs):
"""Delete a machine."""
machine = utils.get_db_object(session, models.Machine, id=machine_id)
return utils.del_db_object(session, machine)
@utils.supported_filters(optional_support_keys=['poweron'])
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    machine=RESP_FIELDS
)
def poweron_machine(
    session, deployer, machine_id, poweron=None, **kwargs
):
    """power on machine.

    Sends an async celery task; the machine is not powered on yet
    when this returns.

    :param machine_id: id of the machine to power on.
    :param poweron: request payload, unused here (was a mutable
        default {}; changed to None to avoid the shared-default
        pitfall -- callers see no behavioral difference since the
        value is never read).
    """
    # local import -- presumably avoids a circular import at module
    # load time; confirm before moving to the top of the file.
    from compass.tasks import client as celery_client
    machine = utils.get_db_object(
        session, models.Machine, id=machine_id
    )
    celery_client.celery.send_task(
        'compass.tasks.poweron_machine',
        (machine_id,)
    )
    return {
        'status': 'poweron %s action sent' % machine.mac,
        'machine': machine
    }
@utils.supported_filters(optional_support_keys=['poweroff'])
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    machine=RESP_FIELDS
)
def poweroff_machine(
    session, deployer, machine_id, poweroff=None, **kwargs
):
    """power off machine.

    Sends an async celery task; the machine is not powered off yet
    when this returns.

    :param machine_id: id of the machine to power off.
    :param poweroff: request payload, unused here (was a mutable
        default {}; changed to None -- never read, so no behavioral
        difference for callers).
    """
    # local import -- presumably avoids a circular import at module
    # load time; confirm before moving to the top of the file.
    from compass.tasks import client as celery_client
    machine = utils.get_db_object(
        session, models.Machine, id=machine_id
    )
    celery_client.celery.send_task(
        'compass.tasks.poweroff_machine',
        (machine_id,)
    )
    return {
        'status': 'poweroff %s action sent' % machine.mac,
        'machine': machine
    }
@utils.supported_filters(optional_support_keys=['reset'])
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_DEPLOY_HOST
)
@utils.wrap_to_dict(
    RESP_DEPLOY_FIELDS,
    machine=RESP_FIELDS
)
def reset_machine(
    session, deployer, machine_id, reset=None, **kwargs
):
    """reset machine.

    Sends an async celery task; the reset has not happened yet when
    this returns.

    :param machine_id: id of the machine to reset.
    :param reset: request payload, unused here (was a mutable default
        {}; changed to None -- never read, so no behavioral difference
        for callers).
    """
    # local import -- presumably avoids a circular import at module
    # load time; confirm before moving to the top of the file.
    from compass.tasks import client as celery_client
    machine = utils.get_db_object(
        session, models.Machine, id=machine_id
    )
    celery_client.celery.send_task(
        'compass.tasks.reset_machine',
        (machine_id,)
    )
    return {
        'status': 'reset %s action sent' % machine.mac,
        'machine': machine
    }

View File

@ -29,7 +29,7 @@ def _add_field_internal(session, model, configs):
fields = []
for config in configs:
fields.append(utils.add_db_object(
session, model, True,
session, model, False,
config['NAME'],
field_type=config.get('FIELD_TYPE', basestring),
display_type=config.get('DISPLAY_TYPE', 'text'),
@ -72,7 +72,7 @@ def _add_metadata(
else:
field = None
metadata = utils.add_db_object(
session, metadata_model, True,
session, metadata_model, False,
path, name=name, parent=parent, field=field,
display_name=metadata_self.get('display_name', name),
description=metadata_self.get('description', None),

View File

@ -23,6 +23,11 @@ from compass.db.api import utils
from compass.db import exception
RESP_METADATA_FIELDS = [
'os_config', 'package_config'
]
@database.run_in_session()
def load_metadatas(session):
global OS_METADATA_MAPPING
@ -103,8 +108,9 @@ def get_package_metadata_internal(adapter_id):
@user_api.check_user_permission_in_session(
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_metadata(session, getter, adapter_id, **kwargs):
return get_package_metadata_internal(adapter_id)
return {'package_config': get_package_metadata_internal(adapter_id)}
def get_os_metadata_internal(os_id):
@ -121,6 +127,33 @@ def get_os_metadata_internal(os_id):
@user_api.check_user_permission_in_session(
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_os_metadata(session, getter, os_id, **kwargs):
"""get os metadatas."""
return get_os_metadata_internal(os_id)
return {'os_config': get_os_metadata_internal(os_id)}
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission_in_session(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_os_metadata(session, getter, adapter_id, os_id, **kwargs):
    """Get os and package metadata for an adapter/os pair.

    :raises: exception.InvalidParameter if os_id is not in the
        adapter's supported os list.
    """
    from compass.db.api import adapter_holder as adapter_api
    adapter = adapter_api.get_adapter_internal(adapter_id)
    # Renamed loop variable: the original used 'os', shadowing the
    # stdlib module name.
    supported_os_ids = [
        supported_os['os_id'] for supported_os in adapter['supported_oses']
    ]
    if os_id not in supported_os_ids:
        raise exception.InvalidParameter(
            'os %s is not in the supported os list of adapter %s' % (
                os_id, adapter_id
            )
        )
    return {
        'os_config': get_os_metadata_internal(os_id),
        'package_config': get_package_metadata_internal(adapter_id)
    }

View File

@ -134,12 +134,13 @@ def _check_vlans(vlans):
def add_switch_internal(
session, ip_int, exception_when_existing=True, **kwargs
session, ip_int, exception_when_existing=True,
filters=setting.SWITCHES_DEFAULT_FILTERS, **kwargs
):
with session.begin(subtransactions=True):
return utils.add_db_object(
session, models.Switch, exception_when_existing, ip_int,
filters=setting.SWITCHES_DEFAULT_FILTERS, **kwargs
**kwargs
)
@ -325,44 +326,6 @@ def patch_switch_filter(session, updater, switch_id, **kwargs):
return utils.update_db_object(session, switch, **kwargs)
def filter_machine_internal(filters, port):
    """Decide whether a switch port passes the configured filters.

    Each filter either lists explicit 'ports', or describes ports via
    a 'port_prefix'/'port_suffix' regex with an optional numeric
    'port_start'/'port_end' range.  The first filter that matches
    decides ('allow' -> True, 'deny' -> False); if no filter matches,
    the port is allowed.

    :param filters: list of filter dicts.
    :param port: port name string, e.g. 'eth10'.
    :returns: True if the port is allowed.
    """
    for port_filter in filters:
        logging.debug('apply filter %s on port %s', port_filter, port)
        filter_allowed = port_filter['filter_type'] == 'allow'
        if 'ports' in port_filter:
            if port in port_filter['ports']:
                logging.debug('port is allowed? %s', filter_allowed)
                return filter_allowed
            else:
                logging.debug('port is allowed? %s', not filter_allowed)
                return not filter_allowed
        port_prefix = port_filter.get('port_prefix', '')
        port_suffix = port_filter.get('port_suffix', '')
        pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
        match = pattern.match(port)
        if match:
            logging.debug(
                'port %s matches pattern %s',
                port, pattern.pattern
            )
            # Bug fix: compare port numbers numerically.  The original
            # compared the matched string against port_start/port_end,
            # so '10' sorted before '9' lexicographically.
            port_number = int(match.group(1))
            if (
                'port_start' not in port_filter or
                port_number >= int(port_filter['port_start'])
            ) and (
                'port_end' not in port_filter or
                port_number <= int(port_filter['port_end'])
            ):
                logging.debug('port is allowed? %s', filter_allowed)
                return filter_allowed
        else:
            logging.debug(
                'port %s does not match pattern %s',
                port, pattern.pattern
            )
    return True
def get_switch_machines_internal(session, **filters):
return utils.list_db_objects(
session, models.SwitchMachine, **filters
@ -427,13 +390,13 @@ def _filter_vlans(vlan_filter, obj):
)
@utils.wrap_to_dict(RESP_MACHINES_FIELDS)
def _filter_switch_machines(session, user, switch_machines, **filters):
return [
switch_machine for switch_machine in switch_machines
if filter_machine_internal(
switch_machine.switch.filters,
switch_machine.port
)
]
if 'ip_int' in filters:
return switch_machines
else:
return [
switch_machine for switch_machine in switch_machines
if not switch_machine.filtered
]
@user_api.check_user_permission_in_session(
@ -452,29 +415,21 @@ def _filter_switch_machines(session, user, switch_machines, **filters):
clusters=RESP_CLUSTER_FIELDS
)
def _filter_switch_machines_hosts(session, user, switch_machines, **filters):
filtered_switch_machines = [
switch_machine for switch_machine in switch_machines
if filter_machine_internal(
switch_machine.switch.filters,
switch_machine.port
)
]
if 'ip_int' in filters:
filtered_switch_machines = switch_machines
else:
filtered_switch_machines = [
switch_machine for switch_machine in switch_machines
if not switch_machine.filtered
]
switch_machines_hosts = []
for switch_machine in filtered_switch_machines:
switch_machine_host_dict = {}
machine = switch_machine.machine
host = machine.host
if host:
clusters = [
clusterhost.cluster
for clusterhost in host.clusterhosts
]
switch_machine_host_dict.update(
host.to_dict()
)
switch_machine_host_dict['clusters'] = [
cluster.to_dict() for cluster in clusters
]
switch_machine_host_dict = host.to_dict()
else:
switch_machine_host_dict = machine.to_dict()
switch_machine_host_dict.update(
switch_machine.to_dict()
)
@ -506,15 +461,8 @@ def list_switchmachines(session, lister, **filters):
switch_machines = get_switch_machines_internal(
session, **filters
)
if 'ip_int' in filters:
filtered_switch_machines = switch_machines
else:
filtered_switch_machines = [
switch_machine for switch_machine in switch_machines
if switch_machine.switch_ip != setting.DEFAULT_SWITCH_IP
]
return _filter_switch_machines(
session, lister, filtered_switch_machines, **filters
session, lister, switch_machines, **filters
)

View File

@ -470,6 +470,22 @@ def del_db_objects(session, table, **filters):
return db_objects
def update_db_objects(session, table, **filters):
    """Update db objects."""
    with session.begin(subtransactions=True):
        logging.debug(
            'update db objects by filters %s in table %s',
            filters, table.__name__
        )
        matched = model_filter(
            model_query(session, table), table, **filters
        ).all()
        for obj in matched:
            logging.debug('update db object %s', obj)
            obj.update()
            obj.validate()
        return matched
def update_db_object(session, db_object, **kwargs):
"""Update db object."""
with session.begin(subtransactions=True):

View File

@ -16,6 +16,7 @@
import datetime
import logging
import netaddr
import re
import simplejson as json
from sqlalchemy import BigInteger
@ -80,6 +81,8 @@ class HelperMixin(object):
keys = self.__mapper__.columns.keys()
dict_info = {}
for key in keys:
if key.startswith('_'):
continue
value = getattr(self, key)
if value is not None:
if isinstance(value, datetime.datetime):
@ -252,13 +255,13 @@ class FieldMixin(HelperMixin):
class InstallerMixin(HelperMixin):
name = Column(String(80))
instance_name = Column(String(80), unique=True)
alias = Column(String(80), unique=True)
settings = Column(JSONEncoded, default={})
def validate(self):
if not self.name:
raise exception.InvalidParameter(
'name is not set in installer %s' % self.instance_name
'name is not set in installer %s' % self.name
)
super(InstallerMixin, self).validate()
@ -307,7 +310,7 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
String(80))
subnet_id = Column(
Integer,
ForeignKey('network.id', onupdate='CASCADE', ondelete='CASCADE')
ForeignKey('subnet.id', onupdate='CASCADE', ondelete='CASCADE')
)
ip_int = Column(BigInteger, unique=True)
is_mgmt = Column(Boolean, default=False)
@ -342,6 +345,9 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
def netmask(self):
return str(netaddr.IPNetwork(self.subnet).netmask)
def update(self):
    # A change to a host network invalidates the host's config
    # validation state, forcing re-validation.
    self.host.config_validated = False
def validate(self):
if not self.network:
raise exception.InvalidParameter(
@ -355,14 +361,6 @@ class HostNetwork(BASE, TimestampMixin, HelperMixin):
self.host_id, self.interface
)
)
try:
netaddr.IPAddress(self.ip_int)
except Exception:
raise exception.InvalidParameter(
'ip %s format is uncorrect in %s interface %s' % (
self.ip_int, self.host_id, self.interface
)
)
ip = netaddr.IPAddress(self.ip_int)
subnet = netaddr.IPNetwork(self.subnet)
if ip not in subnet:
@ -425,6 +423,7 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
Integer,
ForeignKey('host.id', onupdate='CASCADE', ondelete='CASCADE')
)
_roles = Column(JSONEncoded, default=[])
config_step = Column(String(80), default='')
package_config = Column(JSONEncoded, default={})
config_validated = Column(Boolean, default=False)
@ -468,10 +467,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
@patched_package_config.setter
def patched_package_config(self, value):
self.package_config = util.merge_dict(dict(self.package_config), value)
logging.info(
'patch clusterhost %s package config: %s',
self.id, value
)
self.config_validated = False
@property
@ -483,10 +478,6 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
package_config = dict(self.package_config)
package_config.update(value)
self.package_config = package_config
logging.info(
'put clusterhost %s package config: %s',
self.id, value
)
self.config_validated = False
@property
@ -557,20 +548,34 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
@property
def roles(self):
package_config = self.package_config
if 'roles' in package_config:
role_names = package_config['roles']
roles = self.cluster.adapter.roles
role_mapping = {}
for role in roles:
role_mapping[role.name] = role
filtered_roles = []
for role_name in role_names:
if role_name in role_mapping:
filtered_roles.append(role_mapping[role_name])
return filtered_roles
else:
role_names = list(self._roles)
if not role_names:
return None
flavor = self.cluster.flavor
if not flavor:
return None
roles = []
for flavor_role in flavor.flavor_roles:
role = flavor_role.role
if role.name in role_names:
roles.append(role)
return roles
@roles.setter
def roles(self, value):
self._roles = value
self.config_validated = False
@property
def patched_roles(self):
return self.roles
@patched_roles.setter
def patched_roles(self, value):
roles = list(self._roles)
roles.extend(value)
self._roles = roles
self.config_validated = False
@hybrid_property
def owner(self):
@ -602,9 +607,13 @@ class ClusterHost(BASE, TimestampMixin, HelperMixin):
'owner': self.owner,
'clustername': self.clustername,
'name': self.name,
'state': state_dict['state'],
'roles': self.roles
'state': state_dict['state']
})
roles = self.roles
if roles:
dict_info['roles'] = [
role.to_dict() for role in roles
]
return dict_info
@ -657,6 +666,12 @@ class Host(BASE, TimestampMixin, HelperMixin):
deployed_os_config = Column(JSONEncoded, default={})
os_name = Column(String(80))
creator_id = Column(Integer, ForeignKey('user.id'))
owner = Column(String(80))
os_installer_id = Column(
Integer,
ForeignKey('os_installer.id')
)
id = Column(
Integer,
ForeignKey('machine.id', onupdate='CASCADE', ondelete='CASCADE'),
@ -725,6 +740,9 @@ class Host(BASE, TimestampMixin, HelperMixin):
super(Host, self).initialize()
def update(self):
creator = self.creator
if creator:
self.owner = creator.email
if self.reinstall_os:
if self.state in ['SUCCESSFUL', 'ERROR']:
if self.config_validated:
@ -740,30 +758,27 @@ class Host(BASE, TimestampMixin, HelperMixin):
super(Host, self).update()
def validate(self):
os = self.os
if not os:
raise exception.InvalidParameter(
'os is not set in host %s' % self.id
)
if not os.deployable:
raise exception.InvalidParameter(
'os %s is not deployable' % os.name
)
creator = self.creator
if not creator:
raise exception.InvalidParameter(
'creator is not set in host %s' % self.id
)
os = self.os
if not os:
raise exception.InvalidParameter(
'os is not set in host %s' % self.id
)
os_installer = self.os_installer
if not os_installer:
raise exception.Invalidparameter(
'os_installer is not set in host %s' % self.id
)
if not os.deployable:
raise exception.InvalidParameter(
'os %s is not deployable in host %s' % (os.name, self.id)
)
super(Host, self).validate()
@hybrid_property
def owner(self):
return self.creator.email
@owner.expression
def owner(cls):
return cls.creator.email
@property
def os_installed(self):
return self.state.state == 'SUCCESSFUL'
@ -781,13 +796,13 @@ class Host(BASE, TimestampMixin, HelperMixin):
state_dict = self.state_dict()
dict_info.update({
'machine_id': self.machine.id,
'owner': self.owner,
'os_installed': self.os_installed,
'hostname': self.name,
'networks': [
host_network.to_dict()
for host_network in self.host_networks
],
'os_installer': self.os_installer.to_dict(),
'clusters': [cluster.to_dict() for cluster in self.clusters],
'state': state_dict['state']
})
@ -884,9 +899,14 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
name = Column(String(80), unique=True)
reinstall_distributed_system = Column(Boolean, default=True)
config_step = Column(String(80), default='')
os_id = Column(Integer, ForeignKey('os.id'), nullable=True)
os_name = Column(String(80), nullable=True)
flavor = Column(String(80), nullable=True)
os_id = Column(Integer, ForeignKey('os.id'))
os_name = Column(String(80))
flavor_id = Column(
Integer,
ForeignKey('adapter_flavor.id'),
nullable=True
)
flavor_name = Column(String(80), nullable=True)
distributed_system_id = Column(
Integer, ForeignKey('distributed_system.id'),
nullable=True
@ -902,6 +922,7 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
adapter_id = Column(Integer, ForeignKey('adapter.id'))
adapter_name = Column(String(80), nullable=True)
creator_id = Column(Integer, ForeignKey('user.id'))
owner = Column(String(80))
clusterhosts = relationship(
ClusterHost,
passive_deletes=True, passive_updates=True,
@ -922,14 +943,12 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
super(Cluster, self).__init__(**kwargs)
def initialize(self):
adapter = self.adapter
if adapter:
self.put_package_config = {
'roles': [role.name for role in adapter.roles]
}
super(Cluster, self).initialize()
def update(self):
creator = self.creator
if creator:
self.owner = creator.email
if self.reinstall_distributed_system:
if self.state in ['SUCCESSFUL', 'ERROR']:
if self.config_validated:
@ -948,10 +967,17 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
self.adapter_name = adapter.name
self.distributed_system = adapter.adapter_distributed_system
self.distributed_system_name = self.distributed_system.name
flavor = self.flavor
if flavor:
self.flavor_name = flavor.name
else:
self.flavor_name = None
else:
self.adapter_name = None
self.distributed_system = None
self.distributed_system_name = None
self.flavor = None
self.flavor_name = None
super(Cluster, self).update()
def validate(self):
@ -961,30 +987,49 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
'creator is not set in cluster %s' % self.id
)
os = self.os
if os and not os.deployable:
if not os:
raise exception.InvalidParameter(
'os is not set in cluster %s' % self.id
)
if not os.deployable:
raise exception.InvalidParameter(
'os %s is not deployable' % os.name
)
adapter = self.adapter
if adapter:
if not adapter.deployable:
raise exception.InvalidParameter(
'adapter %s is not deployable' % adapter.name
)
supported_os_ids = [
adapter_os.os.id for adapter_os in adapter.supported_oses
]
if os and os.id not in supported_os_ids:
raise exception.InvalidParameter(
'os %s is not supported' % os.name
)
distributed_system = self.distributed_system
if distributed_system and not distributed_system.deployable:
if not adapter:
raise exception.InvalidParameter(
'adapter is not set in cluster %s' % self.id
)
if not adapter.deployable:
raise exception.InvalidParameter(
'adapter %s is not deployable' % adapter.name
)
supported_os_ids = [
adapter_os.os.id for adapter_os in adapter.supported_oses
]
if os.id not in supported_os_ids:
raise exception.InvalidParameter(
'os %s is not supported' % os.name
)
distributed_system = self.distributed_system
if distributed_system:
if not distributed_system.deployable:
raise exception.InvalidParamerter(
'distributed system %s is not deployable' % (
distributed_system.name
)
)
flavor = self.flavor
if not flavor:
raise exception.InvalidParameter(
'flavor is not set in cluster %s' % self.id
)
if flavor.adapter_id != self.adapter_id:
raise exception.InvalidParameter(
'flavor adapter id %s does not match adapter id %s' % (
flavor.adapter_id, self.adapter_id
)
)
super(Cluster, self).validate()
@property
@ -1032,14 +1077,6 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
logging.info('put cluster %s package config: %s', self.id, value)
self.config_validated = False
@hybrid_property
def owner(self):
return self.creator.email
@owner.expression
def owner(cls):
return cls.creator.email
@property
def distributed_system_installed(self):
return self.state.state == 'SUCCESSFUL'
@ -1049,10 +1086,11 @@ class Cluster(BASE, TimestampMixin, HelperMixin):
def to_dict(self):
dict_info = super(Cluster, self).to_dict()
dict_info.update({
'distributed_system_installed': self.distributed_system_installed,
'owner': self.owner,
})
dict_info['distributed_system_installed'] = (
self.distributed_system_installed
)
if self.flavor:
dict_info['flavor'] = self.flavor.to_dict()
return dict_info
@ -1318,6 +1356,44 @@ class SwitchMachine(BASE, HelperMixin, TimestampMixin):
vlans.append(item)
self.vlans = vlans
@property
def filtered(self):
filters = self.switch.filters
port = self.port
for port_filter in filters:
logging.debug('apply filter %s on port %s', port_filter, port)
denied = port_filter['filter_type'] != 'allow'
if 'ports' in port_filter:
if port in port_filter['ports']:
logging.debug('port %s is allowed? %s', port, not denied)
return denied
port_prefix = port_filter.get('port_prefix', '')
port_suffix = port_filter.get('port_suffix', '')
pattern = re.compile(r'%s(\d+)%s' % (port_prefix, port_suffix))
match = pattern.match(port)
if match:
logging.debug(
'port %s matches pattern %s',
port, pattern.pattern
)
port_number = match.group(1)
if (
'port_start' not in port_filter or
port_number >= port_filter['port_start']
) and (
'port_end' not in port_filter or
port_number <= port_filter['port_end']
):
logging.debug('port %s is allowed? %s', port, not denied)
return denied
else:
logging.debug(
'port %s does not match pattern %s',
port, pattern.pattern
)
logging.debug('port %s is allowed', port)
return False
def to_dict(self):
dict_info = self.machine.to_dict()
dict_info.update(super(SwitchMachine, self).to_dict())
@ -1400,6 +1476,7 @@ class Machine(BASE, HelperMixin, TimestampMixin):
'vlans': switch_machine.vlans
}
for switch_machine in self.switch_machines
if not switch_machine.filtered
]
if dict_info['switches']:
dict_info.update(dict_info['switches'][0])
@ -1620,6 +1697,105 @@ class OperatingSystem(BASE, HelperMixin):
return dict_info
class AdapterFlavorRole(BASE, HelperMixin):
"""Adapter flavor roles."""
__tablename__ = 'adapter_flavor_role'
flavor_id = Column(
Integer,
ForeignKey(
'adapter_flavor.id',
onupdate='CASCADE', ondelete='CASCADE'
),
primary_key=True
)
role_id = Column(
Integer,
ForeignKey(
'adapter_role.id',
onupdate='CASCADE', ondelete='CASCADE'
),
primary_key=True
)
def __init__(self, flavor_id, role_id):
self.flavor_id = flavor_id
self.role_id = role_id
super(AdapterFlavorRole, self).__init__()
def validate(self):
flavor_adapter_id = self.flavor.adapter_id
role_adapter_id = self.role.adapter_id
if flavor_adapter_id != role_adapter_id:
raise exception.InvalidParameter(
'flavor adapter %s and role adapter %s does not match' % (
flavor_adapter_id, role_adapter_id
)
)
def to_dict(self):
dict_info = super(AdapterFlavorRole, self).to_dict()
dict_info.update(
self.role.to_dict()
)
return dict_info
class AdapterFlavor(BASE, HelperMixin):
"""Adapter's flavors."""
__tablename__ = 'adapter_flavor'
id = Column(Integer, primary_key=True)
adapter_id = Column(
Integer,
ForeignKey('adapter.id', onupdate='CASCADE', ondelete='CASCADE')
)
name = Column(String(80), unique=True)
display_name = Column(String(80))
template = Column(String(80))
flavor_roles = relationship(
AdapterFlavorRole,
passive_deletes=True, passive_updates=True,
cascade='all, delete-orphan',
backref=backref('flavor')
)
clusters = relationship(
Cluster,
backref=backref('flavor')
)
__table_args__ = (
UniqueConstraint('name', 'adapter_id', name='constraint'),
)
def __init__(self, name, adapter_id, **kwargs):
self.name = name
self.adapter_id = adapter_id
super(AdapterFlavor, self).__init__(**kwargs)
def initialize(self):
if not self.display_name:
self.display_name = self.name
super(AdapterFlavor, self).initialize()
def validate(self):
if not self.template:
raise exception.InvalidParameter(
'template is not set in adapter flavor %s' % self.id
)
super(AdapterFlavor, self).validate()
def to_dict(self):
dict_info = super(AdapterFlavor, self).to_dict()
dict_info['roles'] = [
flavor_role.to_dict() for flavor_role in self.flavor_roles
]
return dict_info
class AdapterRole(BASE, HelperMixin):
"""Adapter's roles."""
@ -1638,6 +1814,13 @@ class AdapterRole(BASE, HelperMixin):
)
)
flavor_roles = relationship(
AdapterFlavorRole,
passive_deletes=True, passive_updates=True,
cascade='all, delete-orphan',
backref=backref('role')
)
__table_args__ = (
UniqueConstraint('name', 'adapter_id', name='constraint'),
)
@ -1776,6 +1959,12 @@ class Adapter(BASE, HelperMixin):
cascade='all, delete-orphan',
backref=backref('adapter')
)
flavors = relationship(
AdapterFlavor,
passive_deletes=True, passive_updates=True,
cascade='all, delete-orphan',
backref=backref('adapter')
)
children = relationship(
'Adapter',
passive_deletes=True, passive_updates=True,
@ -1876,6 +2065,17 @@ class Adapter(BASE, HelperMixin):
else:
return []
@property
def adapter_flavors(self):
flavors = self.flavors
if flavors:
return flavors
parent = self.parent
if parent:
return parent.adapter_flavors
else:
return []
def to_dict(self):
dict_info = super(Adapter, self).to_dict()
dict_info.update({
@ -1886,6 +2086,9 @@ class Adapter(BASE, HelperMixin):
adapter_os.to_dict()
for adapter_os in self.adapter_supported_oses
],
'flavors': [
flavor.to_dict() for flavor in self.adapter_flavors
]
})
distributed_system = self.adapter_distributed_system
if distributed_system:
@ -1947,9 +2150,13 @@ class OSInstaller(BASE, InstallerMixin):
cascade='all, delete-orphan',
backref=backref('os_installer')
)
hosts = relationship(
Host,
backref=backref('os_installer')
)
def __init__(self, instance_name, **kwargs):
self.instance_name = instance_name
def __init__(self, alias, **kwargs):
self.alias = alias
super(OSInstaller, self).__init__(**kwargs)
@ -1964,45 +2171,37 @@ class PackageInstaller(BASE, InstallerMixin):
backref=backref('package_installer')
)
def __init__(self, instance_name, **kwargs):
self.instance_name = instance_name
def __init__(self, alias, **kwargs):
self.alias = alias
super(PackageInstaller, self).__init__(**kwargs)
class Network(BASE, TimestampMixin, HelperMixin):
class Subnet(BASE, TimestampMixin, HelperMixin):
"""network table."""
__tablename__ = 'network'
__tablename__ = 'subnet'
id = Column(Integer, primary_key=True)
name = Column(String(80), unique=True)
subnet = Column(String(80), unique=True)
host_networks = relationship(
host_interfaces = relationship(
HostNetwork,
passive_deletes=True, passive_updates=True,
cascade='all, delete-orphan',
backref=backref('network')
backref=backref('subnet')
)
def __init__(self, subnet, **kwargs):
self.subnet = subnet
super(Network, self).__init__(**kwargs)
super(Subnet, self).__init__(**kwargs)
def initialize(self):
if not self.name:
self.name = self.subnet
super(Network, self).initialize()
def validate(self):
try:
netaddr.IPNetwork(self.subnet)
except Exception:
raise exception.InvalidParameter(
'subnet %s format is uncorrect' % self.subnet
)
super(Subnet, self).initialize()
class LogProgressingHistory(BASE):
class LogProgressingHistory(BASE, TimestampMixin, HelperMixin):
"""host installing log history for each file.
:param id: int, identity as primary key.
@ -2014,21 +2213,25 @@ class LogProgressingHistory(BASE):
:param severity: Enum, the installing message severity.
('ERROR', 'WARNING', 'INFO')
:param line_matcher_name: str, the line matcher name of the log processor.
:param update_timestamp: datetime, the latest timestamp the entry updated.
"""
__tablename__ = 'log_progressing_history'
id = Column(Integer, primary_key=True)
pathname = Column(String(80), unique=True)
position = Column(Integer, ColumnDefault(0))
partial_line = Column(Text)
percentage = Column(Float, ColumnDefault(0.0))
message = Column(Text)
severity = Column(Enum('ERROR', 'WARNING', 'INFO'), ColumnDefault('INFO'))
line_matcher_name = Column(String(80), ColumnDefault('start'))
update_timestamp = Column(DateTime, default=datetime.datetime.now(),
onupdate=datetime.datetime.now())
def __init__(self, **kwargs):
pathname = Column(String(80), unique=True)
position = Column(Integer, default=0)
partial_line = Column(Text, default='')
percentage = Column(Float, default=0.0)
message = Column(Text, default='')
severity = Column(
Enum('ERROR', 'WARNING', 'INFO'),
ColumnDefault('INFO')
)
line_matcher_name = Column(
String(80), default='start'
)
def __init__(self, pathname, **kwargs):
self.pathname = pathname
super(LogProgressingHistory, self).__init__(**kwargs)
def __repr__(self):

View File

@ -438,28 +438,33 @@ def _get_package_adapter_matcher(package_installer, target_system):
return None
def update_progress(os_installer, os_names, package_installer, target_systems,
cluster_hosts):
def update_progress(
os_installers, os_names, package_installers, target_systems,
cluster_hosts
):
"""Update adapter installing progress.
:param os_installer: os installer name
:param package_installer: package installer name.
:param os_installers: cluster id to os installer name
:param package_installers: cluster id to package installer name.
:param cluster_hosts: clusters and hosts in each cluster to update.
:param cluster_hosts: dict of int to list of int.
"""
for clusterid, hostids in cluster_hosts.items():
"""
adapter = _get_adapter_matcher(os_installer, os_names[clusterid],
package_installer,
adapter = _get_adapter_matcher(os_installers[clusterid],
os_names[clusterid],
package_installers[clusterid],
target_systems[clusterid])
if not adapter:
continue
adapter.update_progress(clusterid, hostids)
"""
os_adapter = _get_os_adapter_matcher(os_installer, os_names[clusterid])
os_adapter = _get_os_adapter_matcher(
os_installers[clusterid], os_names[clusterid]
)
package_adapter = _get_package_adapter_matcher(
package_installer,
package_installers[clusterid],
target_systems[clusterid]
)
if not (os_adapter or package_adapter):

View File

@ -131,6 +131,36 @@ def reset_host(host_id):
pass
@celery.task(name='compass.tasks.poweron_machine')
def poweron_machine(machine_id):
"""Deploy the given cluster.
:param cluster_hosts: the cluster and hosts of each cluster to deploy.
:type cluster_hosts: dict of int to list of int
"""
pass
@celery.task(name='compass.tasks.poweroff_machine')
def poweroff_machine(machine_id):
"""Deploy the given cluster.
:param cluster_hosts: the cluster and hosts of each cluster to deploy.
:type cluster_hosts: dict of int to list of int
"""
pass
@celery.task(name='compass.tasks.reset_machine')
def reset_machine(machine_id):
"""Deploy the given cluster.
:param cluster_hosts: the cluster and hosts of each cluster to deploy.
:type cluster_hosts: dict of int to list of int
"""
pass
@celery.task(name='compass.tasks.update_progress')
def update_clusters_progress(cluster_hosts):
"""Calculate the installing progress of the given cluster.

View File

@ -0,0 +1,3 @@
NAME = 'ceph'
PARENT = 'general'
DISTRIBUTED_SYSTEM = 'ceph'

View File

@ -0,0 +1,7 @@
NAME = 'ceph(chef)'
DSPLAY_NAME = 'ceph(ceph)'
PARENT = 'ceph'
PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
DEPLOYABLE = True

View File

@ -0,0 +1,7 @@
NAME = 'openstack_icehouse'
DISPLAY_NAME = 'OpenStack Icehouse'
PARENT = 'openstack'
PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
DEPLOYABLE = True

View File

@ -0,0 +1 @@
NAME = 'general'

View File

@ -0,0 +1,4 @@
NAME = 'openstack'
PARENT = 'general'
DISTRIBUTED_SYSTEM = 'openstack'
SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04']

View File

@ -0,0 +1,5 @@
NAME = 'os_only'
PARENT = 'general'
OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
DEPLOYABLE = True

View File

@ -0,0 +1,3 @@
NAME = 'ceph'
PARENT = 'general'
DEPLOYABLE = True

View File

@ -0,0 +1,2 @@
NAME = 'general'
PARENT = ''

View File

@ -0,0 +1,3 @@
NAME ='openstack'
PARENT = 'general'
DEPLOYABLE = True

View File

@ -0,0 +1,16 @@
ADAPTER_NAME = 'openstack_icehouse'
FLAVORS = [{
'flavor': 'allinone',
'display_name': 'allinone',
'template': 'allinone.tmpl',
'roles': ['allinone-compute']
}, {
'flavor': 'multiroles',
'display_name': 'multiroles',
'template': 'multiroles.tmpl',
'roles': [
'os-compute-worker', 'os-network', 'os-block-storage-worker',
'os-image', 'os-compute-vncproxy', 'os-controller',
'os-ops-messaging', 'os-ops-database', 'ha-proxy'
]
}]

View File

@ -0,0 +1,2 @@
NAME = 'CentOS'
PARENT = 'general'

View File

@ -0,0 +1,3 @@
NAME = 'CentOS-6.5-x86_64'
PARENT = 'CentOS'
DEPLOYABLE = True

View File

@ -0,0 +1,2 @@
NAME = 'general'
PARENT = ''

View File

@ -0,0 +1,2 @@
NAME = 'Ubuntu'
PARENT = 'general'

View File

@ -0,0 +1,3 @@
NAME = 'Ubuntu-12.04-x86_64'
PARENT = 'Ubuntu'
DEPLOYABLE = True

View File

@ -0,0 +1,2 @@
NAME = 'dns'
VALIDATOR = is_valid_dns

View File

@ -0,0 +1,2 @@
NAME = 'gateway'
VALIDATOR = is_valid_gateway

View File

@ -0,0 +1 @@
NAME = 'general'

View File

@ -0,0 +1,2 @@
NAME = 'general_list'
FIELD_TYPE = list

View File

@ -0,0 +1,2 @@
NAME = 'ip'
VALIDATOR = is_valid_ip

View File

@ -0,0 +1,2 @@
NAME = 'netmask'
VALIDATOR = is_valid_netmask

View File

@ -0,0 +1,2 @@
NAME = 'network'
VALIDATOR = is_valid_network

View File

@ -0,0 +1,3 @@
NAME = 'password'
VALIDATOR = is_valid_password
DESCRIPTION = 'password'

View File

@ -0,0 +1,3 @@
NAME = 'percentage'
FIELD_TYPE = int
VALIDATOR = is_valid_percentage

View File

@ -0,0 +1,2 @@
NAME = 'size'
VALIDATOR = is_valid_size

View File

@ -0,0 +1,3 @@
NAME = 'username'
VALIDATOR = is_valid_username
DESCRIPTION = 'username'

View File

@ -0,0 +1,9 @@
NAME = 'cobbler'
INSTANCE_NAME = 'cobbler'
SETTINGS = {
'cobbler_url': 'http://127.0.0.1/cobbler_api',
'credentials': {
'username': 'cobbler',
'password': 'cobbler'
}
}

View File

@ -0,0 +1,171 @@
OS = 'general'
METADATA = {
'general': {
'_self': {
'required_in_whole_config': True
},
'language': {
'_self': {
'field': 'general',
'default_value': 'EN',
'options': ['EN', 'CN'],
'mapping_to': 'language'
}
},
'timezone': {
'_self': {
'field': 'general',
'default_value': 'UTC',
'options': [
'America/New_York', 'America/Chicago',
'America/Los_Angeles', 'Asia/Shanghai',
'Asia/Tokyo', 'Europe/Paris',
'Europe/London', 'Europe/Moscow',
'Europe/Rome', 'Europe/Madrid',
'Europe/Berlin', 'UTC'
],
'mapping_to': 'timezone'
}
},
'http_proxy': {
'_self': {
'field': 'general',
'default_value': 'http://$ipaddr:3128',
'options': [
'http://$ipaddr:3128'
],
'mapping_to': 'http_proxy'
}
},
'https_proxy': {
'_self': {
'field': 'general',
'default_value': 'http://$ipaddr:3128',
'options': [
'http://$ipaddr:3128'
],
'mapping_to': 'https_proxy'
}
},
'no_proxy': {
'_self': {
'field': 'general_list',
'default_value': [
'127.0.0.1',
'$hostname',
'$ipaddr'
],
'options': [
'127.0.0.1',
'$hostname',
'$ipaddr'
],
'mapping_to': 'no_proxy'
}
},
'ntp_server': {
'_self': {
'is_required': True,
'field': 'general',
'default_value': '$ipaddr',
'options': [
'$ipaddr'
],
'mapping_to': 'ntp_server'
}
},
'dns_servers': {
'_self': {
'is_required': True,
'field': 'general_list',
'default_value': [
'$ipaddr',
],
'options': [
'$ipaddr'
],
'mapping_to': 'nameservers'
}
},
'domain': {
'_self': {
'field': 'general',
'is_required' : True,
'default_value': ['$domain'][0],
'options': ['$domain'],
}
},
'search_path': {
'_self': {
'field': 'general_list',
'default_value': [
'$domain'
],
'options': ['$domain'],
'mapping_to': 'search_path'
}
},
'default_gateway': {
'_self': {
'is_required': True,
'field': 'ip',
'default_value': '$gateway',
'mapping_to': 'gateway'
}
}
},
'server_credentials': {
'_self': {
'required_in_whole_config': True,
'mapping_to': 'server_credentials'
},
'username': {
'_self': {
'is_required': True,
'default_value': 'root',
'field': 'username',
'mapping_to': 'username'
}
},
'password': {
'_self': {
'is_required': True,
'default_value': 'root',
'field': 'password',
'mapping_to': 'password'
}
}
},
'partition': {
'_self': {
'required_in_whole_config': True,
'options': ['/boot', 'swap', '/var', '/home'],
'mapping_to': 'partition'
},
'$partition': {
'_self': {
'validator': is_valid_partition
},
'max_size': {
'_self': {
'field': 'size',
'mapping_to': 'max_vol_size'
},
},
'percentage': {
'_self': {
'field': 'percentage',
'default_value': 10,
'mapping_to': 'vol_percentage'
}
},
'size': {
'_self': {
'field': 'size',
'default_value': '1G',
'mapping_to': 'vol_size'
},
}
}
}
}

View File

@ -0,0 +1,2 @@
NAME = 'dns'
VALIDATOR = is_valid_dns

View File

@ -0,0 +1,2 @@
NAME = 'gateway'
VALIDATOR = is_valid_gateway

View File

@ -0,0 +1 @@
NAME = 'general'

View File

@ -0,0 +1,2 @@
NAME = 'ip address'
VALIDATOR = is_valid_ip

View File

@ -0,0 +1,2 @@
NAME = 'netmask'
VALIDATOR = is_valid_netmask

View File

@ -0,0 +1,2 @@
NAME = 'network'
VALIDATOR = is_valid_network

View File

@ -0,0 +1,3 @@
NAME = 'password'
VALIDATOR = is_valid_password
DESCRIPTION = 'password'

View File

@ -0,0 +1,3 @@
NAME = 'percentage'
FIELD_TYPE = int
VALIDATOR = is_valid_percentage

View File

@ -0,0 +1,3 @@
NAME = 'roles'
FIELD_TYPE = list
DESCRIPTION = 'roles'

View File

@ -0,0 +1,2 @@
NAME = 'size'
VALIDATOR = is_valid_size

View File

@ -0,0 +1,3 @@
NAME = 'username'
VALIDATOR = is_valid_username
DESCRIPTION = 'username'

View File

@ -0,0 +1,8 @@
NAME = 'chef_installer'
INSTANCE_NAME = 'chef_installer'
SETTINGS = {
'chef_url': 'https://127.0.0.1',
'key_dir': '',
'client_name': '',
'databags': ['user_passwords', 'db_passwords', 'service_passwords', 'secrets']
}

View File

@ -0,0 +1,58 @@
ADAPTER = 'openstack'
METADATA = {
'security': {
'_self': {
'required_in_whole_config': True,
},
'service_credentials': {
'_self': {
'mapping_to': 'service_credentials'
},
'$service': {
'username': {
'_self': {
'is_required': True,
'field': 'username',
'mapping_to': 'username'
}
},
'password': {
'_self': {
'is_required': True,
'field': 'password',
'mapping_to': 'password'
}
}
}
},
'console_credentials': {
'$console': {
'username': {
'_self': {
'is_required': True,
'field': 'username',
'mapping_to': 'username'
}
},
'password': {
'_self': {
'is_required': True,
'field': 'password',
'mapping_to': 'password'
}
}
}
}
},
'network_mapping': {
'_self': {
'required_in_whole_config': True
},
'$interface_type': {
'_self': {
'is_required': True,
'field': 'general'
}
}
}
}

View File

@ -0,0 +1,44 @@
ADAPTER_NAME = 'openstack_icehouse'
ROLES = [{
'role': 'os-compute-worker',
'display_name': 'compute node',
'description': 'compute node'
}, {
'role': 'os-network',
'display_name': 'network node',
'description': 'network node'
}, {
'role': 'os-block-storage-worker',
'display_name': 'storage node',
'description': 'storage node'
}, {
'role': 'os-image',
'display_name': 'image node',
'description': 'image node'
}, {
'role': 'os-compute-vncproxy',
'display_name': 'vnc proxy node',
'description': 'vnc proxy node'
}, {
'role': 'os-controller',
'display_name': 'controller node',
'description': 'controller node'
}, {
'role': 'os-ops-messaging',
'display_name': 'message queue node',
'description': 'message queue node'
}, {
'role': 'os-ops-database',
'display_name': 'database node',
'description': 'database node'
}, {
'role': 'ha-proxy',
'display_name': 'ha proxy node',
'description': 'ha proxy node',
'optional': True
}, {
'role': 'allinone-compute',
'display_name': 'all in one compute',
'description': 'all in one compute',
'optional': True
}]

View File

@ -30,7 +30,9 @@ reload(setting)
# from compass.api import app
from compass.db.api import adapter_holder as adapter_api
from compass.db.api import database
from compass.db.api import metadata_holder as metadata_api
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import util
@ -41,12 +43,19 @@ class ApiTestCase(unittest2.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
logsetting.init()
reload(setting)
setting.CONFIG_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data'
)
database.init('sqlite://')
database.create_db()
adapter_api.load_adapters()
metadata_api.load_metadatas()
def tearDown(self):
database.drop_db()
reload(setting)
super(ApiTestCase, self).tearDown()
def test_login(self):

View File

@ -0,0 +1,3 @@
NAME = 'ceph'
PARENT = 'general'
DISTRIBUTED_SYSTEM = 'ceph'

View File

@ -0,0 +1,7 @@
NAME = 'ceph(chef)'
DSPLAY_NAME = 'ceph(ceph)'
PARENT = 'ceph'
PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
DEPLOYABLE = True

View File

@ -0,0 +1,7 @@
NAME = 'openstack_icehouse'
DISPLAY_NAME = 'OpenStack Icehouse'
PARENT = 'openstack'
PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
DEPLOYABLE = True

View File

@ -0,0 +1 @@
NAME = 'general'

View File

@ -0,0 +1,4 @@
NAME = 'openstack'
PARENT = 'general'
DISTRIBUTED_SYSTEM = 'openstack'
SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04']

View File

@ -0,0 +1,5 @@
NAME = 'os_only'
PARENT = 'general'
OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos.*', '(?i)ubuntu.*']
DEPLOYABLE = True

View File

@ -0,0 +1,3 @@
NAME = 'ceph'
PARENT = 'general'
DEPLOYABLE = True

View File

@ -0,0 +1,2 @@
NAME = 'general'
PARENT = ''

View File

@ -0,0 +1,3 @@
NAME ='openstack'
PARENT = 'general'
DEPLOYABLE = True

View File

@ -0,0 +1,16 @@
ADAPTER_NAME = 'openstack_icehouse'
FLAVORS = [{
'flavor': 'allinone',
'display_name': 'allinone',
'template': 'allinone.tmpl',
'roles': ['allinone-compute']
}, {
'flavor': 'multiroles',
'display_name': 'multiroles',
'template': 'multiroles.tmpl',
'roles': [
'os-compute-worker', 'os-network', 'os-block-storage-worker',
'os-image', 'os-compute-vncproxy', 'os-controller',
'os-ops-messaging', 'os-ops-database', 'ha-proxy'
]
}]

View File

@ -0,0 +1,2 @@
NAME = 'CentOS'
PARENT = 'general'

View File

@ -0,0 +1,3 @@
NAME = 'CentOS-6.5-x86_64'
PARENT = 'CentOS'
DEPLOYABLE = True

View File

@ -0,0 +1,2 @@
NAME = 'general'
PARENT = ''

View File

@ -0,0 +1,2 @@
NAME = 'Ubuntu'
PARENT = 'general'

View File

@ -0,0 +1,3 @@
NAME = 'Ubuntu-12.04-x86_64'
PARENT = 'Ubuntu'
DEPLOYABLE = True

View File

@ -0,0 +1,2 @@
NAME = 'dns'
VALIDATOR = is_valid_dns

View File

@ -0,0 +1,2 @@
NAME = 'gateway'
VALIDATOR = is_valid_gateway

View File

@ -0,0 +1 @@
NAME = 'general'

View File

@ -0,0 +1,2 @@
NAME = 'general_list'
FIELD_TYPE = list

View File

@ -0,0 +1,2 @@
NAME = 'ip'
VALIDATOR = is_valid_ip

View File

@ -0,0 +1,2 @@
NAME = 'netmask'
VALIDATOR = is_valid_netmask

View File

@ -0,0 +1,2 @@
NAME = 'network'
VALIDATOR = is_valid_network

View File

@ -0,0 +1,3 @@
NAME = 'password'
VALIDATOR = is_valid_password
DESCRIPTION = 'password'

View File

@ -0,0 +1,3 @@
NAME = 'percentage'
FIELD_TYPE = int
VALIDATOR = is_valid_percentage

View File

@ -0,0 +1,2 @@
NAME = 'size'
VALIDATOR = is_valid_size

View File

@ -0,0 +1,3 @@
NAME = 'username'
VALIDATOR = is_valid_username
DESCRIPTION = 'username'

View File

@ -0,0 +1,9 @@
NAME = 'cobbler'
INSTANCE_NAME = 'cobbler'
SETTINGS = {
'cobbler_url': 'http://127.0.0.1/cobbler_api',
'credentials': {
'username': 'cobbler',
'password': 'cobbler'
}
}

View File

@ -0,0 +1,171 @@
OS = 'general'
METADATA = {
'general': {
'_self': {
'required_in_whole_config': True
},
'language': {
'_self': {
'field': 'general',
'default_value': 'EN',
'options': ['EN', 'CN'],
'mapping_to': 'language'
}
},
'timezone': {
'_self': {
'field': 'general',
'default_value': 'UTC',
'options': [
'America/New_York', 'America/Chicago',
'America/Los_Angeles', 'Asia/Shanghai',
'Asia/Tokyo', 'Europe/Paris',
'Europe/London', 'Europe/Moscow',
'Europe/Rome', 'Europe/Madrid',
'Europe/Berlin', 'UTC'
],
'mapping_to': 'timezone'
}
},
'http_proxy': {
'_self': {
'field': 'general',
'default_value': 'http://$ipaddr:3128',
'options': [
'http://$ipaddr:3128'
],
'mapping_to': 'http_proxy'
}
},
'https_proxy': {
'_self': {
'field': 'general',
'default_value': 'http://$ipaddr:3128',
'options': [
'http://$ipaddr:3128'
],
'mapping_to': 'https_proxy'
}
},
'no_proxy': {
'_self': {
'field': 'general_list',
'default_value': [
'127.0.0.1',
'$hostname',
'$ipaddr'
],
'options': [
'127.0.0.1',
'$hostname',
'$ipaddr'
],
'mapping_to': 'no_proxy'
}
},
'ntp_server': {
'_self': {
'is_required': True,
'field': 'general',
'default_value': '$ipaddr',
'options': [
'$ipaddr'
],
'mapping_to': 'ntp_server'
}
},
'dns_servers': {
'_self': {
'is_required': True,
'field': 'general_list',
'default_value': [
'$ipaddr',
],
'options': [
'$ipaddr'
],
'mapping_to': 'nameservers'
}
},
'domain': {
'_self': {
'field': 'general',
'is_required' : True,
'default_value': ['$domain'][0],
'options': ['$domain'],
}
},
'search_path': {
'_self': {
'field': 'general_list',
'default_value': [
'$domain'
],
'options': ['$domain'],
'mapping_to': 'search_path'
}
},
'default_gateway': {
'_self': {
'is_required': True,
'field': 'ip',
'default_value': '$gateway',
'mapping_to': 'gateway'
}
}
},
'server_credentials': {
'_self': {
'required_in_whole_config': True,
'mapping_to': 'server_credentials'
},
'username': {
'_self': {
'is_required': True,
'default_value': 'root',
'field': 'username',
'mapping_to': 'username'
}
},
'password': {
'_self': {
'is_required': True,
'default_value': 'root',
'field': 'password',
'mapping_to': 'password'
}
}
},
'partition': {
'_self': {
'required_in_whole_config': True,
'options': ['/boot', 'swap', '/var', '/home'],
'mapping_to': 'partition'
},
'$partition': {
'_self': {
'validator': is_valid_partition
},
'max_size': {
'_self': {
'field': 'size',
'mapping_to': 'max_vol_size'
},
},
'percentage': {
'_self': {
'field': 'percentage',
'default_value': 10,
'mapping_to': 'vol_percentage'
}
},
'size': {
'_self': {
'field': 'size',
'default_value': '1G',
'mapping_to': 'vol_size'
},
}
}
}
}

View File

@ -0,0 +1,2 @@
NAME = 'dns'
VALIDATOR = is_valid_dns

View File

@ -0,0 +1,2 @@
NAME = 'gateway'
VALIDATOR = is_valid_gateway

View File

@ -0,0 +1 @@
NAME = 'general'

View File

@ -0,0 +1,2 @@
NAME = 'ip address'
VALIDATOR = is_valid_ip

View File

@ -0,0 +1,2 @@
NAME = 'netmask'
VALIDATOR = is_valid_netmask

View File

@ -0,0 +1,2 @@
NAME = 'network'
VALIDATOR = is_valid_network

View File

@ -0,0 +1,3 @@
NAME = 'password'
VALIDATOR = is_valid_password
DESCRIPTION = 'password'

View File

@ -0,0 +1,3 @@
NAME = 'percentage'
FIELD_TYPE = int
VALIDATOR = is_valid_percentage

View File

@ -0,0 +1,3 @@
NAME = 'roles'
FIELD_TYPE = list
DESCRIPTION = 'roles'

View File

@ -0,0 +1,2 @@
NAME = 'size'
VALIDATOR = is_valid_size

View File

@ -0,0 +1,3 @@
NAME = 'username'
VALIDATOR = is_valid_username
DESCRIPTION = 'username'

View File

@ -0,0 +1,8 @@
NAME = 'chef_installer'
INSTANCE_NAME = 'chef_installer'
SETTINGS = {
'chef_url': 'https://127.0.0.1',
'key_dir': '',
'client_name': '',
'databags': ['user_passwords', 'db_passwords', 'service_passwords', 'secrets']
}

Some files were not shown because too many files have changed in this diff Show More