move adapter from db to memory

Change-Id: I366052e23d72dd94229513d6a0992338d0d44638
Authored by xiaodongwang on 2015-06-25 13:56:20 -07:00; committed by Xicheng Chang
parent 12fe72fa73
commit 14ad281057
109 changed files with 5972 additions and 4746 deletions


@@ -75,7 +75,7 @@ def clean_installers():
             filtered_os_installers[os_installer_name] = os_installer
         else:
             logging.info(
-                'ignore os isntaller %s', os_installer_name
+                'ignore os installer %s', os_installer_name
             )
     else:
         logging.info(


@@ -75,9 +75,6 @@ flags.add('adapter_name',
 flags.add('adapter_os_pattern',
           help='adapter os name',
           default=r'^(?i)centos.*')
-flags.add('adapter_target_system_pattern',
-          help='adapter target system name',
-          default='^openstack$')
 flags.add('adapter_flavor_pattern',
           help='adapter flavor name',
           default='allinone')
@@ -342,120 +339,58 @@ def _poll_switches(client):
 def _get_adapter(client):
     """get adapter."""
-    status, resp = client.list_adapters()
+    adapter_name = flags.OPTIONS.adapter_name
+    status, resp = client.list_adapters(
+        name=adapter_name
+    )
     logging.info(
-        'get all adapters status: %s, resp: %s',
-        status, resp
+        'get all adapters for name %s status: %s, resp: %s',
+        adapter_name, status, resp
     )
     if status >= 400:
         msg = 'failed to get adapters'
         raise Exception(msg)
-    adapter_name = flags.OPTIONS.adapter_name
+    if not resp:
+        msg = 'no adapter found'
+        raise Exception(msg)
+    adapter = resp[0]
     os_pattern = flags.OPTIONS.adapter_os_pattern
     if os_pattern:
         os_re = re.compile(os_pattern)
     else:
         os_re = None
-    target_system_pattern = flags.OPTIONS.adapter_target_system_pattern
-    if target_system_pattern:
-        target_system_re = re.compile(target_system_pattern)
-    else:
-        target_system_re = None
     flavor_pattern = flags.OPTIONS.adapter_flavor_pattern
     if flavor_pattern:
         flavor_re = re.compile(flavor_pattern)
     else:
         flavor_re = None
-    adapter_id = None
+    adapter_id = adapter['id']
     os_id = None
-    distributed_system_id = None
     flavor_id = None
-    adapter = None
-    for item in resp:
-        adapter_id = None
-        os_id = None
-        flavor_id = None
-        adapter = item
-        for supported_os in adapter['supported_oses']:
-            if not os_re or os_re.match(supported_os['name']):
-                os_id = supported_os['os_id']
-                break
-        if not os_id:
-            logging.info('no os found for adapter %s', adapter)
-            continue
-        if 'flavors' in adapter:
-            for flavor in adapter['flavors']:
-                if not flavor_re or flavor_re.match(flavor['name']):
-                    flavor_id = flavor['id']
-                    break
-        if adapter_name:
-            if adapter['name'] == adapter_name:
-                adapter_id = adapter['id']
-                logging.info('adapter name %s matches: %s',
-                             adapter_name, adapter)
-            else:
-                logging.info('adapter name %s does not match %s',
-                             adapter_name, adapter)
-        elif (
-            'distributed_system_name' in item and
-            adapter['distributed_system_name']
-        ):
-            if (
-                target_system_re and
-                target_system_re.match(adapter['distributed_system_name'])
-            ):
-                adapter_id = adapter['id']
-                logging.info(
-                    'distributed system name pattern %s matches: %s',
-                    target_system_pattern, adapter
-                )
-            else:
-                logging.info(
-                    'distributed system name pattern %s does not match: %s',
-                    target_system_pattern, adapter
-                )
-        else:
-            if not target_system_re:
-                adapter_id = adapter['id']
-                logging.info(
-                    'os only adapter matches no target_system_pattern'
-                )
-            else:
-                logging.info(
-                    'distributed system name pattern defined '
-                    'but the adapter does not have '
-                    'distributed_system_name attributes'
-                )
-        if adapter_id and target_system_re:
-            distributed_system_id = adapter['distributed_system_id']
-        if adapter_id:
-            logging.info('adadpter matches: %s', adapter)
-            break
-    if not adapter_id:
-        msg = 'no adapter found'
-        raise Exception(msg)
+    for supported_os in adapter['supported_oses']:
+        if not os_re or os_re.match(supported_os['name']):
+            os_id = supported_os['os_id']
+            break
+    if 'flavors' in adapter:
+        for flavor in adapter['flavors']:
+            if not flavor_re or flavor_re.match(flavor['name']):
+                flavor_id = flavor['id']
+                break
     if not os_id:
         msg = 'no os found for %s' % os_pattern
         raise Exception(msg)
-    if target_system_re and not distributed_system_id:
-        msg = 'no distributed system found for %s' % target_system_pattern
-        raise Exception(msg)
     if flavor_re and not flavor_id:
         msg = 'no flavor found for %s' % flavor_pattern
         raise Exception(msg)
     logging.info('adapter for deploying a cluster: %s', adapter_id)
-    return (adapter_id, os_id, distributed_system_id, flavor_id)
+    return (adapter_id, os_id, flavor_id)


 def _add_subnets(client):
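The rewrite above moves adapter filtering to the server side: the client asks for adapters by name and takes the first result, then matches os and flavor patterns locally. A minimal sketch of the new selection logic against a canned response (the dict shape follows the API examples later in this commit; the values are illustrative):

    import re

    resp = [{
        'id': 3,
        'name': 'openstack_icehouse',
        'supported_oses': [{'os_id': 2, 'name': 'CentOS-6.5-x86_64'}],
        'flavors': [{'id': 4, 'name': 'allinone'}],
    }]

    adapter = resp[0]
    # equivalent to the script's r'^(?i)centos.*' default
    os_re = re.compile('centos.*', re.IGNORECASE)
    flavor_re = re.compile('allinone')

    adapter_id = adapter['id']
    os_id = next((os['os_id'] for os in adapter['supported_oses']
                  if os_re.match(os['name'])), None)
    flavor_id = next((flavor['id'] for flavor in adapter['flavors']
                      if flavor_re.match(flavor['name'])), None)
    assert (adapter_id, os_id, flavor_id) == (3, 2, 4)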
@@ -1059,14 +994,14 @@ def main():
     machines = _get_machines(client)
     logging.info('machines are %s', machines)
     subnet_mapping = _add_subnets(client)
-    adapter_id, os_id, distributed_system_id, flavor_id = _get_adapter(client)
+    adapter_id, os_id, flavor_id = _get_adapter(client)
     cluster_id, host_mapping, role_mapping = _add_cluster(
         client, adapter_id, os_id, flavor_id, machines)
     host_ips = _set_host_networking(
         client, host_mapping, subnet_mapping
     )
     _set_cluster_os_config(client, cluster_id, host_ips)
-    if distributed_system_id:
+    if flavor_id:
         _set_cluster_package_config(client, cluster_id)
         if role_mapping:
             _set_hosts_roles(client, cluster_id, host_mapping, role_mapping)


@@ -28,10 +28,15 @@ def delete_cluster(
     cluster_id, host_id_list,
     username=None, delete_underlying_host=False
 ):
-    """Delete cluster.
+    """Delete cluster and all clusterhosts on it.

     :param cluster_id: id of the cluster.
     :type cluster_id: int
+    :param host_id_list: list of host id.
+    :type host_id_list: list of int.
+
+    If delete_underlying_host is set, all underlying hosts will
+    be deleted.

     .. note::
        The function should be called out of database session.
@@ -66,6 +71,19 @@ def delete_cluster_host(
     cluster_id, host_id,
     username=None, delete_underlying_host=False
 ):
+    """Delete clusterhost.
+
+    :param cluster_id: id of the cluster.
+    :type cluster_id: int
+    :param host_id: id of the host.
+    :type host_id: int
+
+    If delete_underlying_host is set, the underlying host
+    will be deleted too.
+
+    .. note::
+       The function should be called out of database session.
+    """
     with util.lock('serialized_action', timeout=100) as lock:
         if not lock:
             raise Exception('failed to acquire lock to delete clusterhost')
@@ -94,6 +112,14 @@ def delete_host(
 def delete_host(
     host_id, cluster_id_list, username=None
 ):
+    """Delete host and all clusterhosts on it.
+
+    :param host_id: id of the host.
+    :type host_id: int
+
+    .. note::
+       The function should be called out of database session.
+    """
     with util.lock('serialized_action', timeout=100) as lock:
         if not lock:
             raise Exception('failed to acquire lock to delete host')


@@ -128,16 +128,17 @@ def health_check(cluster_id, report_uri, username):
     except Exception as exc:
         logging.error("health_check exception: ============= %s" % exc)
         data = {'state': 'error', 'error_message': str(exc), 'report': {}}
-        reports = health_check_db.list_health_reports(user, cluster_id)
+        reports = health_check_db.list_health_reports(
+            cluster_id, user=user)
         if not reports:
             # Exception before executing command remotely for health check.
             # No report names have been sent back yet; create a report.
             name = 'pre_remote_health_check'
             health_check_db.add_report_record(
-                cluster_id, name=name, **data
+                cluster_id, name, user=user, **data
             )

-    health_check_db.update_multi_reports(cluster_id, **data)
+    health_check_db.update_multi_reports(cluster_id, user=user, **data)


 class ServerPowerMgmt(object):


@@ -14,9 +14,7 @@
 """Base class for Compass Health Check."""
 from compass.actions.health_check import utils as health_check_utils
-from compass.db.api import database
-from compass.db.api import utils
-from compass.db import models
+from compass.db.api import adapter as adapter_api
 from compass.utils import setting_wrapper as setting
@@ -28,30 +26,25 @@ class BaseCheck(object):
         self.code = 1
         self.messages = []
         self.dist, self.version, self.release = health_check_utils.get_dist()
+        adapter_api.load_adapters_internal()
         self.os_installer = self._get_os_installer()
         self.package_installer = self._get_package_installer()

     def _get_os_installer(self):
-        with database.session() as session:
-            installer = utils.get_db_object(
-                session, models.OSInstaller
-            )
-            os_installer = {}
-            os_installer['name'] = health_check_utils.strip_name(
-                installer.name)
-            os_installer.update(installer.settings)
+        installer = adapter_api.OS_INSTALLERS.values()[0]
+        os_installer = {}
+        os_installer['name'] = health_check_utils.strip_name(
+            installer['name'])
+        os_installer.update(installer['settings'])
         return os_installer

     def _get_package_installer(self):
         package_installer = {}
-        with database.session() as session:
-            installer = session.query(
-                models.PackageInstaller
-            ).first()
-            package_installer = {}
-            package_installer['name'] = health_check_utils.strip_name(
-                installer.name)
-            package_installer.update(installer.settings)
+        installer = adapter_api.PACKAGE_INSTALLERS.values()[0]
+        package_installer = {}
+        package_installer['name'] = health_check_utils.strip_name(
+            installer['name'])
+        package_installer.update(installer['settings'])
         return package_installer

     def _set_status(self, code, message):
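With adapters held in module-level dicts, the health check reads the first configured installer directly instead of opening a database session. Note that `OS_INSTALLERS.values()[0]` relies on Python 2, where `dict.values()` returns a list; a version-neutral sketch with an illustrative installer entry:

    # Stand-in for adapter_api.OS_INSTALLERS after load_adapters_internal();
    # the key and settings below are illustrative, not from a real config dir.
    OS_INSTALLERS = {
        'cobbler': {
            'alias': 'cobbler',
            'id': 'cobbler',
            'name': 'cobbler',
            'settings': {'cobbler_url': 'http://127.0.0.1/cobbler_api'},
        },
    }

    installer = list(OS_INSTALLERS.values())[0]  # works on Python 2 and 3
    os_installer = {'name': installer['name']}   # real code applies strip_name()
    os_installer.update(installer['settings'])
    assert os_installer['cobbler_url'] == 'http://127.0.0.1/cobbler_api'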


@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Module to reinstall a given cluster
+"""Module to receive installation callback.

    .. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
 """
@@ -34,6 +34,8 @@ def os_installed(
     :param host_id: host that os is installed.
     :type host_id: integer
+    :param clusterhosts_ready: the clusterhosts that should trigger ready.
+    :param clusters_os_ready: the clusters that should trigger os ready.

     .. note::
        The function should be called out of database session.
@@ -110,6 +112,11 @@ def package_installed(
 ):
     """Callback when package is installed.

+    :param cluster_id: cluster id.
+    :param host_id: host id.
+    :param cluster_ready: if the cluster should trigger ready.
+    :param host_ready: if the host should trigger ready.
+
     .. note::
        The function should be called out of database session.
     """
@@ -153,6 +160,9 @@ def cluster_installed(
 ):
     """Callback when cluster is installed.

+    :param cluster_id: cluster id.
+    :param clusterhosts_ready: clusterhosts that should trigger ready.
+
     .. note::
        The function should be called out of database session.
     """


@@ -24,6 +24,15 @@ from compass.hdsdiscovery.hdmanager import HDManager

 def _poll_switch(ip_addr, credentials, req_obj='mac', oper="SCAN"):
+    """Poll switch by ip addr.
+
+    Args:
+        ip_addr: ip addr of the switch.
+        credentials: credentials of the switch.
+
+    Returns: switch attributes dict and list of machine attributes dict.
+    """
     under_monitoring = 'under_monitoring'
     unreachable = 'unreachable'
     polling_error = 'error'
@@ -124,6 +133,12 @@ def poll_switch(poller_email, ip_addr, credentials,
             'failed to acquire lock to poll switch %s' % ip_addr
         )

+    # TODO(grace): before repolling the switch, set the state to REPOLLING,
+    # and when the switch poll times out, set the state to error.
+    # The frontend should only consider the main states like INITIALIZED,
+    # ERROR and SUCCESSFUL; REPOLLING is an intermediate state to
+    # indicate the switch is learning the macs of the machines
+    # connected to it.
     logging.debug('poll switch: %s', ip_addr)
     switch_dict, machine_dicts = _poll_switch(
         ip_addr, credentials, req_obj=req_obj, oper=oper


@@ -147,19 +147,6 @@ def update_progress():
             )
             continue
         clusterhost_id = clusterhost['clusterhost_id']
-        if 'distributed_system_name' not in clusterhost:
-            logging.error(
-                'distributed_system_name is not in clusterhost %s',
-                clusterhost
-            )
-            continue
-        clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
-        if clusterhost_dirname not in clusterhost:
-            logging.error(
-                '%s is not in clusterhost %s',
-                clusterhost_dirname, clusterhost
-            )
-            continue
         if 'cluster_id' not in clusterhost:
             logging.error(
                 'cluster_id not in clusterhost %s',
@@ -176,6 +163,19 @@ def update_progress():
             )
             continue
         cluster, _ = cluster_mapping[cluster_id]
+        if 'flavor_name' not in cluster:
+            logging.error(
+                'flavor_name is not in clusterhost %s related cluster',
+                clusterhost
+            )
+            continue
+        clusterhost_dirname = setting.CLUSTERHOST_INATALLATION_LOGDIR_NAME
+        if clusterhost_dirname not in clusterhost:
+            logging.error(
+                '%s is not in clusterhost %s',
+                clusterhost_dirname, clusterhost
+            )
+            continue
         adapter_id = cluster['adapter_id']
         if adapter_id not in adapter_mapping:
             logging.info(
@@ -196,6 +196,7 @@ def update_progress():
             continue
         package_installer = adapter['package_installer']
         clusterhost['package_installer'] = package_installer
+        clusterhost['adapter_name'] = adapter['name']
         clusterhost_state = cluster_api.get_clusterhost_self_state(
             clusterhost_id, user=user
         )


@@ -30,6 +30,14 @@ from compass.deployment.utils import constants as const

 @contextmanager
 def lock(lock_name, blocking=True, timeout=10):
+    """Acquire a lock to do some actions.
+
+    The lock is identified by lock_name across the whole
+    distributed system.
+    """
+    # TODO(xicheng): in the future we should explicitly say which redis
+    # server we want to talk to, to make the lock work on distributed
+    # systems.
     redis_instance = redis.Redis()
     instance_lock = redis_instance.lock(lock_name, timeout=timeout)
     owned = False
@@ -220,6 +228,7 @@ class ActionHelper(object):

     @staticmethod
     def save_deployed_config(deployed_config, user):
+        """Save deployed config."""
         cluster_config = deployed_config[const.CLUSTER]
         cluster_id = cluster_config[const.ID]
         del cluster_config[const.ID]
@@ -259,6 +268,11 @@ class ActionHelper(object):
     def delete_cluster(
         cluster_id, host_id_list, user, delete_underlying_host=False
     ):
+        """Delete cluster.
+
+        If delete_underlying_host is set, underlying hosts will also
+        be deleted.
+        """
         if delete_underlying_host:
             for host_id in host_id_list:
                 host_db.del_host(
@@ -272,6 +286,10 @@ class ActionHelper(object):
     def delete_cluster_host(
         cluster_id, host_id, user, delete_underlying_host=False
     ):
+        """Delete clusterhost.
+
+        If delete_underlying_host is set, also delete the underlying host.
+        """
         if delete_underlying_host:
             host_db.del_host(
                 host_id, True, True, user=user
@@ -288,6 +306,7 @@ class ActionHelper(object):

     @staticmethod
     def host_ready(host_id, from_database_only, user):
+        """Trigger host ready."""
         host_db.update_host_state_internal(
             host_id, from_database_only=from_database_only,
             user=user, ready=True
@@ -297,6 +316,7 @@ class ActionHelper(object):
     def cluster_host_ready(
         cluster_id, host_id, from_database_only, user
     ):
+        """Trigger clusterhost ready."""
         cluster_db.update_cluster_host_state_internal(
             cluster_id, host_id, from_database_only=from_database_only,
             user=user, ready=True
@@ -304,6 +324,7 @@ class ActionHelper(object):

     @staticmethod
     def cluster_ready(cluster_id, from_database_only, user):
+        """Trigger cluster ready."""
         cluster_db.update_cluster_state_internal(
             cluster_id, from_database_only=from_database_only,
             user=user, ready=True
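The `lock` helper documented above wraps redis-py's built-in lock in a context manager that yields the lock on success and `False` on failure. A self-contained sketch of the pattern (a Redis server running on localhost is assumed):

    from contextlib import contextmanager

    import redis


    @contextmanager
    def lock(lock_name, blocking=True, timeout=10):
        # Yield the acquired lock, or False if it could not be acquired.
        redis_instance = redis.Redis()
        instance_lock = redis_instance.lock(lock_name, timeout=timeout)
        owned = False
        try:
            owned = instance_lock.acquire(blocking=blocking)
            yield instance_lock if owned else False
        finally:
            if owned:
                instance_lock.release()


    with lock('serialized_action', timeout=100) as my_lock:
        if not my_lock:
            raise Exception('failed to acquire lock to do action')
        # ... do the serialized work here ...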

File diff suppressed because it is too large.


@@ -1510,7 +1510,6 @@ mediaType: application/json
                     "cobbler_url": "http://10.145.88.211/cobbler_api"
                   }
                 },
-                "distributed_system_id": 1,
                 "supported_oses": [
                   {
                     "os_id": 1,
@@ -1523,13 +1522,11 @@ mediaType: application/json
                     "name": "CentOS-6.5-x86_64"
                   }
                 ],
-                "distributed_system_name": "openstack",
                 "display_name": "OpenStack Icehouse",
                 "id": 3
               }]
       queryParameters:
         name:
-        distributed_system_name:
       description: Lists information for all adapters
       headers:
         Access-token:
@ -1870,44 +1867,41 @@ mediaType: application/json
application/json: application/json:
example: | example: |
[ [
{ {
"created_at": "2014-10-18 23:01:23", "created_at": "2014-10-18 23:01:23",
"os_name": "CentOS-6.5-x86_64", "os_name": "CentOS-6.5-x86_64",
"name": "cluster1", "name": "cluster1",
"reinstall_distributed_system": true, "reinstall_distributed_system": true,
"distributed_system_id": 1, "adapter_id": 3,
"adapter_id": 3, "updated_at": "2014-10-18 23:01:23",
"updated_at": "2014-10-18 23:01:23", "owner": "admin@huawei.com",
"owner": "admin@huawei.com", "os_id": 2,
"os_id": 2, "distributed_system_installed": false,
"distributed_system_name": "openstack", "flavor": {
"distributed_system_installed": false, "display_name": "All-In-One",
"flavor": { "name": "allinone",
"display_name": "All-In-One", "roles": [
"name": "allinone", {
"roles": [ "display_name": "all in one compute",
{ "description": "all in one compute",
"display_name": "all in one compute", "adapter_id": 3,
"description": "all in one compute", "role_id": 35,
"adapter_id": 3, "flavor_id": 4,
"role_id": 35, "optional": true,
"flavor_id": 4, "id": 35,
"optional": true, "name": "allinone-compute"
"id": 35, }
"name": "allinone-compute" ],
} "adapter_id": 3,
], "template": "allinone.tmpl",
"adapter_id": 3, "id": 4
"template": "allinone.tmpl", },
"id": 4 "id": 1
}, }
"id": 1
}
] ]
queryParameters: queryParameters:
name: name:
os_name: os_name:
distributed_system_name:
owner: owner:
adapter_name: adapter_name:
flavor_name: flavor_name:
@@ -1937,12 +1931,10 @@ mediaType: application/json
                 "os_name": "CentOS-6.5-x86_64",
                 "name": "cluster1",
                 "reinstall_distributed_system": true,
-                "distributed_system_id": 1,
                 "adapter_id": 3,
                 "updated_at": "2014-10-18 23:01:23",
                 "owner": "admin@huawei.com",
                 "os_id": 2,
-                "distributed_system_name": "openstack",
                 "distributed_system_installed": false,
                 "flavor": {
                   "display_name": "All-In-One",
@@ -1990,12 +1982,10 @@ mediaType: application/json
                 "os_name": "CentOS-6.5-x86_64",
                 "name": "cluster1",
                 "reinstall_distributed_system": true,
-                "distributed_system_id": 1,
                 "adapter_id": 3,
                 "updated_at": "2014-10-18 23:01:23",
                 "owner": "admin@huawei.com",
                 "os_id": 2,
-                "distributed_system_name": "openstack",
                 "distributed_system_installed": false,
                 "flavor": {
                   "display_name": "All-In-One",
@@ -2048,12 +2038,10 @@ mediaType: application/json
                 "os_name": "CentOS-6.5-x86_64",
                 "name": "cluster_new",
                 "reinstall_distributed_system": true,
-                "distributed_system_id": 1,
                 "adapter_id": 3,
                 "updated_at": "2014-10-18 23:16:39",
                 "owner": "admin@huawei.com",
                 "os_id": 2,
-                "distributed_system_name": "openstack",
                 "distributed_system_installed": false,
                 "flavor": {
                   "display_name": "All-In-One",
@@ -2100,12 +2088,10 @@ mediaType: application/json
                 "os_name": "CentOS-6.5-x86_64",
                 "name": "cluster1",
                 "reinstall_distributed_system": true,
-                "distributed_system_id": 1,
                 "adapter_id": 3,
                 "updated_at": "2014-10-18 23:01:23",
                 "owner": "admin@huawei.com",
                 "os_id": 2,
-                "distributed_system_name": "openstack",
                 "distributed_system_installed": false,
                 "flavor": {
                   "display_name": "All-In-One",
@@ -2454,7 +2440,6 @@ mediaType: application/json
                 "owner": "admin@huawei.com",
                 "port": "4",
                 "location": {},
-                "distributed_system_name": "openstack",
                 "os_name": "CentOS-6.5-x86_64",
                 "reinstall_distributed_system": true,
                 "mac": "00:0c:29:2b:c9:d4",
@@ -2568,7 +2553,6 @@ mediaType: application/json
                 "owner": "admin@huawei.com",
                 "port": "4",
                 "location": {},
-                "distributed_system_name": "openstack",
                 "os_name": "CentOS-6.5-x86_64",
                 "reinstall_distributed_system": true,
                 "mac": "00:0c:29:2b:c9:d4",
@@ -2650,7 +2634,6 @@ mediaType: application/json
                 "owner": "admin@huawei.com",
                 "port": "4",
                 "location": {},
-                "distributed_system_name": "openstack",
                 "os_name": "CentOS-6.5-x86_64",
                 "reinstall_distributed_system": true,
                 "mac": "00:0c:29:2b:c9:d4",
@@ -3336,7 +3319,6 @@ mediaType: application/json
                 "created_at": "2014-10-18 23:16:02",
                 "adapter_id": 3,
                 "updated_at": "2014-10-18 23:16:39",
-                "distributed_system_name": "openstack",
                 "owner": "admin@huawei.com",
                 "distributed_system_installed": false,
                 "id": 2


@@ -77,7 +77,7 @@ PRESET_VALUES = {
     'GATEWAY': '10.145.88.1',
     'PROXY': 'http://10.145.89.100:3128',
     'OS_NAME_PATTERN': 'CentOS.*',
-    'DISTRIBUTED_SYSTEM_NAME_PATTERN': 'openstack.*',
+    'ADAPTER_NAME': 'openstack_icehouse',
     'FLAVOR_PATTERN': 'allinone.*',
     'ROLES_LIST': ['allinone-compute'],
     'MACHINES_TO_ADD': ['00:0c:29:a7:ea:4b'],
@@ -185,14 +185,11 @@ adapters = response
 adapter_id = None
 os_id = None
 flavor_id = None
-adapter_pattern = re.compile(PRESET_VALUES['DISTRIBUTED_SYSTEM_NAME_PATTERN'])
+adapter_name = PRESET_VALUES['ADAPTER_NAME']
 os_pattern = re.compile(PRESET_VALUES['OS_NAME_PATTERN'])
 flavor_pattern = re.compile(PRESET_VALUES['FLAVOR_PATTERN'])

 for adapter in adapters:
-    if (
-        'distributed_system_name' in adapter and
-        adapter_pattern.match(adapter['distributed_system_name'])
-    ):
+    if adapter_name == adapter['name']:
         adapter_id = adapter['id']
         for supported_os in adapter['supported_oses']:
             if os_pattern.match(supported_os['name']):
@@ -201,7 +198,6 @@ for adapter in adapters:
         for flavor in adapter['flavors']:
             if flavor_pattern.match(flavor['name']):
                 flavor_id = flavor['id']
-
     if adapter_id and os_id and flavor_id:
         break


@@ -490,21 +490,11 @@ class Client(object):
     def delete_subnet(self, subnet_id):
         return self._delete('/subnets/%s' % subnet_id)

-    def list_adapters(self, name=None, distributed_system_name=None,
-                      os_installer_name=None, package_installer_name=None):
+    def list_adapters(self, name=None):
         data = {}
         if name:
             data['name'] = name
-        if distributed_system_name:
-            data['distributed_system_name'] = distributed_system_name
-        if os_installer_name:
-            data['os_installer_name'] = os_installer_name
-        if package_installer_name:
-            data['package_installer_name'] = package_installer_name
         return self._get('/adapters', data=data)

     def get_adapter(self, adapter_id):
@@ -520,7 +510,7 @@ class Client(object):
         return self._get('/oses/%s/metadata' % os_id)

     def list_clusters(self, name=None, os_name=None,
-                      distributed_system_name=None, owner=None,
+                      owner=None,
                       adapter_id=None):
         data = {}
         if name:
@@ -529,9 +519,6 @@ class Client(object):
         if os_name:
             data['os_name'] = os_name
-        if distributed_system_name:
-            data['distributed_system_name'] = distributed_system_name
         if owner:
             data['owner'] = owner
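Callers of the Python API client drop the removed filters accordingly; a usage sketch (the server URL is a placeholder, and the import path is assumed from the client module shown here):

    from compass.apiclient.restful import Client

    client = Client('http://127.0.0.1')
    # passing distributed_system_name or installer filters would now
    # raise TypeError, since those keyword arguments were removed
    status, resp = client.list_adapters(name='openstack_icehouse')
    status, resp = client.list_clusters(
        name='cluster1', owner='admin@huawei.com'
    )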


@@ -25,174 +25,289 @@ from compass.utils import setting_wrapper as setting
 from compass.utils import util


-def _add_system(session, model, configs, exception_when_existing=True):
-    parents = {}
-    for config in configs:
-        logging.info(
-            'add config %s to %s',
-            config, model
-        )
-        object = utils.add_db_object(
-            session, model,
-            exception_when_existing, config['NAME'],
-            deployable=config.get('DEPLOYABLE', False)
-        )
-        parents[config['NAME']] = (
-            object, config.get('PARENT', None)
-        )
-    for name, (object, parent_name) in parents.items():
-        if parent_name:
-            parent, _ = parents[parent_name]
-        else:
-            parent = None
-        utils.update_db_object(session, object, parent=parent)
+OSES = None
+OS_INSTALLERS = None
+PACKAGE_INSTALLERS = None
+ADAPTERS = None
+ADAPTERS_FLAVORS = None
+ADAPTERS_ROLES = None


-def add_oses_internal(session, exception_when_existing=True):
+def _get_oses_from_configuration():
+    """Get all os configs from os configuration dir.
+
+    Example: {
+        <os_name>: {
+            'name': <os_name>,
+            'id': <os_name>,
+            'os_id': <os_name>,
+            'deployable': True
+        }
+    }
+    """
     configs = util.load_configs(setting.OS_DIR)
-    _add_system(
-        session, models.OperatingSystem, configs,
-        exception_when_existing=exception_when_existing
-    )
-
-
-def add_distributed_systems_internal(session, exception_when_existing=True):
-    configs = util.load_configs(setting.DISTRIBUTED_SYSTEM_DIR)
-    _add_system(
-        session, models.DistributedSystem, configs,
-        exception_when_existing=exception_when_existing
-    )
+    systems = {}
+    for config in configs:
+        logging.info('get config %s', config)
+        system_name = config['NAME']
+        parent_name = config.get('PARENT', None)
+        system = {
+            'name': system_name,
+            'id': system_name,
+            'os_id': system_name,
+            'parent': parent_name,
+            'parent_id': parent_name,
+            'deployable': config.get('DEPLOYABLE', False)
+        }
+        systems[system_name] = system
+    parents = {}
+    for name, system in systems.items():
+        parent = system.get('parent', None)
+        parents[name] = parent
+    for name, system in systems.items():
+        util.recursive_merge_dict(name, systems, parents)
+    return systems
+
+
+def _get_installers_from_configuration(configs):
+    """Get installers from configurations.
+
+    Example: {
+        <installer_instance>: {
+            'alias': <instance_name>,
+            'id': <instance_name>,
+            'name': <name>,
+            'settings': <dict passed to installer plugin>
+        }
+    }
+    """
+    installers = {}
+    for config in configs:
+        name = config['NAME']
+        instance_name = config.get('INSTANCE_NAME', name)
+        installers[instance_name] = {
+            'alias': instance_name,
+            'id': instance_name,
+            'name': name,
+            'settings': config.get('SETTINGS', {})
+        }
+    return installers
+
+
+def _get_os_installers_from_configuration():
+    """Get os installers from os installer config dir."""
+    configs = util.load_configs(setting.OS_INSTALLER_DIR)
+    return _get_installers_from_configuration(configs)
+
+
+def _get_package_installers_from_configuration():
+    """Get package installers from package installer config dir."""
+    configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
+    return _get_installers_from_configuration(configs)


-def add_adapters_internal(session, exception_when_existing=True):
-    parents = {}
+def _get_adapters_from_configuration():
+    """Get adapters from adapter config dir."""
     configs = util.load_configs(setting.ADAPTER_DIR)
+    adapters = {}
     for config in configs:
         logging.info('add config %s to adapter', config)
-        if 'DISTRIBUTED_SYSTEM' in config:
-            distributed_system = utils.get_db_object(
-                session, models.DistributedSystem,
-                name=config['DISTRIBUTED_SYSTEM']
-            )
-        else:
-            distributed_system = None
         if 'OS_INSTALLER' in config:
-            os_installer = utils.get_db_object(
-                session, models.OSInstaller,
-                alias=config['OS_INSTALLER']
-            )
+            os_installer = OS_INSTALLERS[config['OS_INSTALLER']]
         else:
             os_installer = None
         if 'PACKAGE_INSTALLER' in config:
-            package_installer = utils.get_db_object(
-                session, models.PackageInstaller,
-                alias=config['PACKAGE_INSTALLER']
-            )
+            package_installer = PACKAGE_INSTALLERS[
+                config['PACKAGE_INSTALLER']
+            ]
         else:
             package_installer = None
-        adapter = utils.add_db_object(
-            session, models.Adapter,
-            exception_when_existing,
-            config['NAME'],
-            display_name=config.get('DISPLAY_NAME', None),
-            distributed_system=distributed_system,
-            os_installer=os_installer,
-            package_installer=package_installer,
-            deployable=config.get('DEPLOYABLE', False),
-            health_check_cmd=config.get('HEALTH_CHECK_COMMAND', None)
-        )
+        adapter_name = config['NAME']
+        parent_name = config.get('PARENT', None)
+        adapter = {
+            'name': adapter_name,
+            'id': adapter_name,
+            'parent': parent_name,
+            'parent_id': parent_name,
+            'display_name': config.get('DISPLAY_NAME', adapter_name),
+            'os_installer': os_installer,
+            'package_installer': package_installer,
+            'deployable': config.get('DEPLOYABLE', False),
+            'health_check_cmd': config.get('HEALTH_CHECK_COMMAND', None),
+            'supported_oses': [],
+            'roles': [],
+            'flavors': []
+        }
         supported_os_patterns = [
             re.compile(supported_os_pattern)
             for supported_os_pattern in config.get('SUPPORTED_OS_PATTERNS', [])
         ]
-        oses = utils.list_db_objects(
-            session, models.OperatingSystem
-        )
-        for os in oses:
-            if not os.deployable:
+        for os_name, os in OSES.items():
+            if not os.get('deployable', False):
                 continue
-            os_name = os.name
             for supported_os_pattern in supported_os_patterns:
                 if supported_os_pattern.match(os_name):
-                    utils.add_db_object(
-                        session, models.AdapterOS,
-                        exception_when_existing,
-                        os.id, adapter.id
-                    )
+                    adapter['supported_oses'].append(os)
                     break
-        parents[config['NAME']] = (adapter, config.get('PARENT', None))
+        adapters[adapter_name] = adapter

-    for name, (adapter, parent_name) in parents.items():
-        if parent_name:
-            parent, _ = parents[parent_name]
-        else:
-            parent = None
-        utils.update_db_object(session, adapter, parent=parent)
+    parents = {}
+    for name, adapter in adapters.items():
+        parent = adapter.get('parent', None)
+        parents[name] = parent
+    for name, adapter in adapters.items():
+        util.recursive_merge_dict(name, adapters, parents)
+    return adapters


-def add_roles_internal(session, exception_when_existing=True):
+def _add_roles_from_configuration():
+    """Get roles from roles config dir and update to adapters."""
     configs = util.load_configs(setting.ADAPTER_ROLE_DIR)
     for config in configs:
         logging.info(
             'add config %s to role', config
         )
-        adapter = utils.get_db_object(
-            session, models.Adapter,
-            name=config['ADAPTER_NAME']
-        )
+        adapter_name = config['ADAPTER_NAME']
+        adapter = ADAPTERS[adapter_name]
+        adapter_roles = ADAPTERS_ROLES.setdefault(adapter_name, {})
         for role_dict in config['ROLES']:
-            utils.add_db_object(
-                session, models.AdapterRole,
-                exception_when_existing, role_dict['role'], adapter.id,
-                display_name=role_dict.get('display_name', None),
-                description=role_dict.get('description', None),
-                optional=role_dict.get('optional', False)
-            )
+            role_name = role_dict['role']
+            display_name = role_dict.get('display_name', role_name)
+            adapter_roles[role_name] = {
+                'name': role_name,
+                'id': '%s:%s' % (adapter_name, role_name),
+                'adapter_id': adapter_name,
+                'adapter_name': adapter_name,
+                'display_name': display_name,
+                'description': role_dict.get('description', display_name),
+                'optional': role_dict.get('optional', False)
+            }
+    parents = {}
+    for name, adapter in ADAPTERS.items():
+        parent = adapter.get('parent', None)
+        parents[name] = parent
+    for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+        util.recursive_merge_dict(adapter_name, ADAPTERS_ROLES, parents)
+    for adapter_name, adapter_roles in ADAPTERS_ROLES.items():
+        adapter = ADAPTERS[adapter_name]
+        adapter['roles'] = adapter_roles.values()


-def add_flavors_internal(session, exception_when_existing=True):
+def _add_flavors_from_configuration():
+    """Get flavors from flavor config dir and update to adapters."""
     configs = util.load_configs(setting.ADAPTER_FLAVOR_DIR)
     for config in configs:
         logging.info('add config %s to flavor', config)
-        adapter = utils.get_db_object(
-            session, models.Adapter,
-            name=config['ADAPTER_NAME']
-        )
+        adapter_name = config['ADAPTER_NAME']
+        adapter = ADAPTERS[adapter_name]
+        adapter_flavors = ADAPTERS_FLAVORS.setdefault(adapter_name, {})
+        adapter_roles = ADAPTERS_ROLES[adapter_name]
         for flavor_dict in config['FLAVORS']:
-            flavor = utils.add_db_object(
-                session, models.AdapterFlavor,
-                exception_when_existing, flavor_dict['flavor'], adapter.id,
-                display_name=flavor_dict.get('display_name', None),
-                template=flavor_dict.get('template', None)
-            )
-            role_names = flavor_dict.get('roles', [])
-            for role_name in role_names:
-                role = utils.get_db_object(
-                    session, models.AdapterRole,
-                    name=role_name, adapter_id=adapter.id
-                )
-                utils.add_db_object(
-                    session, models.AdapterFlavorRole,
-                    exception_when_existing, flavor.id, role.id
-                )
-                utils.update_db_object(
-                    session, flavor,
-                    patched_ordered_flavor_roles=[role_name]
-                )
+            flavor_name = flavor_dict['flavor']
+            flavor_id = '%s:%s' % (adapter_name, flavor_name)
+            flavor = {
+                'name': flavor_name,
+                'id': flavor_id,
+                'adapter_id': adapter_name,
+                'adapter_name': adapter_name,
+                'display_name': flavor_dict.get('display_name', flavor_name),
+                'template': flavor_dict.get('template', None)
+            }
+            flavor_roles = flavor_dict.get('roles', [])
+            roles_in_flavor = []
+            for flavor_role in flavor_roles:
+                if isinstance(flavor_role, basestring):
+                    role_name = flavor_role
+                    role_in_flavor = {
+                        'name': role_name,
+                        'flavor_id': flavor_id
+                    }
+                else:
+                    role_in_flavor = flavor_role
+                    role_in_flavor['flavor_id'] = flavor_id
+                    if 'role' in role_in_flavor:
+                        role_in_flavor['name'] = role_in_flavor['role']
+                        del role_in_flavor['role']
+                    role_name = role_in_flavor['name']
+                role = adapter_roles[role_name]
+                util.merge_dict(role_in_flavor, role, override=False)
+                roles_in_flavor.append(role_in_flavor)
+            flavor['roles'] = roles_in_flavor
+            adapter_flavors[flavor_name] = flavor
+    parents = {}
+    for name, adapter in ADAPTERS.items():
+        parent = adapter.get('parent', None)
+        parents[name] = parent
+    for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+        util.recursive_merge_dict(adapter_name, ADAPTERS_FLAVORS, parents)
+    for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+        adapter = ADAPTERS[adapter_name]
+        adapter['flavors'] = adapter_flavors.values()


-def get_adapters_internal(session):
+def load_adapters_internal(force_reload=False):
+    """Load adapter related configurations into memory.
+
+    If force_reload, reload all configurations even if already loaded.
+    """
+    global OSES
+    if force_reload or OSES is None:
+        OSES = _get_oses_from_configuration()
+    global OS_INSTALLERS
+    if force_reload or OS_INSTALLERS is None:
+        OS_INSTALLERS = _get_os_installers_from_configuration()
+    global PACKAGE_INSTALLERS
+    if force_reload or PACKAGE_INSTALLERS is None:
+        PACKAGE_INSTALLERS = _get_package_installers_from_configuration()
+    global ADAPTERS
+    if force_reload or ADAPTERS is None:
+        ADAPTERS = _get_adapters_from_configuration()
+    global ADAPTERS_ROLES
+    if force_reload or ADAPTERS_ROLES is None:
+        ADAPTERS_ROLES = {}
+        _add_roles_from_configuration()
+    global ADAPTERS_FLAVORS
+    if force_reload or ADAPTERS_FLAVORS is None:
+        ADAPTERS_FLAVORS = {}
+        _add_flavors_from_configuration()
+
+
+def get_adapters_internal(force_reload=False):
+    """Get all deployable adapters."""
+    load_adapters_internal(force_reload=force_reload)
     adapter_mapping = {}
-    adapters = utils.list_db_objects(
-        session, models.Adapter
-    )
-    for adapter in adapters:
-        if adapter.deployable:
-            adapter_dict = adapter.to_dict()
-            adapter_mapping[adapter.id] = adapter_dict
+    for adapter_name, adapter in ADAPTERS.items():
+        if adapter.get('deployable'):
+            # TODO(xicheng): adapter should be filtered before
+            # return to caller.
+            adapter_mapping[adapter_name] = adapter
         else:
             logging.info(
                 'ignore adapter %s since it is not deployable',
-                adapter.to_dict()
+                adapter_name
             )
     return adapter_mapping
+
+
+def get_flavors_internal(force_reload=False):
+    """Get all deployable flavors."""
+    load_adapters_internal(force_reload=force_reload)
+    adapter_flavor_mapping = {}
+    for adapter_name, adapter_flavors in ADAPTERS_FLAVORS.items():
+        adapter = ADAPTERS.get(adapter_name, {})
+        for flavor_name, flavor in adapter_flavors.items():
+            if adapter.get('deployable'):
+                # TODO(xicheng): flavor dict should be filtered before
+                # return to caller.
+                adapter_flavor_mapping.setdefault(
+                    adapter_name, {}
+                )[flavor_name] = flavor
+            else:
+                logging.info(
+                    'ignore adapter %s since it is not deployable',
+                    adapter_name
+                )
+    return adapter_flavor_mapping


@@ -25,38 +25,48 @@ from compass.db import exception
 SUPPORTED_FIELDS = [
     'name',
-    'distributed_system_name',
 ]
 RESP_FIELDS = [
     'id', 'name', 'roles', 'flavors',
     'os_installer', 'package_installer',
-    'distributed_system_id',
-    'distributed_system_name',
     'supported_oses', 'display_name', 'health_check_cmd'
 ]
 RESP_OS_FIELDS = [
-    'id', 'os_id', 'name'
+    'id', 'name', 'os_id'
 ]
 RESP_ROLES_FIELDS = [
     'id', 'name', 'display_name', 'description', 'optional'
 ]
 RESP_FLAVORS_FIELDS = [
-    'id', 'name', 'display_name', 'template', 'roles'
+    'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
+    'template', 'roles'
 ]


-@database.run_in_session()
-def load_adapters(session):
-    load_adapters_internal(session)
+ADAPTER_MAPPING = None
+FLAVOR_MAPPING = None


-def load_adapters_internal(session):
+def load_adapters(force_reload=False):
     global ADAPTER_MAPPING
-    logging.info('load adapters into memory')
-    ADAPTER_MAPPING = adapter_api.get_adapters_internal(session)
+    if force_reload or ADAPTER_MAPPING is None:
+        logging.info('load adapters into memory')
+        ADAPTER_MAPPING = adapter_api.get_adapters_internal(
+            force_reload=force_reload
+        )


-ADAPTER_MAPPING = {}
+def load_flavors(force_reload=False):
+    global FLAVOR_MAPPING
+    if force_reload or FLAVOR_MAPPING is None:
+        logging.info('load flavors into memory')
+        FLAVOR_MAPPING = {}
+        adapters_flavors = adapter_api.get_flavors_internal(
+            force_reload=force_reload
+        )
+        for adapter_name, adapter_flavors in adapters_flavors.items():
+            for flavor_name, flavor in adapter_flavors.items():
+                FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor


 def _filter_adapters(adapter_config, filter_name, filter_value):
@@ -80,15 +90,10 @@ def _filter_adapters(adapter_config, filter_name, filter_value):
 @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
 @database.run_in_session()
-@user_api.check_user_permission_in_session(
+@user_api.check_user_permission(
     permission.PERMISSION_LIST_ADAPTERS
 )
-@utils.output_filters(
-    name=utils.general_filter_callback,
-    distributed_system_name=utils.general_filter_callback,
-    os_installer_name=utils.general_filter_callback,
-    package_installer_name=utils.general_filter_callback
-)
+@utils.output_filters(name=utils.general_filter_callback)
 @utils.wrap_to_dict(
     RESP_FIELDS,
     supported_oses=RESP_OS_FIELDS,
@@ -97,26 +102,13 @@ def _filter_adapters(adapter_config, filter_name, filter_value):
 )
 def list_adapters(user=None, session=None, **filters):
     """list adapters."""
-    if not ADAPTER_MAPPING:
-        load_adapters_internal(session)
+    load_adapters()
     return ADAPTER_MAPPING.values()


-def get_adapter_internal(session, adapter_id):
-    """get adapter."""
-    if not ADAPTER_MAPPING:
-        load_adapters_internal(session)
-    if adapter_id not in ADAPTER_MAPPING:
-        raise exception.RecordNotExists(
-            'adpater %s does not exist' % adapter_id
-        )
-    return ADAPTER_MAPPING[adapter_id]
-
-
 @utils.supported_filters([])
 @database.run_in_session()
-@user_api.check_user_permission_in_session(
+@user_api.check_user_permission(
     permission.PERMISSION_LIST_ADAPTERS
 )
 @utils.wrap_to_dict(
@@ -127,4 +119,37 @@ def get_adapter_internal(session, adapter_id):
 )
 def get_adapter(adapter_id, user=None, session=None, **kwargs):
     """get adapter."""
-    return get_adapter_internal(session, adapter_id)
+    load_adapters()
+    if adapter_id not in ADAPTER_MAPPING:
+        raise exception.RecordNotExists(
+            'adapter %s does not exist' % adapter_id
+        )
+    return ADAPTER_MAPPING[adapter_id]
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+    permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def list_flavors(user=None, session=None, **filters):
+    """List flavors."""
+    load_flavors()
+    return FLAVOR_MAPPING.values()
+
+
+@utils.supported_filters([])
+@database.run_in_session()
+@user_api.check_user_permission(
+    permission.PERMISSION_LIST_METADATAS
+)
+@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
+def get_flavor(flavor_id, user=None, session=None, **kwargs):
+    """Get flavor."""
+    load_flavors()
+    if flavor_id not in FLAVOR_MAPPING:
+        raise exception.RecordNotExists(
+            'flavor %s does not exist' % flavor_id
+        )
+    return FLAVOR_MAPPING[flavor_id]
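Because adapters and flavors no longer have database ids, FLAVOR_MAPPING is keyed by the composite '<adapter_name>:<flavor_name>' string that get_flavor expects. A small sketch of the mapping construction with illustrative values:

    adapters_flavors = {
        'openstack_icehouse': {
            'allinone': {'name': 'allinone', 'display_name': 'All-In-One'},
        },
    }
    FLAVOR_MAPPING = {}
    for adapter_name, adapter_flavors in adapters_flavors.items():
        for flavor_name, flavor in adapter_flavors.items():
            FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor

    assert 'openstack_icehouse:allinone' in FLAVOR_MAPPING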

File diff suppressed because it is too large.


@@ -51,6 +51,8 @@ POOL_MAPPING = {
 def init(database_url=None):
     """Initialize database.

+    Adjust sqlalchemy logging if necessary.
+
     :param database_url: string, database url.
     """
     global ENGINE
@@ -81,35 +83,48 @@ def init(database_url=None):
 def in_session():
     """check if in database session scope."""
-    if hasattr(SESSION_HOLDER, 'session'):
-        return True
-    else:
-        return False
+    return bool(hasattr(SESSION_HOLDER, 'session'))


 @contextmanager
-def session():
+def session(exception_when_in_session=True):
     """database session scope.

-    .. note::
-       To operate database, it should be called in database session.
+    To operate database, it should be called in database session.
+    If not exception_when_in_session, the with session statement supports
+    nested sessions and only the outermost session commits/rollbacks the
+    transaction.
     """
     if not ENGINE:
         init()

+    nested_session = False
     if hasattr(SESSION_HOLDER, 'session'):
-        logging.error('we are already in session')
-        raise exception.DatabaseException('session already exist')
+        if exception_when_in_session:
+            logging.error('we are already in session')
+            raise exception.DatabaseException('session already exist')
+        else:
+            new_session = SESSION_HOLDER.session
+            nested_session = True
+            logging.log(
+                logsetting.getLevelByName('fine'),
+                'reuse session %s', nested_session
+            )
     else:
         new_session = SCOPED_SESSION()
         setattr(SESSION_HOLDER, 'session', new_session)
+        logging.log(
+            logsetting.getLevelByName('fine'),
+            'enter session %s', new_session
+        )
     try:
         yield new_session
-        new_session.commit()
+        if not nested_session:
+            new_session.commit()
     except Exception as error:
-        new_session.rollback()
-        logging.error('failed to commit session')
+        if not nested_session:
+            new_session.rollback()
+        logging.error('failed to commit session')
         logging.exception(error)
         if isinstance(error, IntegrityError):
             for item in error.statement.split():
@@ -128,15 +143,21 @@ def session():
         else:
             raise exception.DatabaseException(str(error))
     finally:
-        new_session.close()
-        SCOPED_SESSION.remove()
-        delattr(SESSION_HOLDER, 'session')
+        if not nested_session:
+            new_session.close()
+            SCOPED_SESSION.remove()
+            delattr(SESSION_HOLDER, 'session')
+        logging.log(
+            logsetting.getLevelByName('fine'),
+            'exit session %s', new_session
+        )


 def current_session():
     """Get the current session scope when it is called.

     :return: database session.
+    :raises: DatabaseException when it is not in session.
     """
     try:
         return SESSION_HOLDER.session
@@ -149,26 +170,42 @@ def current_session():
         raise exception.DatabaseException(str(error))


-def run_in_session():
+def run_in_session(exception_when_in_session=True):
+    """Decorator to make sure the decorated function runs in session.
+
+    When not exception_when_in_session, run_in_session can be
+    nested several times.
+    """
     def decorator(func):
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
-            if 'session' in kwargs.keys():
-                return func(*args, **kwargs)
-            else:
-                with session() as my_session:
-                    kwargs['session'] = my_session
+            try:
+                my_session = kwargs.get('session')
+                if my_session is not None:
                     return func(*args, **kwargs)
+                else:
+                    with session(
+                        exception_when_in_session=exception_when_in_session
+                    ) as my_session:
+                        kwargs['session'] = my_session
+                        return func(*args, **kwargs)
+            except Exception as error:
+                logging.error(
+                    'got exception with func %s args %s kwargs %s',
+                    func, args, kwargs
+                )
+                logging.exception(error)
+                raise error
         return wrapper
     return decorator


 def _setup_user_table(user_session):
-    """Initialize default user."""
+    """Initialize user table with default user."""
     logging.info('setup user table')
     from compass.db.api import user
-    user.add_user_internal(
-        user_session,
+    user.add_user(
+        session=user_session,
         email=setting.COMPASS_ADMIN_EMAIL,
         password=setting.COMPASS_ADMIN_PASSWORD,
         is_admin=True
@@ -180,120 +217,22 @@ def _setup_permission_table(permission_session):
     logging.info('setup permission table.')
     from compass.db.api import permission
     permission.add_permissions_internal(
-        permission_session
+        session=permission_session
     )


 def _setup_switch_table(switch_session):
     """Initialize switch table."""
+    # TODO(xicheng): deprecate setting up the default switch.
     logging.info('setup switch table')
     from compass.db.api import switch
-    switch.add_switch_internal(
-        switch_session, long(netaddr.IPAddress(setting.DEFAULT_SWITCH_IP)),
-        True, filters=['allow ports all']
+    switch.add_switch(
+        True, setting.DEFAULT_SWITCH_IP,
+        session=switch_session,
+        machine_filters=['allow ports all']
     )


-def _setup_os_installers(installer_session):
-    """Initialize os_installer table."""
-    logging.info('setup os installer table')
-    from compass.db.api import installer
-    installer.add_os_installers_internal(
-        installer_session
-    )
-
-
-def _setup_package_installers(installer_session):
-    """Initialize package_installer table."""
-    logging.info('setup package installer table')
-    from compass.db.api import installer
-    installer.add_package_installers_internal(
-        installer_session
-    )
-
-
-def _setup_oses(os_session):
-    """Initialize os table."""
-    logging.info('setup os table')
-    from compass.db.api import adapter
-    adapter.add_oses_internal(
-        os_session
-    )
-
-
-def _setup_distributed_systems(distributed_system_session):
-    """Initialize distributed system table."""
-    logging.info('setup distributed system table')
-    from compass.db.api import adapter
-    adapter.add_distributed_systems_internal(
-        distributed_system_session
-    )
-
-
-def _setup_adapters(adapter_session):
-    """Initialize package adapter table."""
-    logging.info('setup adapter table')
-    from compass.db.api import adapter
-    adapter.add_adapters_internal(
-        adapter_session)
-
-
-def _setup_os_fields(field_session):
-    """Initialize os field table."""
-    logging.info('setup os field table')
-    from compass.db.api import metadata
-    metadata.add_os_field_internal(field_session)
-
-
-def _setup_package_fields(field_session):
-    """Initialize package field table."""
-    logging.info('setup package field table')
-    from compass.db.api import metadata
-    metadata.add_package_field_internal(field_session)
-
-
-def _setup_flavor_fields(field_session):
-    """Initialize flavor field table."""
-    logging.info('setup flavor field table')
-    from compass.db.api import metadata
-    metadata.add_flavor_field_internal(field_session)
-
-
-def _setup_os_metadatas(metadata_session):
-    """Initialize os metadata table."""
-    logging.info('setup os metadata table')
-    from compass.db.api import metadata
-    metadata.add_os_metadata_internal(metadata_session)
-
-
-def _setup_package_metadatas(metadata_session):
-    """Initialize package metadata table."""
-    logging.info('setup package metadata table')
-    from compass.db.api import metadata
-    metadata.add_package_metadata_internal(metadata_session)
-
-
-def _setup_flavor_metadatas(metadata_session):
-    """Initialize flavor metadata table."""
-    logging.info('setup flavor metadata table')
-    from compass.db.api import metadata
-    metadata.add_flavor_metadata_internal(metadata_session)
-
-
-def _setup_adapter_roles(role_session):
-    """Initialize adapter role table."""
-    logging.info('setup adapter role table')
-    from compass.db.api import adapter
-    adapter.add_roles_internal(role_session)
-
-
-def _setup_adapter_flavors(flavor_session):
-    """Initialize adapter flavor table."""
-    logging.info('setup adapter flavor table')
-    from compass.db.api import adapter
-    adapter.add_flavors_internal(flavor_session)
-
-
 def _update_others(other_session):
     """Update other tables."""
     logging.info('update other tables')
@@ -311,25 +250,12 @@ def _update_others(other_session):

 @run_in_session()
-def create_db(session):
+def create_db(session=None):
     """Create database."""
     models.BASE.metadata.create_all(bind=ENGINE)
     _setup_permission_table(session)
     _setup_user_table(session)
     _setup_switch_table(session)
-    _setup_os_installers(session)
-    _setup_package_installers(session)
-    _setup_oses(session)
-    _setup_distributed_systems(session)
-    _setup_adapters(session)
-    _setup_adapter_roles(session)
-    _setup_adapter_flavors(session)
-    _setup_os_fields(session)
-    _setup_package_fields(session)
-    _setup_flavor_fields(session)
-    _setup_os_metadatas(session)
-    _setup_package_metadatas(session)
-    _setup_flavor_metadatas(session)
     _update_others(session)


@@ -16,7 +16,9 @@
"""Cluster health check report.""" """Cluster health check report."""
import logging import logging
from compass.db.api import cluster as cluster_api
from compass.db.api import database from compass.db.api import database
from compass.db.api import host as host_api
from compass.db.api import permission from compass.db.api import permission
from compass.db.api import user as user_api from compass.db.api import user as user_api
from compass.db.api import utils from compass.db.api import utils
@@ -39,27 +41,32 @@ RESP_ACTION_FIELDS = ['cluster_id', 'status']
@utils.supported_filters(REQUIRED_INSERT_FIELDS, OPTIONAL_INSERT_FIELDS) @utils.supported_filters(REQUIRED_INSERT_FIELDS, OPTIONAL_INSERT_FIELDS)
@database.run_in_session() @database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def add_report_record(cluster_id, name, report={}, def add_report_record(cluster_id, name=None, report={},
state='verifying', session=None, **kwargs): state='verifying', session=None, **kwargs):
"""Create a health check report record.""" """Create a health check report record."""
# Replace any whitespace with '-' # Replace any whitespace with '-'
words = name.split() words = name.split()
name = '-'.join(words) name = '-'.join(words)
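# e.g. a name like 'primary health check' becomes 'primary-health-check'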
cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
return utils.add_db_object( return utils.add_db_object(
session, models.HealthCheckReport, True, cluster_id, name, session, models.HealthCheckReport, True, cluster.id, name,
report=report, state=state, **kwargs report=report, state=state, **kwargs
) )
def _get_report(cluster_id, name, session=None):
cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
return utils.get_db_object(
session, models.HealthCheckReport, cluster_id=cluster.id, name=name
)
@utils.supported_filters(UPDATE_FIELDS) @utils.supported_filters(UPDATE_FIELDS)
@database.run_in_session() @database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def update_report(cluster_id, name, session=None, **kwargs): def update_report(cluster_id, name, session=None, **kwargs):
"""Update health check report.""" """Update health check report."""
report = utils.get_db_object( report = _get_report(cluster_id, name, session=session)
session, models.HealthCheckReport, cluster_id=cluster_id, name=name
)
if report.state == 'finished': if report.state == 'finished':
err_msg = 'Report cannot be updated if state is in "finished"' err_msg = 'Report cannot be updated if state is in "finished"'
raise exception.Forbidden(err_msg) raise exception.Forbidden(err_msg)
@@ -72,106 +79,109 @@ def update_report(cluster_id, name, session=None, **kwargs):
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def update_multi_reports(cluster_id, session=None, **kwargs): def update_multi_reports(cluster_id, session=None, **kwargs):
"""Bulk update reports.""" """Bulk update reports."""
# TODO(grace): rename the function if needed to reflect what it does.
return set_error(cluster_id, session=session, **kwargs) return set_error(cluster_id, session=session, **kwargs)
def set_error(cluster_id, report={}, session=None, def set_error(cluster_id, report={}, session=None,
state='error', error_message=None): state='error', error_message=None):
with session.begin(subtransactions=True): cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
logging.debug( logging.debug(
"session %s updates all reports as %s in cluster %s", "updates all reports as %s in cluster %s",
id(session), state, cluster_id state, cluster_id
) )
session.query( return utils.update_db_objects(
models.HealthCheckReport session, models.HealthCheckReport,
).filter_by(cluster_id=cluster_id).update( updates={
{"report": {}, 'state': 'error', 'error_message': error_message} 'report': {},
) 'state': 'error',
'error_message': error_message
reports = session.query( }, cluster_id=cluster.id
models.HealthCheckReport )
).filter_by(cluster_id=cluster_id).all()
return reports
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_HEALTH_REPORT permission.PERMISSION_LIST_HEALTH_REPORT
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def list_health_reports(user, cluster_id, session=None): def list_health_reports(cluster_id, user=None, session=None):
"""List all reports in the specified cluster.""" """List all reports in the specified cluster."""
cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
return utils.list_db_objects( return utils.list_db_objects(
session, models.HealthCheckReport, cluster_id=cluster_id session, models.HealthCheckReport, cluster_id=cluster.id
) )
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_GET_HEALTH_REPORT permission.PERMISSION_GET_HEALTH_REPORT
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def get_health_report(user, cluster_id, name, session=None): def get_health_report(cluster_id, name, user=None, session=None):
return utils.get_db_object( return _get_report(
session, models.HealthCheckReport, cluster_id=cluster_id, name=name cluster_id, name, session=session
) )
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_DELETE_REPORT permission.PERMISSION_DELETE_REPORT
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def delete_reports(user, cluster_id, name=None, session=None): def delete_reports(cluster_id, name=None, user=None, session=None):
if not name: # TODO(grace): better to separate this function into two.
report = utils.get_db_object( # One is to delete a report of a cluster, the other to delete all
session, models.HealthCheckReport, cluster_id=cluster_id, name=name # reports under a cluster.
) if name:
report = _get_report(cluster_id, name, session=session)
return utils.del_db_object(session, report) return utils.del_db_object(session, report)
else:
return utils.del_db_objects( cluster = cluster_api.get_cluster_internal(
session, models.HealthCheckReport, cluster_id=cluster_id cluster_id, session=session
) )
return utils.del_db_objects(
session, models.HealthCheckReport, cluster_id=cluster.id
)
@utils.supported_filters(optional_support_keys=['check_health']) @utils.supported_filters(optional_support_keys=['check_health'])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_CHECK_CLUSTER_HEALTH permission.PERMISSION_CHECK_CLUSTER_HEALTH
) )
@utils.wrap_to_dict(RESP_ACTION_FIELDS) @utils.wrap_to_dict(RESP_ACTION_FIELDS)
def start_check_cluster_health(user, cluster_id, send_report_url, def start_check_cluster_health(cluster_id, send_report_url,
session=None, check_health={}): user=None, session=None, check_health={}):
"""Start to check cluster health.""" """Start to check cluster health."""
cluster_state = utils.get_db_object( cluster = cluster_api.get_cluster_internal(cluster_id, session=session)
session, models.Cluster, True, id=cluster_id
).state_dict()
if cluster_state['state'] != 'SUCCESSFUL': if cluster.state.state != 'SUCCESSFUL':
logging.debug("state is %s" % cluster_state['state']) logging.debug("state is %s" % cluster.state.state)
err_msg = "Healthcheck starts only after cluster finished deployment!" err_msg = "Healthcheck starts only after cluster finished deployment!"
raise exception.Forbidden(err_msg) raise exception.Forbidden(err_msg)
reports = utils.list_db_objects( reports = utils.list_db_objects(
session, models.HealthCheckReport, session, models.HealthCheckReport,
cluster_id=cluster_id, state='verifying' cluster_id=cluster.id, state='verifying'
) )
if reports: if reports:
err_msg = 'Healthcheck in progress, please wait for it to complete!' err_msg = 'Healthcheck in progress, please wait for it to complete!'
raise exception.Forbidden(err_msg) raise exception.Forbidden(err_msg)
# Clear all previous reports # Clear all previous reports
# TODO(grace): the delete should be moved into the celery task.
# We should consider the case where the celery task is down.
utils.del_db_objects( utils.del_db_objects(
session, models.HealthCheckReport, cluster_id=cluster_id session, models.HealthCheckReport, cluster_id=cluster.id
) )
from compass.tasks import client as celery_client from compass.tasks import client as celery_client
celery_client.celery.send_task( celery_client.celery.send_task(
'compass.tasks.cluster_health', 'compass.tasks.cluster_health',
(cluster_id, send_report_url, user.email) (cluster.id, send_report_url, user.email)
) )
return { return {
"cluster_id": cluster_id, "cluster_id": cluster.id,
"status": "start to check cluster health." "status": "start to check cluster health."
} }

File diff suppressed because it is too large


@@ -1,53 +0,0 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter database operations."""
import logging
import os
from compass.db.api import database
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
from compass.utils import util
def _add_installers(session, model, configs, exception_when_existing=True):
installers = []
for config in configs:
installers.append(utils.add_db_object(
session, model,
exception_when_existing, config['INSTANCE_NAME'],
name=config['NAME'],
settings=config.get('SETTINGS', {})
))
return installers
def add_os_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.OS_INSTALLER_DIR)
return _add_installers(
session, models.OSInstaller, configs,
exception_when_existing=exception_when_existing
)
def add_package_installers_internal(session, exception_when_existing=True):
configs = util.load_configs(setting.PACKAGE_INSTALLER_DIR)
return _add_installers(
session, models.PackageInstaller, configs,
exception_when_existing=exception_when_existing
)


@@ -14,6 +14,7 @@
"""Switch database operations.""" """Switch database operations."""
import logging import logging
import re
from compass.db.api import database from compass.db.api import database
from compass.db.api import permission from compass.db.api import permission
@@ -43,9 +44,26 @@ RESP_DEPLOY_FIELDS = [
] ]
def _get_machine(machine_id, session=None, **kwargs):
"""Get machine by id."""
if isinstance(machine_id, (int, long)):
return utils.get_db_object(
session, models.Machine,
id=machine_id, **kwargs
)
raise exception.InvalidParameter(
'machine id %s type is not int compatible' % machine_id
)
def get_machine_internal(machine_id, session=None, **kwargs):
"""Helper function to other files under db/api."""
return _get_machine(machine_id, session=session, **kwargs)
@utils.supported_filters([]) @utils.supported_filters([])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_MACHINES permission.PERMISSION_LIST_MACHINES
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
@@ -53,10 +71,10 @@ def get_machine(
machine_id, exception_when_missing=True, machine_id, exception_when_missing=True,
user=None, session=None, **kwargs user=None, session=None, **kwargs
): ):
"""get field dict of a machine.""" """get a machine."""
return utils.get_db_object( return _get_machine(
session, models.Machine, machine_id, session=session,
exception_when_missing, id=machine_id exception_when_missing=exception_when_missing
) )
@@ -64,7 +82,7 @@ def get_machine(
optional_support_keys=SUPPORTED_FIELDS optional_support_keys=SUPPORTED_FIELDS
) )
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_MACHINES permission.PERMISSION_LIST_MACHINES
) )
@utils.output_filters( @utils.output_filters(
@@ -80,9 +98,9 @@ def list_machines(user=None, session=None, **filters):
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def _update_machine(session, machine_id, **kwargs): def _update_machine(machine_id, session=None, **kwargs):
"""Update a machine.""" """Update a machine."""
machine = utils.get_db_object(session, models.Machine, id=machine_id) machine = _get_machine(machine_id, session=session)
return utils.update_db_object(session, machine, **kwargs) return utils.update_db_object(session, machine, **kwargs)
@@ -92,15 +110,19 @@ def _update_machine(session, machine_id, **kwargs):
) )
@utils.input_validates(ipmi_credentials=utils.check_ipmi_credentials) @utils.input_validates(ipmi_credentials=utils.check_ipmi_credentials)
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_ADD_MACHINE permission.PERMISSION_ADD_MACHINE
) )
def update_machine(machine_id, user=None, session=None, **kwargs): def update_machine(machine_id, user=None, session=None, **kwargs):
"""Update a machine."""
return _update_machine( return _update_machine(
session, machine_id, **kwargs machine_id, session=session, **kwargs
) )
# replace [ipmi_credentials, tag, location] in kwargs with
# [patched_ipmi_credentials, patched_tag, patched_location].
# This tells the db layer that these fields will be patched.
@utils.replace_filters( @utils.replace_filters(
ipmi_credentials='patched_ipmi_credentials', ipmi_credentials='patched_ipmi_credentials',
tag='patched_tag', tag='patched_tag',
@@ -112,24 +134,18 @@ def update_machine(machine_id, user=None, session=None, **kwargs):
) )
@database.run_in_session() @database.run_in_session()
@utils.output_validates(ipmi_credentials=utils.check_ipmi_credentials) @utils.output_validates(ipmi_credentials=utils.check_ipmi_credentials)
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_ADD_MACHINE permission.PERMISSION_ADD_MACHINE
) )
def patch_machine(machine_id, user=None, session=None, **kwargs): def patch_machine(machine_id, user=None, session=None, **kwargs):
"""Patch a machine."""
return _update_machine( return _update_machine(
session, machine_id, **kwargs machine_id, session=session, **kwargs
) )
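
A minimal sketch of what a replace_filters-style decorator does; this is an illustration, not the project's actual compass.db.api.utils implementation. It renames incoming kwargs before the wrapped function sees them, so a PATCH request's 'tag' reaches the db layer as 'patched_tag':

def replace_filters_sketch(**mapping):
    """Rename kwargs according to mapping before calling func."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            for key in list(kwargs.keys()):
                if key in mapping:
                    kwargs[mapping[key]] = kwargs.pop(key)
            return func(*args, **kwargs)
        return wrapper
    return decorator

@replace_filters_sketch(tag='patched_tag')
def patch(machine_id, **kwargs):
    return machine_id, kwargs

# patch(1, tag={'rack': 'a1'}) -> (1, {'patched_tag': {'rack': 'a1'}})
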
@utils.supported_filters() def _check_machine_deletable(machine):
@database.run_in_session() """Check whether a machine is deletable."""
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_MACHINE
)
@utils.wrap_to_dict(RESP_FIELDS)
def del_machine(machine_id, user=None, session=None, **kwargs):
"""Delete a machine."""
machine = utils.get_db_object(session, models.Machine, id=machine_id)
if machine.host: if machine.host:
host = machine.host host = machine.host
raise exception.NotAcceptable( raise exception.NotAcceptable(
@@ -137,12 +153,24 @@ def del_machine(machine_id, user=None, session=None, **kwargs):
machine.mac, host.name machine.mac, host.name
) )
) )
@utils.supported_filters()
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_DEL_MACHINE
)
@utils.wrap_to_dict(RESP_FIELDS)
def del_machine(machine_id, user=None, session=None, **kwargs):
"""Delete a machine."""
machine = _get_machine(machine_id, session=session)
_check_machine_deletable(machine)
return utils.del_db_object(session, machine) return utils.del_db_object(session, machine)
@utils.supported_filters(optional_support_keys=['poweron']) @utils.supported_filters(optional_support_keys=['poweron'])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_DEPLOY_HOST permission.PERMISSION_DEPLOY_HOST
) )
@utils.wrap_to_dict( @utils.wrap_to_dict(
@@ -154,8 +182,8 @@ def poweron_machine(
): ):
"""power on machine.""" """power on machine."""
from compass.tasks import client as celery_client from compass.tasks import client as celery_client
machine = utils.get_db_object( machine = _get_machine(
session, models.Machine, id=machine_id machine_id, session=session
) )
celery_client.celery.send_task( celery_client.celery.send_task(
'compass.tasks.poweron_machine', 'compass.tasks.poweron_machine',
@@ -169,7 +197,7 @@ def poweron_machine(
@utils.supported_filters(optional_support_keys=['poweroff']) @utils.supported_filters(optional_support_keys=['poweroff'])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_DEPLOY_HOST permission.PERMISSION_DEPLOY_HOST
) )
@utils.wrap_to_dict( @utils.wrap_to_dict(
@@ -181,8 +209,8 @@ def poweroff_machine(
): ):
"""power off machine.""" """power off machine."""
from compass.tasks import client as celery_client from compass.tasks import client as celery_client
machine = utils.get_db_object( machine = _get_machine(
session, models.Machine, id=machine_id machine_id, session=session
) )
celery_client.celery.send_task( celery_client.celery.send_task(
'compass.tasks.poweroff_machine', 'compass.tasks.poweroff_machine',
@@ -196,7 +224,7 @@ def poweroff_machine(
@utils.supported_filters(optional_support_keys=['reset']) @utils.supported_filters(optional_support_keys=['reset'])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_DEPLOY_HOST permission.PERMISSION_DEPLOY_HOST
) )
@utils.wrap_to_dict( @utils.wrap_to_dict(
@@ -208,8 +236,8 @@ def reset_machine(
): ):
"""reset machine.""" """reset machine."""
from compass.tasks import client as celery_client from compass.tasks import client as celery_client
machine = utils.get_db_object( machine = _get_machine(
session, models.Machine, id=machine_id machine_id, session=session
) )
celery_client.celery.send_task( celery_client.celery.send_task(
'compass.tasks.reset_machine', 'compass.tasks.reset_machine',


@@ -17,6 +17,7 @@ import copy
import logging import logging
import string import string
from compass.db.api import adapter as adapter_api
from compass.db.api import database from compass.db.api import database
from compass.db.api import utils from compass.db.api import utils
from compass.db import callback as metadata_callback from compass.db import callback as metadata_callback
@@ -29,26 +30,39 @@ from compass.utils import util
from compass.utils import util from compass.utils import util
def _add_field_internal(session, model, configs): OS_FIELDS = None
fields = [] PACKAGE_FIELDS = None
FLAVOR_FIELDS = None
OSES_METADATA = None
PACKAGES_METADATA = None
FLAVORS_METADATA = None
OSES_METADATA_UI_CONVERTERS = None
FLAVORS_METADATA_UI_CONVERTERS = None
def _get_field_from_configuration(configs):
"""Get fields from configurations."""
fields = {}
for config in configs: for config in configs:
if not isinstance(config, dict): if not isinstance(config, dict):
raise exception.InvalidParameter( raise exception.InvalidParameter(
'config %s is not dict' % config 'config %s is not dict' % config
) )
fields.append(utils.add_db_object( field_name = config['NAME']
session, model, False, fields[field_name] = {
config['NAME'], 'name': field_name,
field_type=config.get('FIELD_TYPE', basestring), 'id': field_name,
display_type=config.get('DISPLAY_TYPE', 'text'), 'field_type': config.get('FIELD_TYPE', basestring),
validator=config.get('VALIDATOR', None), 'display_type': config.get('DISPLAY_TYPE', 'text'),
js_validator=config.get('JS_VALIDATOR', None), 'validator': config.get('VALIDATOR', None),
description=config.get('DESCRIPTION', None) 'js_validator': config.get('JS_VALIDATOR', None),
)) 'description': config.get('DESCRIPTION', field_name)
}
return fields return fields
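
A toy example (config values invented) of the translation above: each field config entry becomes a plain dict keyed by its NAME, with defaults filled in from the code's fallbacks.

configs = [{'NAME': 'dns', 'DISPLAY_TYPE': 'text'}]
# _get_field_from_configuration(configs) returns:
# {'dns': {'name': 'dns', 'id': 'dns', 'field_type': basestring,
#          'display_type': 'text', 'validator': None,
#          'js_validator': None, 'description': 'dns'}}
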
def add_os_field_internal(session): def _get_os_fields_from_configuration():
"""Get os fields from os field config dir."""
env_locals = {} env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS)
@@ -56,12 +70,13 @@ def add_os_field_internal(session):
setting.OS_FIELD_DIR, setting.OS_FIELD_DIR,
env_locals=env_locals env_locals=env_locals
) )
return _add_field_internal( return _get_field_from_configuration(
session, models.OSConfigField, configs configs
) )
def add_package_field_internal(session): def _get_package_fields_from_configuration():
"""Get package fields from package field config dir."""
env_locals = {} env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS)
@@ -69,12 +84,13 @@ def add_package_field_internal(session):
setting.PACKAGE_FIELD_DIR, setting.PACKAGE_FIELD_DIR,
env_locals=env_locals env_locals=env_locals
) )
return _add_field_internal( return _get_field_from_configuration(
session, models.PackageConfigField, configs configs
) )
def add_flavor_field_internal(session): def _get_flavor_fields_from_configuration():
"""Get flavor fields from flavor field config dir."""
env_locals = {} env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS)
@@ -82,26 +98,38 @@ def add_flavor_field_internal(session):
setting.FLAVOR_FIELD_DIR, setting.FLAVOR_FIELD_DIR,
env_locals=env_locals env_locals=env_locals
) )
return _add_field_internal( return _get_field_from_configuration(
session, models.FlavorConfigField, configs configs
) )
def _add_metadata( def _get_metadata_from_configuration(
session, field_model, metadata_model, id, path, name, config, path, name, config,
exception_when_existing=True, parent=None, **kwargs fields, **kwargs
): ):
"""Recursively get metadata from configuration.
Args:
path: the path down from the root element;
mainly for troubleshooting.
name: the key of the metadata section.
config: the value of the metadata section.
fields: all fields defined in os fields or package fields dir.
"""
if not isinstance(config, dict): if not isinstance(config, dict):
raise exception.InvalidParameter( raise exception.InvalidParameter(
'%s config %s is not dict' % (path, config) '%s config %s is not dict' % (path, config)
) )
metadata_self = config.get('_self', {}) metadata_self = config.get('_self', {})
if 'field' in metadata_self: if 'field' in metadata_self:
field = utils.get_db_object( field_name = metadata_self['field']
session, field_model, field=metadata_self['field'] field = fields[field_name]
)
else: else:
field = None field = {}
# mapping_to may contain a $ variable like $partition. Here we replace
# $partition with the key of the corresponding config. The backend can
# then use this feature to support multiple partitions while the
# partition metadata is declared in only one place.
mapping_to_template = metadata_self.get('mapping_to', None) mapping_to_template = metadata_self.get('mapping_to', None)
if mapping_to_template: if mapping_to_template:
mapping_to = string.Template( mapping_to = string.Template(
@@ -111,33 +139,54 @@ def _add_metadata(
) )
else: else:
mapping_to = None mapping_to = None
metadata = utils.add_db_object( self_metadata = {
session, metadata_model, exception_when_existing, 'name': name,
id, path, name=name, parent=parent, field=field, 'display_name': metadata_self.get('display_name', name),
display_name=metadata_self.get('display_name', name), 'field_type': field.get('field_type', dict),
description=metadata_self.get('description', None), 'display_type': field.get('display_type', None),
is_required=metadata_self.get('is_required', False), 'description': metadata_self.get(
required_in_whole_config=metadata_self.get( 'description', field.get('description', None)
),
'is_required': metadata_self.get('is_required', False),
'required_in_whole_config': metadata_self.get(
'required_in_whole_config', False), 'required_in_whole_config', False),
mapping_to=mapping_to, 'mapping_to': mapping_to,
validator=metadata_self.get('validator', None), 'validator': metadata_self.get(
js_validator=metadata_self.get('js_validator', None), 'validator', field.get('validator', None)
default_value=metadata_self.get('default_value', None), ),
default_callback=metadata_self.get('default_callback', None), 'js_validator': metadata_self.get(
default_callback_params=metadata_self.get( 'js_validator', field.get('js_validator', None)
),
'default_value': metadata_self.get('default_value', None),
'default_callback': metadata_self.get('default_callback', None),
'default_callback_params': metadata_self.get(
'default_callback_params', {}), 'default_callback_params', {}),
options=metadata_self.get('options', None), 'options': metadata_self.get('options', None),
options_callback=metadata_self.get('options_callback', None), 'options_callback': metadata_self.get('options_callback', None),
options_callback_params=metadata_self.get( 'options_callback_params': metadata_self.get(
'options_callback_params', {}), 'options_callback_params', {}),
autofill_callback=metadata_self.get( 'autofill_callback': metadata_self.get(
'autofill_callback', None), 'autofill_callback', None),
autofill_callback_params=metadata_self.get( 'autofill_callback_params': metadata_self.get(
'autofill_callback_params', {}), 'autofill_callback_params', {}),
required_in_options=metadata_self.get( 'required_in_options': metadata_self.get(
'required_in_options', False), 'required_in_options', False)
**kwargs }
) self_metadata.update(kwargs)
metadata = {'_self': self_metadata}
# Key extensions are used to do two things:
# one is to return the extended metadata, in which $<something>
# is replaced by each of its possible extensions.
# The other is to record the $<something> to extended-value mapping,
# which is used later for mapping_to substitution.
# TODO(grace): select a better name than key_extensions if
# you can think of one.
# Suppose key_extensions is {'$partition': ['/var', '/']}. Then
# the metadata for $partition will be mapped to
# {'/var': ..., '/': ...}, and kwargs={'partition': '/var'} and
# kwargs={'partition': '/'} will be passed to the recursive metadata
# parsing for the sub metadata under '/var' and '/'. In that parsing,
# the kwargs are used to substitute into mapping_to.
key_extensions = metadata_self.get('key_extensions', {}) key_extensions = metadata_self.get('key_extensions', {})
general_keys = [] general_keys = []
for key, value in config.items(): for key, value in config.items():
@@ -160,20 +209,16 @@ def _add_metadata(
) )
sub_kwargs = dict(kwargs) sub_kwargs = dict(kwargs)
sub_kwargs[key[1:]] = extended_key sub_kwargs[key[1:]] = extended_key
_add_metadata( metadata[extended_key] = _get_metadata_from_configuration(
session, field_model, metadata_model, '%s/%s' % (path, extended_key), extended_key, value,
id, '%s/%s' % (path, extended_key), extended_key, value, fields, **sub_kwargs
exception_when_existing=exception_when_existing,
parent=metadata, **sub_kwargs
) )
else: else:
if key.startswith('$'): if key.startswith('$'):
general_keys.append(key) general_keys.append(key)
_add_metadata( metadata[key] = _get_metadata_from_configuration(
session, field_model, metadata_model, '%s/%s' % (path, key), key, value,
id, '%s/%s' % (path, key), key, value, fields, **kwargs
exception_when_existing=exception_when_existing,
parent=metadata, **kwargs
) )
if len(general_keys) > 1: if len(general_keys) > 1:
raise exception.InvalidParameter( raise exception.InvalidParameter(
@@ -184,8 +229,9 @@ def _add_metadata(
return metadata return metadata
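
A toy walk-through (data invented) of the key extension expansion above: with key_extensions {'$partition': ['/var', '/']}, a '$partition' sub-config is expanded into one metadata entry per concrete partition, and the recorded kwargs are substituted into mapping_to templates.

import string

sub_config = {'max_size': '10G'}
extensions = {'$partition': ['/var', '/']}
expanded = {}
for extended_key in extensions['$partition']:
    # kwargs={'partition': extended_key} flows into the recursive parse
    mapping_to = string.Template(
        'partition$partition').safe_substitute(partition=extended_key)
    expanded[extended_key] = dict(sub_config, mapping_to=mapping_to)
# expanded == {'/var': {'max_size': '10G', 'mapping_to': 'partition/var'},
#              '/': {'max_size': '10G', 'mapping_to': 'partition/'}}
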
def add_os_metadata_internal(session, exception_when_existing=True): def _get_oses_metadata_from_configuration():
os_metadatas = [] """Get os metadata from os metadata config dir."""
oses_metadata = {}
env_locals = {} env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS)
@@ -194,22 +240,28 @@ def add_os_metadata_internal(session, exception_when_existing=True):
env_locals=env_locals env_locals=env_locals
) )
for config in configs: for config in configs:
os = utils.get_db_object( os_name = config['OS']
session, models.OperatingSystem, name=config['OS'] os_metadata = oses_metadata.setdefault(os_name, {})
)
for key, value in config['METADATA'].items(): for key, value in config['METADATA'].items():
os_metadatas.append(_add_metadata( os_metadata[key] = _get_metadata_from_configuration(
session, models.OSConfigField, key, key, value, OS_FIELDS
models.OSConfigMetadata, )
os.id, key, key, value,
exception_when_existing=exception_when_existing, oses = adapter_api.OSES
parent=None parents = {}
)) for os_name, os in oses.items():
return os_metadatas parent = os.get('parent', None)
parents[os_name] = parent
for os_name, os in oses.items():
oses_metadata[os_name] = util.recursive_merge_dict(
os_name, oses_metadata, parents
)
return oses_metadata
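
util.recursive_merge_dict is assumed here to merge each os's metadata with its ancestors', the child's keys winning; the real helper presumably merges nested dicts recursively. A rough, shallow plain-Python equivalent of that behavior:

def merge_with_parents(name, data, parents):
    merged = {}
    chain = []
    while name:                       # walk child -> parent -> ... -> root
        chain.append(name)
        name = parents.get(name)
    for ancestor in reversed(chain):  # apply root first so the child wins
        merged.update(data.get(ancestor, {}))
    return merged

data = {'centos': {'a': 1}, 'centos6.5': {'b': 2}}
parents = {'centos': None, 'centos6.5': 'centos'}
# merge_with_parents('centos6.5', data, parents) == {'a': 1, 'b': 2}
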
def add_package_metadata_internal(session, exception_when_existing=True): def _get_packages_metadata_from_configuration():
package_metadatas = [] """Get package metadata from package metadata config dir."""
packages_metadata = {}
env_locals = {} env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS)
@@ -218,22 +270,27 @@ def add_package_metadata_internal(session, exception_when_existing=True):
env_locals=env_locals env_locals=env_locals
) )
for config in configs: for config in configs:
adapter = utils.get_db_object( adapter_name = config['ADAPTER']
session, models.Adapter, name=config['ADAPTER'] package_metadata = packages_metadata.setdefault(adapter_name, {})
)
for key, value in config['METADATA'].items(): for key, value in config['METADATA'].items():
package_metadatas.append(_add_metadata( package_metadata[key] = _get_metadata_from_configuration(
session, models.PackageConfigField, key, key, value, PACKAGE_FIELDS
models.PackageConfigMetadata, )
adapter.id, key, key, value, adapters = adapter_api.ADAPTERS
exception_when_existing=exception_when_existing, parents = {}
parent=None for adapter_name, adapter in adapters.items():
)) parent = adapter.get('parent', None)
return package_metadatas parents[adapter_name] = parent
for adapter_name, adapter in adapters.items():
packages_metadata[adapter_name] = util.recursive_merge_dict(
adapter_name, packages_metadata, parents
)
return packages_metadata
def add_flavor_metadata_internal(session, exception_when_existing=True): def _get_flavors_metadata_from_configuration():
flavor_metadatas = [] """Get flavor metadata from flavor metadata config dir."""
flavors_metadata = {}
env_locals = {} env_locals = {}
env_locals.update(metadata_validator.VALIDATOR_LOCALS) env_locals.update(metadata_validator.VALIDATOR_LOCALS)
env_locals.update(metadata_callback.CALLBACK_LOCALS) env_locals.update(metadata_callback.CALLBACK_LOCALS)
@@ -242,18 +299,26 @@ def add_flavor_metadata_internal(session, exception_when_existing=True):
env_locals=env_locals env_locals=env_locals
) )
for config in configs: for config in configs:
flavor = utils.get_db_object( adapter_name = config['ADAPTER']
session, models.AdapterFlavor, name=config['FLAVOR'] flavor_name = config['FLAVOR']
) flavor_metadata = flavors_metadata.setdefault(
adapter_name, {}
).setdefault(flavor_name, {})
for key, value in config['METADATA'].items(): for key, value in config['METADATA'].items():
flavor_metadatas.append(_add_metadata( flavor_metadata[key] = _get_metadata_from_configuration(
session, models.FlavorConfigField, key, key, value, FLAVOR_FIELDS
models.FlavorConfigMetadata, )
flavor.id, key, key, value,
exception_when_existing=exception_when_existing, packages_metadata = PACKAGES_METADATA
parent=None adapters_flavors = adapter_api.ADAPTERS_FLAVORS
)) for adapter_name, adapter_flavors in adapters_flavors.items():
return flavor_metadatas package_metadata = packages_metadata.get(adapter_name, {})
for flavor_name, flavor in adapter_flavors.items():
flavor_metadata = flavors_metadata.setdefault(
adapter_name, {}
).setdefault(flavor_name, {})
util.merge_dict(flavor_metadata, package_metadata, override=False)
return flavors_metadata
def _filter_metadata(metadata, **kwargs): def _filter_metadata(metadata, **kwargs):
@@ -295,282 +360,158 @@ def _filter_metadata(metadata, **kwargs):
return filtered_metadata return filtered_metadata
def get_package_metadatas_internal(session): def _load_metadata(force_reload=False):
"""Load metadata information into memory.
If force_reload, the metadata information will be reloaded
even if the metadata is already loaded.
"""
adapter_api.load_adapters_internal(force_reload=force_reload)
global OS_FIELDS
if force_reload or OS_FIELDS is None:
OS_FIELDS = _get_os_fields_from_configuration()
global PACKAGE_FIELDS
if force_reload or PACKAGE_FIELDS is None:
PACKAGE_FIELDS = _get_package_fields_from_configuration()
global FLAVOR_FIELDS
if force_reload or FLAVOR_FIELDS is None:
FLAVOR_FIELDS = _get_flavor_fields_from_configuration()
global OSES_METADATA
if force_reload or OSES_METADATA is None:
OSES_METADATA = _get_oses_metadata_from_configuration()
global PACKAGES_METADATA
if force_reload or PACKAGES_METADATA is None:
PACKAGES_METADATA = _get_packages_metadata_from_configuration()
global FLAVORS_METADATA
if force_reload or FLAVORS_METADATA is None:
FLAVORS_METADATA = _get_flavors_metadata_from_configuration()
global OSES_METADATA_UI_CONVERTERS
if force_reload or OSES_METADATA_UI_CONVERTERS is None:
OSES_METADATA_UI_CONVERTERS = (
_get_oses_metadata_ui_converters_from_configuration()
)
global FLAVORS_METADATA_UI_CONVERTERS
if force_reload or FLAVORS_METADATA_UI_CONVERTERS is None:
FLAVORS_METADATA_UI_CONVERTERS = (
_get_flavors_metadata_ui_converters_from_configuration()
)
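
The caching pattern _load_metadata relies on, reduced to a minimal sketch (the loader below is hypothetical): a module-level cache filled on first use and rebuilt only when force_reload is set.

_CACHE = None

def _load_cache(force_reload=False):
    global _CACHE
    if force_reload or _CACHE is None:
        _CACHE = _build_cache()  # hypothetical expensive loader

def _build_cache():
    return {'loaded': True}
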
def _get_oses_metadata_ui_converters_from_configuration():
"""Get os metadata ui converters from os metadata mapping config dir.
An os metadata ui converter is used to convert os metadata into
a format the UI can understand and show.
"""
oses_metadata_ui_converters = {}
configs = util.load_configs(setting.OS_MAPPING_DIR)
for config in configs:
os_name = config['OS']
oses_metadata_ui_converters[os_name] = config.get('CONFIG_MAPPING', {})
oses = adapter_api.OSES
parents = {}
for os_name, os in oses.items():
parent = os.get('parent', None)
parents[os_name] = parent
for os_name, os in oses.items():
oses_metadata_ui_converters[os_name] = util.recursive_merge_dict(
os_name, oses_metadata_ui_converters, parents
)
return oses_metadata_ui_converters
def _get_flavors_metadata_ui_converters_from_configuration():
"""Get flavor metadata ui converters from flavor mapping config dir."""
flavors_metadata_ui_converters = {}
configs = util.load_configs(setting.FLAVOR_MAPPING_DIR)
for config in configs:
adapter_name = config['ADAPTER']
flavor_name = config['FLAVOR']
flavors_metadata_ui_converters.setdefault(
adapter_name, {}
)[flavor_name] = config.get('CONFIG_MAPPING', {})
adapters = adapter_api.ADAPTERS
parents = {}
for adapter_name, adapter in adapters.items():
parent = adapter.get('parent', None)
parents[adapter_name] = parent
for adapter_name, adapter in adapters.items():
flavors_metadata_ui_converters[adapter_name] = (
util.recursive_merge_dict(
adapter_name, flavors_metadata_ui_converters, parents
)
)
return flavors_metadata_ui_converters
def get_packages_metadata_internal(force_reload=False):
"""Get deployable package metadata."""
_load_metadata(force_reload=force_reload)
metadata_mapping = {} metadata_mapping = {}
adapters = utils.list_db_objects( adapters = adapter_api.ADAPTERS
session, models.Adapter for adapter_name, adapter in adapters.items():
) if adapter.get('deployable'):
for adapter in adapters: metadata_mapping[adapter_name] = _filter_metadata(
if adapter.deployable: PACKAGES_METADATA.get(adapter_name, {})
metadata_dict = adapter.metadata_dict()
metadata_mapping[adapter.id] = _filter_metadata(
metadata_dict, session=session
) )
else: else:
logging.info( logging.info(
'ignore metadata since its adapter %s is not deployable', 'ignore metadata since its adapter %s is not deployable',
adapter.id adapter_name
) )
return metadata_mapping return metadata_mapping
def get_flavor_metadatas_internal(session): def get_flavors_metadata_internal(force_reload=False):
"""Get deployable flavor metadata."""
_load_metadata(force_reload=force_reload)
metadata_mapping = {} metadata_mapping = {}
flavors = utils.list_db_objects( adapters_flavors = adapter_api.ADAPTERS_FLAVORS
session, models.AdapterFlavor for adapter_name, adapter_flavors in adapters_flavors.items():
) adapter = adapter_api.ADAPTERS[adapter_name]
for flavor in flavors: if not adapter.get('deployable'):
flavor_metadata_dict = flavor.metadata_dict() logging.info(
metadata_mapping[flavor.id] = _filter_metadata( 'ignore metadata since its adapter %s is not deployable',
flavor_metadata_dict, session=session adapter_name
) )
adapters = utils.list_db_objects( continue
session, models.Adapter, id=flavor.adapter_id for flavor_name, flavor in adapter_flavors.items():
) flavor_metadata = FLAVORS_METADATA.get(
for adapter in adapters: adapter_name, {}
package_metadata_dict = adapter.metadata_dict() ).get(flavor_name, {})
metadata_mapping[flavor.id].update(_filter_metadata( metadata = _filter_metadata(flavor_metadata)
package_metadata_dict, session=session metadata_mapping.setdefault(
)) adapter_name, {}
)[flavor_name] = metadata
return metadata_mapping return metadata_mapping
def get_os_metadatas_internal(session): def get_flavors_metadata_ui_converters_internal(force_reload=False):
"""Get usable flavor metadata ui converters."""
_load_metadata(force_reload=force_reload)
return FLAVORS_METADATA_UI_CONVERTERS
def get_oses_metadata_internal(force_reload=False):
"""Get deployable os metadata."""
_load_metadata(force_reload=force_reload)
metadata_mapping = {} metadata_mapping = {}
oses = utils.list_db_objects( oses = adapter_api.OSES
session, models.OperatingSystem for os_name, os in oses.items():
) if os.get('deployable'):
for os in oses: metadata_mapping[os_name] = _filter_metadata(
if os.deployable: OSES_METADATA.get(os_name, {})
metadata_dict = os.metadata_dict()
metadata_mapping[os.id] = _filter_metadata(
metadata_dict, session=session
) )
else: else:
logging.info( logging.info(
'ignore metadata since its os %s is not deployable', 'ignore metadata since its os %s is not deployable',
os.id os_name
) )
return metadata_mapping return metadata_mapping
def _validate_self( def get_oses_metadata_ui_converters_internal(force_reload=False):
config_path, config_key, config, """Get usable os metadata ui converters."""
metadata, whole_check, _load_metadata(force_reload=force_reload)
**kwargs return OSES_METADATA_UI_CONVERTERS
):
logging.debug('validate config self %s', config_path)
if '_self' not in metadata:
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
return
field_type = metadata['_self'].get('field_type', basestring)
if not isinstance(config, field_type):
raise exception.InvalidParameter(
'%s config type is not %s' % (config_path, field_type)
)
is_required = metadata['_self'].get(
'is_required', False
)
required_in_whole_config = metadata['_self'].get(
'required_in_whole_config', False
)
if isinstance(config, basestring):
if config == '' and not is_required and not required_in_whole_config:
# ignore empty config when it is optional
return
required_in_options = metadata['_self'].get(
'required_in_options', False
)
options = metadata['_self'].get('options', None)
if required_in_options:
if field_type in [int, basestring, float, bool]:
if options and config not in options:
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
elif field_type in [list, tuple]:
if options and not set(config).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
elif field_type == dict:
if options and not set(config.keys()).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s' % (config_path, options)
)
validator = metadata['_self'].get('validator', None)
logging.debug('validate by validator %s', validator)
if validator:
if not validator(config_key, config, **kwargs):
raise exception.InvalidParameter(
'%s config is invalid' % config_path
)
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
def _validate_config(
config_path, config, metadata, whole_check,
**kwargs
):
logging.debug('validate config %s', config_path)
generals = {}
specified = {}
for key, value in metadata.items():
if key.startswith('$'):
generals[key] = value
elif key.startswith('_'):
pass
else:
specified[key] = value
config_keys = set(config.keys())
specified_keys = set(specified.keys())
intersect_keys = config_keys & specified_keys
not_found_keys = config_keys - specified_keys
redundant_keys = specified_keys - config_keys
for key in redundant_keys:
if '_self' not in specified[key]:
continue
if specified[key]['_self'].get('is_required', False):
raise exception.InvalidParameter(
'%s/%s does not find but it is required' % (
config_path, key
)
)
if (
whole_check and
specified[key]['_self'].get(
'required_in_whole_config', False
)
):
raise exception.InvalidParameter(
'%s/%s does not find but it is required in whole config' % (
config_path, key
)
)
for key in intersect_keys:
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], specified[key], whole_check,
**kwargs
)
for key in not_found_keys:
if not generals:
raise exception.InvalidParameter(
'key %s missing in metadata %s' % (
key, config_path
)
)
for general_key, general_value in generals.items():
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], general_value, whole_check,
**kwargs
)
def _autofill_self_config(
config_path, config_key, config,
metadata,
**kwargs
):
if '_self' not in metadata:
if isinstance(config, dict):
_autofill_config(
config_path, config, metadata, **kwargs
)
return config
logging.debug(
'autofill %s by metadata %s', config_path, metadata['_self']
)
autofill_callback = metadata['_self'].get(
'autofill_callback', None
)
autofill_callback_params = metadata['_self'].get(
'autofill_callback_params', {}
)
callback_params = dict(kwargs)
if autofill_callback_params:
callback_params.update(autofill_callback_params)
default_value = metadata['_self'].get(
'default_value', None
)
if default_value is not None:
callback_params['default_value'] = default_value
options = metadata['_self'].get(
'options', None
)
if options is not None:
callback_params['options'] = options
if autofill_callback:
config = autofill_callback(
config_key, config, **callback_params
)
if config is None:
new_config = {}
else:
new_config = config
if isinstance(new_config, dict):
_autofill_config(
config_path, new_config, metadata, **kwargs
)
if new_config:
config = new_config
return config
def _autofill_config(
config_path, config, metadata, **kwargs
):
generals = {}
specified = {}
for key, value in metadata.items():
if key.startswith('$'):
generals[key] = value
elif key.startswith('_'):
pass
else:
specified[key] = value
config_keys = set(config.keys())
specified_keys = set(specified.keys())
intersect_keys = config_keys & specified_keys
not_found_keys = config_keys - specified_keys
redundant_keys = specified_keys - config_keys
for key in redundant_keys:
self_config = _autofill_self_config(
'%s/%s' % (config_path, key),
key, None, specified[key], **kwargs
)
if self_config is not None:
config[key] = self_config
for key in intersect_keys:
config[key] = _autofill_self_config(
'%s/%s' % (config_path, key),
key, config[key], specified[key],
**kwargs
)
for key in not_found_keys:
for general_key, general_value in generals.items():
config[key] = _autofill_self_config(
'%s/%s' % (config_path, key),
key, config[key], general_value,
**kwargs
)
return config
def validate_config_internal(
config, metadata, whole_check, **kwargs
):
_validate_config('', config, metadata, whole_check, **kwargs)
def autofill_config_internal(
config, metadata, **kwargs
):
return _autofill_config('', config, metadata, **kwargs)


@@ -15,6 +15,8 @@
"""Metadata related object holder.""" """Metadata related object holder."""
import logging import logging
from compass.db.api import adapter as adapter_api
from compass.db.api import adapter_holder as adapter_holder_api
from compass.db.api import database from compass.db.api import database
from compass.db.api import metadata as metadata_api from compass.db.api import metadata as metadata_api
from compass.db.api import permission from compass.db.api import permission
@@ -27,94 +29,172 @@ from compass.utils import util
RESP_METADATA_FIELDS = [ RESP_METADATA_FIELDS = [
'os_config', 'package_config', 'flavor_config' 'os_config', 'package_config'
] ]
RESP_FLAVORS_FIELDS = [ RESP_UI_METADATA_FIELDS = [
'id', 'name', 'display_name', 'template', 'roles' 'os_global_config', 'flavor_config'
] ]
@database.run_in_session() def load_metadatas(force_reload=False):
def load_metadatas(session): """Load metadatas."""
load_os_metadatas_internal(session) # TODO(xicheng): today we load metadata into memory in its original
load_package_metadatas_internal(session) # file format from metadata.py. We take this in-memory metadata, do
load_flavor_metadatas_internal(session) # some translation, and store the translated metadata in memory too,
# in metadata_holder.py. The api can only access the global in-memory
# data in metadata_holder.py.
_load_os_metadatas(force_reload=force_reload)
_load_package_metadatas(force_reload=force_reload)
_load_flavor_metadatas(force_reload=force_reload)
_load_os_metadata_ui_converters(force_reload=force_reload)
_load_flavor_metadata_ui_converters(force_reload=force_reload)
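
A usage note on the loader above: the public validators and getters in this module call load_metadatas() before any lookup, which is cheap once the caches are warm; load_metadatas(force_reload=True) would rebuild every cache after the metadata config files change (an assumption based on the force_reload plumbing, not documented behavior).
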
def load_os_metadatas_internal(session): def _load_os_metadata_ui_converters(force_reload=False):
global OS_METADATA_MAPPING global OS_METADATA_UI_CONVERTERS
logging.info('load os metadatas into memory') if force_reload or OS_METADATA_UI_CONVERTERS is None:
OS_METADATA_MAPPING = metadata_api.get_os_metadatas_internal(session) logging.info('load os metadatas ui converters into memory')
OS_METADATA_UI_CONVERTERS = (
metadata_api.get_oses_metadata_ui_converters_internal(
def load_package_metadatas_internal(session): force_reload=force_reload
global PACKAGE_METADATA_MAPPING )
logging.info('load package metadatas into memory')
PACKAGE_METADATA_MAPPING = (
metadata_api.get_package_metadatas_internal(session)
)
def load_flavor_metadatas_internal(session):
global FLAVOR_METADATA_MAPPING
logging.info('load flavor metadatas into memory')
FLAVOR_METADATA_MAPPING = (
metadata_api.get_flavor_metadatas_internal(session)
)
OS_METADATA_MAPPING = {}
PACKAGE_METADATA_MAPPING = {}
FLAVOR_METADATA_MAPPING = {}
def _validate_config(
config, id, id_name, metadata_mapping, whole_check, **kwargs
):
if id not in metadata_mapping:
raise exception.InvalidParameter(
'%s id %s is not found in metadata mapping' % (id_name, id)
) )
metadatas = metadata_mapping[id]
metadata_api.validate_config_internal(
config, metadatas, whole_check, **kwargs def _load_os_metadatas(force_reload=False):
) """Load os metadata from inmemory db and map it by os_id."""
global OS_METADATA_MAPPING
if force_reload or OS_METADATA_MAPPING is None:
logging.info('load os metadatas into memory')
OS_METADATA_MAPPING = metadata_api.get_oses_metadata_internal(
force_reload=force_reload
)
def _load_flavor_metadata_ui_converters(force_reload=False):
"""Load flavor metadata ui converters from inmemory db.
The loaded metadata is mapped by flavor id.
"""
global FLAVOR_METADATA_UI_CONVERTERS
if force_reload or FLAVOR_METADATA_UI_CONVERTERS is None:
logging.info('load flavor metadata ui converters into memory')
FLAVOR_METADATA_UI_CONVERTERS = {}
adapters_flavors_metadata_ui_converters = (
metadata_api.get_flavors_metadata_ui_converters_internal(
force_reload=force_reload
)
)
for adapter_name, adapter_flavors_metadata_ui_converters in (
adapters_flavors_metadata_ui_converters.items()
):
for flavor_name, flavor_metadata_ui_converter in (
adapter_flavors_metadata_ui_converters.items()
):
FLAVOR_METADATA_UI_CONVERTERS[
'%s:%s' % (adapter_name, flavor_name)
] = flavor_metadata_ui_converter
@util.deprecated
def _load_package_metadatas(force_reload=False):
"""Load deployable package metadata from inmemory db."""
global PACKAGE_METADATA_MAPPING
if force_reload or PACKAGE_METADATA_MAPPING is None:
logging.info('load package metadatas into memory')
PACKAGE_METADATA_MAPPING = (
metadata_api.get_packages_metadata_internal(
force_reload=force_reload
)
)
def _load_flavor_metadatas(force_reload=False):
"""Load flavor metadata from inmemory db.
The loaded metadata are mapped by flavor id.
"""
global FLAVOR_METADATA_MAPPING
if force_reload or FLAVOR_METADATA_MAPPING is None:
logging.info('load flavor metadatas into memory')
FLAVOR_METADATA_MAPPING = {}
adapters_flavors_metadata = (
metadata_api.get_flavors_metadata_internal(
force_reload=force_reload
)
)
for adapter_name, adapter_flavors_metadata in (
adapters_flavors_metadata.items()
):
for flavor_name, flavor_metadata in (
adapter_flavors_metadata.items()
):
FLAVOR_METADATA_MAPPING[
'%s:%s' % (adapter_name, flavor_name)
] = flavor_metadata
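
A small illustration (names invented) of the flattening above: the nested {adapter: {flavor: metadata}} structure becomes a flat map keyed '<adapter>:<flavor>', so a flavor id resolves in one lookup.

nested = {'openstack': {'allinone': {'roles': 1},
                        'multinodes': {'roles': 3}}}
flat = {}
for adapter_name, flavors in nested.items():
    for flavor_name, flavor_metadata in flavors.items():
        flat['%s:%s' % (adapter_name, flavor_name)] = flavor_metadata
# flat == {'openstack:allinone': {'roles': 1},
#          'openstack:multinodes': {'roles': 3}}
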
OS_METADATA_MAPPING = None
PACKAGE_METADATA_MAPPING = None
FLAVOR_METADATA_MAPPING = None
OS_METADATA_UI_CONVERTERS = None
FLAVOR_METADATA_UI_CONVERTERS = None
def validate_os_config( def validate_os_config(
session, config, os_id, whole_check=False, **kwargs config, os_id, whole_check=False, **kwargs
): ):
if not OS_METADATA_MAPPING: """Validate os config."""
load_os_metadatas_internal(session) load_metadatas()
if os_id not in OS_METADATA_MAPPING:
raise exception.InvalidParameter(
'os %s is not found in os metadata mapping' % os_id
)
_validate_config( _validate_config(
config, os_id, 'os', OS_METADATA_MAPPING, '', config, OS_METADATA_MAPPING[os_id],
whole_check, session=session, **kwargs whole_check, **kwargs
) )
@util.deprecated
def validate_package_config( def validate_package_config(
session, config, adapter_id, whole_check=False, **kwargs config, adapter_id, whole_check=False, **kwargs
): ):
if not PACKAGE_METADATA_MAPPING: """Validate package config."""
load_package_metadatas_internal(session) load_metadatas()
if adapter_id not in PACKAGE_METADATA_MAPPING:
raise exception.InvalidParameter(
'adapter %s is not found in package metedata mapping' % adapter_id
)
_validate_config( _validate_config(
config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING, '', config, PACKAGE_METADATA_MAPPING[adapter_id],
whole_check, session=session, **kwargs whole_check, **kwargs
) )
def validate_flavor_config( def validate_flavor_config(
session, config, flavor_id, whole_check=False, **kwargs config, flavor_id, whole_check=False, **kwargs
): ):
if not FLAVOR_METADATA_MAPPING: """Validate flavor config."""
load_flavor_metadatas_internal(session) load_metadatas()
if flavor_id not in FLAVOR_METADATA_MAPPING:
raise exception.InvalidParameter(
'flavor %s is not found in flavor metedata mapping' % flavor_id
)
_validate_config( _validate_config(
config, flavor_id, 'flavor', FLAVOR_METADATA_MAPPING, '', config, FLAVOR_METADATA_MAPPING[flavor_id],
whole_check, session=session, **kwargs whole_check, **kwargs
) )
def _filter_metadata(metadata, **kwargs): def _filter_metadata(metadata, **kwargs):
"""Filter metadata before return it to api.
Some metadata fields are not json compatible or
only used in db/api internally.
We should strip these fields out before return to api.
"""
if not isinstance(metadata, dict): if not isinstance(metadata, dict):
return metadata return metadata
filtered_metadata = {} filtered_metadata = {}
@@ -141,132 +221,162 @@ def _filter_metadata(metadata, **kwargs):
return filtered_metadata return filtered_metadata
def get_package_metadata_internal(session, adapter_id): @util.deprecated
"""get package metadata internal.""" def _get_package_metadata(adapter_id):
if not PACKAGE_METADATA_MAPPING: """get package metadata."""
load_package_metadatas_internal(session) load_metadatas()
if adapter_id not in PACKAGE_METADATA_MAPPING: if adapter_id not in PACKAGE_METADATA_MAPPING:
raise exception.RecordNotExists( raise exception.RecordNotExists(
'adapter %s does not exist' % adapter_id 'adapter %s does not exist' % adapter_id
) )
return _filter_metadata( return _filter_metadata(
PACKAGE_METADATA_MAPPING[adapter_id], session=session PACKAGE_METADATA_MAPPING[adapter_id]
) )
@util.deprecated
@utils.supported_filters([]) @utils.supported_filters([])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS permission.PERMISSION_LIST_METADATAS
) )
@utils.wrap_to_dict(RESP_METADATA_FIELDS) @utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_package_metadata(adapter_id, user=None, session=None, **kwargs): def get_package_metadata(adapter_id, user=None, session=None, **kwargs):
"""Get package metadata from adapter."""
return { return {
'package_config': get_package_metadata_internal(session, adapter_id) 'package_config': _get_package_metadata(adapter_id)
} }
def get_flavor_metadata_internal(session, flavor_id): def _get_flavor_metadata(flavor_id):
"""get flavor metadata internal.""" """get flavor metadata."""
if not FLAVOR_METADATA_MAPPING: load_metadatas()
load_flavor_metadatas_internal(session)
if flavor_id not in FLAVOR_METADATA_MAPPING: if flavor_id not in FLAVOR_METADATA_MAPPING:
raise exception.RecordNotExists( raise exception.RecordNotExists(
'flavor %s does not exist' % flavor_id 'flavor %s does not exist' % flavor_id
) )
return _filter_metadata( return _filter_metadata(FLAVOR_METADATA_MAPPING[flavor_id])
FLAVOR_METADATA_MAPPING[flavor_id], session=session
)
@utils.supported_filters([]) @utils.supported_filters([])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS permission.PERMISSION_LIST_METADATAS
) )
@utils.wrap_to_dict(RESP_METADATA_FIELDS) @utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_flavor_metadata(flavor_id, user=None, session=None, **kwargs): def get_flavor_metadata(flavor_id, user=None, session=None, **kwargs):
"""Get flavor metadata by flavor."""
return { return {
'flavor_config': get_flavor_metadata_internal(session, flavor_id) 'package_config': _get_flavor_metadata(flavor_id)
} }
@utils.supported_filters([]) def _get_os_metadata(os_id):
@database.run_in_session() """get os metadata."""
@user_api.check_user_permission_in_session( load_metadatas()
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def list_flavors(user=None, session=None, **filters):
"""List flavors."""
return utils.list_db_objects(
session, models.AdapterFlavor, **filters
)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission_in_session(
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def get_flavor(flavor_id, user=None, session=None, **kwargs):
"""Get flavor."""
return utils.get_db_object(
session, models.AdapterFlavor, id=flavor_id
)
def get_os_metadata_internal(session, os_id):
"""get os metadata internal."""
if not OS_METADATA_MAPPING:
load_os_metadatas_internal(session)
if os_id not in OS_METADATA_MAPPING: if os_id not in OS_METADATA_MAPPING:
raise exception.RecordNotExists( raise exception.RecordNotExists(
'os %s does not exist' % os_id 'os %s does not exist' % os_id
) )
return _filter_metadata( return _filter_metadata(OS_METADATA_MAPPING[os_id])
OS_METADATA_MAPPING[os_id], session=session
)
def _get_os_metadata_ui_converter(os_id):
"""get os metadata ui converter."""
load_metadatas()
if os_id not in OS_METADATA_UI_CONVERTERS:
raise exception.RecordNotExists(
'os %s does not exist' % os_id
)
return OS_METADATA_UI_CONVERTERS[os_id]
def _get_flavor_metadata_ui_converter(flavor_id):
"""get flavor metadata ui converter."""
load_metadatas()
if flavor_id not in FLAVOR_METADATA_UI_CONVERTERS:
raise exception.RecordNotExists(
'flavor %s does not exist' % flavor_id
)
return FLAVOR_METADATA_UI_CONVERTERS[flavor_id]
@utils.supported_filters([]) @utils.supported_filters([])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS permission.PERMISSION_LIST_METADATAS
) )
@utils.wrap_to_dict(RESP_METADATA_FIELDS) @utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_os_metadata(os_id, user=None, session=None, **kwargs): def get_os_metadata(os_id, user=None, session=None, **kwargs):
"""get os metadatas.""" """get os metadatas."""
return {'os_config': get_os_metadata_internal(session, os_id)} return {'os_config': _get_os_metadata(os_id)}
def get_ui_metadata(metadata, config): @utils.supported_filters([])
"""convert os_metadata to ui os_metadata.""" @database.run_in_session()
result_config = {} @user_api.check_user_permission(
result_config[config['mapped_name']] = [] permission.PERMISSION_LIST_METADATAS
for mapped_child in config['mapped_children']: )
@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
def get_os_ui_metadata(os_id, user=None, session=None, **kwargs):
"""Get os metadata ui converter by os."""
metadata = _get_os_metadata(os_id)
metadata_ui_converter = _get_os_metadata_ui_converter(os_id)
return _get_ui_metadata(metadata, metadata_ui_converter)
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_UI_METADATA_FIELDS)
def get_flavor_ui_metadata(flavor_id, user=None, session=None, **kwargs):
"""Get flavor ui metadata by flavor."""
metadata = _get_flavor_metadata(flavor_id)
metadata_ui_converter = _get_flavor_metadata_ui_converter(flavor_id)
return _get_ui_metadata(metadata, metadata_ui_converter)
def _get_ui_metadata(metadata, metadata_ui_converter):
"""convert metadata to ui metadata.
Args:
metadata: metadata we defined in metadata files.
metadata_ui_converter: metadata ui converter defined in metadata
mapping files. Used to convert original
metadata to ui understandable metadata.
Returns:
ui understandable metadata.
"""
ui_metadata = {}
ui_metadata[metadata_ui_converter['mapped_name']] = []
for mapped_child in metadata_ui_converter['mapped_children']:
data_dict = {} data_dict = {}
for config_key, config_value in mapped_child.items(): for ui_key, ui_value in mapped_child.items():
for key, value in config_value.items(): for key, value in ui_value.items():
if 'data' == key: if 'data' == key:
result_data = [] result_data = []
_get_data(metadata[config_key], value, result_data) _get_ui_metadata_data(
metadata[ui_key], value, result_data
)
data_dict['data'] = result_data data_dict['data'] = result_data
else: else:
data_dict[key] = value data_dict[key] = value
result_config[config['mapped_name']].append(data_dict) ui_metadata[metadata_ui_converter['mapped_name']].append(data_dict)
return result_config return ui_metadata
def _get_data(metadata, config, result_data): def _get_ui_metadata_data(metadata, config, result_data):
"""Get ui metadata data and fill to result."""
data_dict = {} data_dict = {}
for key, config_value in config.items(): for key, config_value in config.items():
if isinstance(config_value, dict) and key != 'content_data': if isinstance(config_value, dict) and key != 'content_data':
if key in metadata.keys(): if key in metadata.keys():
_get_data(metadata[key], config_value, result_data) _get_ui_metadata_data(metadata[key], config_value, result_data)
else: else:
_get_data(metadata, config_value, result_data) _get_ui_metadata_data(metadata, config_value, result_data)
elif isinstance(config_value, list): elif isinstance(config_value, list):
option_list = [] option_list = []
for item in config_value: for item in config_value:
@ -285,9 +395,10 @@ def _get_data(metadata, config, result_data):
return result_data return result_data
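A toy illustration of the conversion above, with a hypothetical converter (the real converters live in Compass's metadata mapping files): every key of a mapped child except 'data' is copied verbatim, while 'data' is expanded against the actual metadata.

metadata = {'general': {'timezone': {'_self': {'default_value': 'UTC'}}}}
converter = {
    'mapped_name': 'os_global_config',
    'mapped_children': [
        {'general': {
            'data': {'timezone': {'display_type': 'dropdown'}},
            'title': 'General',
        }},
    ],
}
# _get_ui_metadata(metadata, converter) would return roughly:
# {'os_global_config': [{'title': 'General', 'data': [...timezone entry...]}]}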
@util.deprecated
@utils.supported_filters([]) @utils.supported_filters([])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS permission.PERMISSION_LIST_METADATAS
) )
@utils.wrap_to_dict(RESP_METADATA_FIELDS) @utils.wrap_to_dict(RESP_METADATA_FIELDS)
@ -295,9 +406,11 @@ def get_package_os_metadata(
adapter_id, os_id, adapter_id, os_id,
user=None, session=None, **kwargs user=None, session=None, **kwargs
): ):
from compass.db.api import adapter_holder as adapter_api """Get metadata by adapter and os."""
adapter = adapter_api.get_adapter_internal(session, adapter_id) adapter = adapter_holder_api.get_adapter(
os_ids = [os['os_id'] for os in adapter['supported_oses']] adapter_id, user=user, session=session
)
os_ids = [os['id'] for os in adapter['supported_oses']]
if os_id not in os_ids: if os_id not in os_ids:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'os %s is not in the supported os list of adapter %s' % ( 'os %s is not in the supported os list of adapter %s' % (
@ -305,48 +418,307 @@ def get_package_os_metadata(
) )
) )
metadatas = {} metadatas = {}
metadatas['os_config'] = get_os_metadata_internal( metadatas['os_config'] = _get_os_metadata(
session, os_id os_id
) )
metadatas['package_config'] = get_package_metadata_internal( metadatas['package_config'] = _get_package_metadata(
session, adapter_id adapter_id
) )
return metadatas return metadatas
def _autofill_config( @utils.supported_filters([])
config, id, id_name, metadata_mapping, **kwargs @database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_METADATA_FIELDS)
def get_flavor_os_metadata(
flavor_id, os_id,
user=None, session=None, **kwargs
): ):
if id not in metadata_mapping: """Get metadata by flavor and os."""
flavor = adapter_holder_api.get_flavor(
flavor_id, user=user, session=session
)
adapter_id = flavor['adapter_id']
adapter = adapter_holder_api.get_adapter(
adapter_id, user=user, session=session
)
os_ids = [os['id'] for os in adapter['supported_oses']]
if os_id not in os_ids:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'%s id %s is not found in metadata mapping' % (id_name, id) 'os %s is not in the supported os list of adapter %s' % (
os_id, adapter_id
)
) )
metadatas = metadata_mapping[id] metadatas = {}
metadatas['os_config'] = _get_os_metadata(
os_id
)
metadatas['package_config'] = _get_flavor_metadata(
flavor_id
)
return metadatas
def _validate_self(
config_path, config_key, config,
metadata, whole_check,
**kwargs
):
"""validate config by metadata self section."""
logging.debug('validate config self %s', config_path)
if '_self' not in metadata:
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
return
field_type = metadata['_self'].get('field_type', basestring)
if not isinstance(config, field_type):
raise exception.InvalidParameter(
'%s config type is not %s: %s' % (config_path, field_type, config)
)
is_required = metadata['_self'].get(
'is_required', False
)
required_in_whole_config = metadata['_self'].get(
'required_in_whole_config', False
)
if isinstance(config, basestring):
if config == '' and not is_required and not required_in_whole_config:
# ignore empty config when it is optional
return
required_in_options = metadata['_self'].get(
'required_in_options', False
)
options = metadata['_self'].get('options', None)
if required_in_options:
if field_type in [int, basestring, float, bool]:
if options and config not in options:
raise exception.InvalidParameter(
'%s config is not in %s: %s' % (
config_path, options, config
)
)
elif field_type in [list, tuple]:
if options and not set(config).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s: %s' % (
config_path, options, config
)
)
elif field_type == dict:
if options and not set(config.keys()).issubset(set(options)):
raise exception.InvalidParameter(
'%s config is not in %s: %s' % (
config_path, options, config
)
)
validator = metadata['_self'].get('validator', None)
logging.debug('validate by validator %s', validator)
if validator:
if not validator(config_key, config, **kwargs):
raise exception.InvalidParameter(
'%s config is invalid' % config_path
)
if isinstance(config, dict):
_validate_config(
config_path, config, metadata, whole_check, **kwargs
)
def _validate_config(
config_path, config, metadata, whole_check,
**kwargs
):
"""validate config by metadata."""
logging.debug('validate config %s', config_path)
generals = {}
specified = {}
for key, value in metadata.items():
if key.startswith('$'):
generals[key] = value
elif key.startswith('_'):
pass
else:
specified[key] = value
config_keys = set(config.keys())
specified_keys = set(specified.keys())
intersect_keys = config_keys & specified_keys
not_found_keys = config_keys - specified_keys
redundant_keys = specified_keys - config_keys
for key in redundant_keys:
if '_self' not in specified[key]:
continue
if specified[key]['_self'].get('is_required', False):
raise exception.InvalidParameter(
'%s/%s not found but it is required' % (
config_path, key
)
)
if (
whole_check and
specified[key]['_self'].get(
'required_in_whole_config', False
)
):
raise exception.InvalidParameter(
'%s/%s not found but it is required in whole config' % (
config_path, key
)
)
for key in intersect_keys:
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], specified[key], whole_check,
**kwargs
)
for key in not_found_keys:
if not generals:
raise exception.InvalidParameter(
'key %s missing in metadata %s' % (
key, config_path
)
)
for general_key, general_value in generals.items():
_validate_self(
'%s/%s' % (config_path, key),
key, config[key], general_value, whole_check,
**kwargs
)
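To make the '$'-general / '_self' convention above concrete, here is a minimal self-contained sketch of the same validation idea (hypothetical metadata and simplified rules; the real code also handles options, validators and whole-config checks):

def validate(path, config, metadata):
    # keys starting with '$' match any config key; '_self' holds field rules
    specified = dict([(k, v) for k, v in metadata.items()
                      if not k.startswith('$') and not k.startswith('_')])
    generals = [v for k, v in metadata.items() if k.startswith('$')]
    for key, value in config.items():
        sub_meta = specified.get(key) or (generals[0] if generals else None)
        if sub_meta is None:
            raise ValueError('%s/%s is not declared in metadata' % (path, key))
        if isinstance(value, dict):
            validate('%s/%s' % (path, key), value, sub_meta)
        else:
            field_type = sub_meta.get('_self', {}).get('field_type', str)
            if not isinstance(value, field_type):
                raise ValueError('%s/%s is not %s' % (path, key, field_type))

meta = {'security': {'_self': {},
                     '$credential': {'_self': {'field_type': str}}}}
validate('', {'security': {'admin': 'secret', 'console': 'guest'}}, meta)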
def _autofill_self_config(
config_path, config_key, config,
metadata,
**kwargs
):
"""Autofill config by metadata self section."""
if '_self' not in metadata:
if isinstance(config, dict):
_autofill_config(
config_path, config, metadata, **kwargs
)
return config
logging.debug( logging.debug(
'auto fill %s config %s by params %s', 'autofill %s by metadata %s', config_path, metadata['_self']
id_name, config, kwargs
) )
return metadata_api.autofill_config_internal( autofill_callback = metadata['_self'].get(
config, metadatas, **kwargs 'autofill_callback', None
) )
autofill_callback_params = metadata['_self'].get(
'autofill_callback_params', {}
)
callback_params = dict(kwargs)
if autofill_callback_params:
callback_params.update(autofill_callback_params)
default_value = metadata['_self'].get(
'default_value', None
)
if default_value is not None:
callback_params['default_value'] = default_value
options = metadata['_self'].get(
'options', None
)
if options is not None:
callback_params['options'] = options
if autofill_callback:
config = autofill_callback(
config_key, config, **callback_params
)
if config is None:
new_config = {}
else:
new_config = config
if isinstance(new_config, dict):
_autofill_config(
config_path, new_config, metadata, **kwargs
)
if new_config:
config = new_config
return config
def _autofill_config(
config_path, config, metadata, **kwargs
):
"""autofill config by metadata."""
generals = {}
specified = {}
for key, value in metadata.items():
if key.startswith('$'):
generals[key] = value
elif key.startswith('_'):
pass
else:
specified[key] = value
config_keys = set(config.keys())
specified_keys = set(specified.keys())
intersect_keys = config_keys & specified_keys
not_found_keys = config_keys - specified_keys
redundant_keys = specified_keys - config_keys
for key in redundant_keys:
self_config = _autofill_self_config(
'%s/%s' % (config_path, key),
key, None, specified[key], **kwargs
)
if self_config is not None:
config[key] = self_config
for key in intersect_keys:
config[key] = _autofill_self_config(
'%s/%s' % (config_path, key),
key, config[key], specified[key],
**kwargs
)
for key in not_found_keys:
for general_key, general_value in generals.items():
config[key] = _autofill_self_config(
'%s/%s' % (config_path, key),
key, config[key], general_value,
**kwargs
)
return config
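A minimal sketch of the fill-by-default behavior above (hypothetical metadata; the real code also supports autofill_callback and the '$'-general keys):

def autofill(config, metadata):
    for key, sub_meta in metadata.items():
        if key.startswith('_') or key.startswith('$'):
            continue
        rules = sub_meta.get('_self', {})
        # fill a missing key when the metadata declares a default for it
        if key not in config and rules.get('default_value') is not None:
            config[key] = rules['default_value']
        if isinstance(config.get(key), dict):
            autofill(config[key], sub_meta)
    return config

meta = {'timezone': {'_self': {'default_value': 'UTC'}},
        'dns_servers': {'_self': {'default_value': ['8.8.8.8']}}}
print(autofill({'timezone': 'America/Chicago'}, meta))
# roughly: {'timezone': 'America/Chicago', 'dns_servers': ['8.8.8.8']}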
def autofill_os_config( def autofill_os_config(
session, config, os_id, **kwargs config, os_id, **kwargs
): ):
if not OS_METADATA_MAPPING: load_metadatas()
load_os_metadatas_internal(session) if os_id not in OS_METADATA_MAPPING:
raise exception.InvalidParameter(
'os %s is not found in os metadata mapping' % os_id
)
return _autofill_config( return _autofill_config(
config, os_id, 'os', OS_METADATA_MAPPING, session=session, **kwargs '', config, OS_METADATA_MAPPING[os_id], **kwargs
) )
def autofill_package_config( def autofill_package_config(
session, config, adapter_id, **kwargs config, adapter_id, **kwargs
): ):
if not PACKAGE_METADATA_MAPPING: load_metadatas()
load_package_metadatas_internal(session) if adapter_id not in PACKAGE_METADATA_MAPPING:
raise exception.InvalidParameter(
'adapter %s is not found in package metadata mapping' % adapter_id
)
return _autofill_config( return _autofill_config(
config, adapter_id, 'adapter', PACKAGE_METADATA_MAPPING, '', config, PACKAGE_METADATA_MAPPING[adapter_id], **kwargs
session=session, **kwargs )
def autofill_flavor_config(
config, flavor_id, **kwargs
):
load_metadatas()
if flavor_id not in FLAVOR_METADATA_MAPPING:
raise exception.InvalidParameter(
'flavor %s is not found in flavor metadata mapping' % flavor_id
)
return _autofill_config(
'', config, FLAVOR_METADATA_MAPPING[flavor_id], **kwargs
) )
@ -15,6 +15,7 @@
"""Network related database operations.""" """Network related database operations."""
import logging import logging
import netaddr import netaddr
import re
from compass.db.api import database from compass.db.api import database
from compass.db.api import permission from compass.db.api import permission
@ -37,6 +38,7 @@ UPDATED_FIELDS = ['subnet', 'name']
def _check_subnet(subnet): def _check_subnet(subnet):
"""Check subnet format is correct."""
try: try:
netaddr.IPNetwork(subnet) netaddr.IPNetwork(subnet)
except Exception as error: except Exception as error:
@ -47,7 +49,7 @@ def _check_subnet(subnet):
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_SUBNETS permission.PERMISSION_LIST_SUBNETS
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
@ -58,9 +60,21 @@ def list_subnets(user=None, session=None, **filters):
) )
def _get_subnet(subnet_id, session=None, **kwargs):
"""Get subnet by subnet id."""
if isinstance(subnet_id, (int, long)):
return utils.get_db_object(
session, models.Subnet,
id=subnet_id, **kwargs
)
raise exception.InvalidParameter(
'subnet id %s type is not int compatible' % subnet_id
)
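The isinstance guard above is the pattern all the new _get_* helpers share; a short sketch of it (Python 2 style, since the diff relies on long):

def check_id(object_id, name):
    # reject non-integer ids early so a bad URL parameter surfaces as an
    # InvalidParameter instead of an obscure database error
    # (Python 2: `long` exists; on Python 3 this would be just `int`)
    if not isinstance(object_id, (int, long)):
        raise ValueError(
            '%s id %r type is not int compatible' % (name, object_id))

check_id(42, 'subnet')       # passes
# check_id('42', 'subnet')   # would raise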
@utils.supported_filters([]) @utils.supported_filters([])
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_LIST_SUBNETS permission.PERMISSION_LIST_SUBNETS
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
@ -69,9 +83,9 @@ def get_subnet(
user=None, session=None, **kwargs user=None, session=None, **kwargs
): ):
"""Get subnet info.""" """Get subnet info."""
return utils.get_db_object( return _get_subnet(
session, models.Subnet, subnet_id, session=session,
exception_when_missing, id=subnet_id exception_when_missing=exception_when_missing
) )
@ -81,7 +95,7 @@ def get_subnet(
) )
@utils.input_validates(subnet=_check_subnet) @utils.input_validates(subnet=_check_subnet)
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_ADD_SUBNET permission.PERMISSION_ADD_SUBNET
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
@ -102,29 +116,20 @@ def add_subnet(
) )
@utils.input_validates(subnet=_check_subnet) @utils.input_validates(subnet=_check_subnet)
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session( @user_api.check_user_permission(
permission.PERMISSION_ADD_SUBNET permission.PERMISSION_ADD_SUBNET
) )
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def update_subnet(subnet_id, user=None, session=None, **kwargs): def update_subnet(subnet_id, user=None, session=None, **kwargs):
"""Update a subnet.""" """Update a subnet."""
subnet = utils.get_db_object( subnet = _get_subnet(
session, models.Subnet, id=subnet_id subnet_id, session=session
) )
return utils.update_db_object(session, subnet, **kwargs) return utils.update_db_object(session, subnet, **kwargs)
@utils.supported_filters([]) def _check_subnet_deletable(subnet):
@database.run_in_session() """Check a subnet deletable."""
@user_api.check_user_permission_in_session(
permission.PERMISSION_DEL_SUBNET
)
@utils.wrap_to_dict(RESP_FIELDS)
def del_subnet(subnet_id, user=None, session=None, **kwargs):
"""Delete a subnet."""
subnet = utils.get_db_object(
session, models.Subnet, id=subnet_id
)
if subnet.host_networks: if subnet.host_networks:
host_networks = [ host_networks = [
'%s:%s=%s' % ( '%s:%s=%s' % (
@ -139,4 +144,17 @@ def del_subnet(subnet_id, user=None, session=None, **kwargs):
) )
) )
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_DEL_SUBNET
)
@utils.wrap_to_dict(RESP_FIELDS)
def del_subnet(subnet_id, user=None, session=None, **kwargs):
"""Delete a subnet."""
subnet = _get_subnet(
subnet_id, session=session
)
_check_subnet_deletable(subnet)
return utils.del_db_object(session, subnet) return utils.del_db_object(session, subnet)
@ -13,14 +13,17 @@
# limitations under the License. # limitations under the License.
"""Permission database operations.""" """Permission database operations."""
import re
from compass.db.api import database from compass.db.api import database
from compass.db.api import user as user_api from compass.db.api import user as user_api
from compass.db.api import utils from compass.db.api import utils
from compass.db import exception from compass.db import exception
from compass.db import models from compass.db import models
from compass.utils import util
SUPPORTED_FIELDS = ['name', 'alias', 'description'] SUPPORTED_FIELDS = ['id', 'name', 'alias', 'description']
RESP_FIELDS = ['id', 'name', 'alias', 'description'] RESP_FIELDS = ['id', 'name', 'alias', 'description']
@ -291,6 +294,7 @@ PERMISSIONS = [
] ]
@util.deprecated
def list_permissions_internal(session, **filters): def list_permissions_internal(session, **filters):
"""internal functions used only by other db.api modules.""" """internal functions used only by other db.api modules."""
return utils.list_db_objects(session, models.Permission, **filters) return utils.list_db_objects(session, models.Permission, **filters)
@ -298,7 +302,7 @@ def list_permissions_internal(session, **filters):
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS) @utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session(PERMISSION_LIST_PERMISSIONS) @user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def list_permissions(user=None, session=None, **filters): def list_permissions(user=None, session=None, **filters):
"""list permissions.""" """list permissions."""
@ -307,22 +311,36 @@ def list_permissions(user=None, session=None, **filters):
) )
def _get_permission(permission_id, session=None, **kwargs):
"""Get permission object by the unique key of Permission table."""
if isinstance(permission_id, (int, long)):
return utils.get_db_object(
session, models.Permission, id=permission_id, **kwargs)
raise exception.InvalidParameter(
'permission id %s type is not int compatible' % permission_id
)
def get_permission_internal(permission_id, session=None, **kwargs):
return _get_permission(permission_id, session=session, **kwargs)
@utils.supported_filters() @utils.supported_filters()
@database.run_in_session() @database.run_in_session()
@user_api.check_user_permission_in_session(PERMISSION_LIST_PERMISSIONS) @user_api.check_user_permission(PERMISSION_LIST_PERMISSIONS)
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def get_permission( def get_permission(
permission_id, exception_when_missing=True, permission_id, exception_when_missing=True,
user=None, session=None, **kwargs user=None, session=None, **kwargs
): ):
"""get permissions.""" """get permissions."""
return utils.get_db_object( return _get_permission(
session, models.Permission, permission_id, session=session,
exception_when_missing, id=permission_id exception_when_missing=exception_when_missing
) )
def add_permissions_internal(session): def add_permissions_internal(session=None):
"""internal functions used by other db.api modules only.""" """internal functions used by other db.api modules only."""
permissions = [] permissions = []
for permission in PERMISSIONS: for permission in PERMISSIONS:

File diff suppressed because it is too large

@ -16,6 +16,7 @@
import datetime import datetime
import functools import functools
import logging import logging
import re
from flask.ext.login import UserMixin from flask.ext.login import UserMixin
@ -53,36 +54,14 @@ PERMISSION_RESP_FIELDS = [
def _check_email(email): def _check_email(email):
"""Check email is email format."""
if '@' not in email: if '@' not in email:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'there is no @ in email address %s.' % email 'there is no @ in email address %s.' % email
) )
def get_user_internal(session, exception_when_missing=True, **kwargs): def _check_user_permission(user, permission, session=None):
"""internal function used only by other db.api modules."""
return utils.get_db_object(
session, models.User, exception_when_missing, **kwargs
)
def add_user_internal(
session, exception_when_existing=True,
email=None, **kwargs
):
"""internal function used only by other db.api modules."""
user = utils.add_db_object(
session, models.User,
exception_when_existing, email,
**kwargs)
_add_user_permissions(
session, user,
name=setting.COMPASS_DEFAULT_PERMISSIONS
)
return user
def _check_user_permission(session, user, permission):
"""Check user has permission.""" """Check user has permission."""
if not user: if not user:
logging.info('empty user means the call is from internal') logging.info('empty user means the call is from internal')
@ -102,14 +81,19 @@ def _check_user_permission(session, user, permission):
) )
def check_user_permission_in_session(permission): def check_user_permission(permission):
"""Decorator to check user having permission."""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
if 'user' in kwargs.keys() and 'session' in kwargs.keys(): user = kwargs.get('user')
session = kwargs['session'] if user is not None:
user = kwargs['user'] session = kwargs.get('session')
_check_user_permission(session, user, permission) if session is None:
raise exception.DatabaseException(
'wrapper check_user_permission does not run in session'
)
_check_user_permission(user, permission, session=session)
return func(*args, **kwargs) return func(*args, **kwargs)
else: else:
return func(*args, **kwargs) return func(*args, **kwargs)
@ -118,11 +102,12 @@ def check_user_permission_in_session(permission):
def check_user_admin(): def check_user_admin():
"""Decorator to check user is admin."""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
if 'user' in kwargs.keys(): user = kwargs.get('user')
user = kwargs['user'] if user is not None:
if not user.is_admin: if not user.is_admin:
raise exception.Forbidden( raise exception.Forbidden(
'User %s is not admin.' % ( 'User %s is not admin.' % (
@ -137,48 +122,56 @@ def check_user_admin():
def check_user_admin_or_owner(): def check_user_admin_or_owner():
"""Decorator to check user is admin or the owner of the resource."""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(user_id, *args, **kwargs): def wrapper(user_id, *args, **kwargs):
if 'user' in kwargs.keys(): user = kwargs.get('user')
user = kwargs['user'] if user is not None:
if not user.is_admin and user.id != user_id: session = kwargs.get('session')
if session is None:
raise exception.DatabaseException(
'wrapper check_user_admin_or_owner is '
'not called in session'
)
check_user = _get_user(user_id, session=session)
if not user.is_admin and user.id != check_user.id:
raise exception.Forbidden( raise exception.Forbidden(
'User %s is not admin or the owner of user id %s.' % ( 'User %s is not admin or the owner of user %s.' % (
user.email, user_id user.email, check_user.email
) )
) )
return func(user_id, *args, **kwargs)
return func(
user_id, *args, **kwargs
)
else: else:
return func(user_id, *args, **kwargs) return func(
user_id, *args, **kwargs
)
return wrapper return wrapper
return decorator return decorator
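A self-contained sketch of the decorator pattern shared by the permission checks above (simplified: a dict stands in for the user model and a plain Exception for exception.Forbidden). The check only runs when the caller passes user=, so internal calls with no user skip it:

import functools

def check_permission(permission):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            user = kwargs.get('user')
            if user is not None:
                # only enforce when a caller identity is supplied
                if permission not in user.get('permissions', []):
                    raise Exception(
                        '%s lacks permission %s' % (user['email'], permission))
            return func(*args, **kwargs)
        return wrapper
    return decorator

@check_permission('list_subnets')
def list_subnets(user=None, session=None):
    return []

list_subnets(user={'email': 'a@b.c', 'permissions': ['list_subnets']})
list_subnets()  # internal call: no user, check skipped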
def check_user_permission_internal(session, user, permission): def _add_user_permissions(user, session=None, **permission_filters):
"""internal function only used by other db.api modules."""
_check_user_permission(session, user, permission)
def _add_user_permissions(session, user, **permission_filters):
"""add permissions to a user.""" """add permissions to a user."""
from compass.db.api import permission as permission_api from compass.db.api import permission as permission_api
for api_permission in permission_api.list_permissions_internal( for api_permission in permission_api.list_permissions(
session, **permission_filters session=session, **permission_filters
): ):
utils.add_db_object( utils.add_db_object(
session, models.UserPermission, False, session, models.UserPermission, False,
user.id, api_permission.id user.id, api_permission['id']
) )
def _remove_user_permissions(session, user, **permission_filters): def _remove_user_permissions(user, session=None, **permission_filters):
"""remove permissions to a user.""" """remove permissions from a user."""
from compass.db.api import permission as permission_api from compass.db.api import permission as permission_api
permission_ids = [ permission_ids = [
api_permission.id api_permission['id']
for api_permission in permission_api.list_permissions_internal( for api_permission in permission_api.list_permissions(
session, **permission_filters session=session, **permission_filters
) )
] ]
utils.del_db_objects( utils.del_db_objects(
@ -187,7 +180,7 @@ def _remove_user_permissions(session, user, **permission_filters):
) )
def _set_user_permissions(session, user, **permission_filters): def _set_user_permissions(user, session=None, **permission_filters):
"""set permissions to a user.""" """set permissions to a user."""
utils.del_db_objects( utils.del_db_objects(
session, models.UserPermission, session, models.UserPermission,
@ -197,6 +190,8 @@ def _set_user_permissions(session, user, **permission_filters):
class UserWrapper(UserMixin): class UserWrapper(UserMixin):
"""Wrapper class provided to flask."""
def __init__( def __init__(
self, id, email, crypted_password, self, id, email, crypted_password,
active=True, is_admin=False, active=True, is_admin=False,
@ -241,6 +236,7 @@ class UserWrapper(UserMixin):
@database.run_in_session() @database.run_in_session()
def get_user_object(email, session=None, **kwargs): def get_user_object(email, session=None, **kwargs):
"""get user and convert to UserWrapper object."""
user = utils.get_db_object( user = utils.get_db_object(
session, models.User, False, email=email session, models.User, False, email=email
) )
@ -253,8 +249,13 @@ def get_user_object(email, session=None, **kwargs):
return UserWrapper(**user_dict) return UserWrapper(**user_dict)
@database.run_in_session() @database.run_in_session(exception_when_in_session=False)
def get_user_object_from_token(token, session=None): def get_user_object_from_token(token, session=None):
"""Get user from token and convert to UserWrapper object.
.. note::
get_user_object_from_token may be called in session.
"""
expire_timestamp = { expire_timestamp = {
'ge': datetime.datetime.now() 'ge': datetime.datetime.now()
} }
@ -266,8 +267,8 @@ def get_user_object_from_token(token, session=None):
raise exception.Unauthorized( raise exception.Unauthorized(
'invalid user token: %s' % token 'invalid user token: %s' % token
) )
user_dict = utils.get_db_object( user_dict = _get_user(
session, models.User, id=user_token.user_id user_token.user_id, session=session
).to_dict() ).to_dict()
user_dict['token'] = token user_dict['token'] = token
expire_timestamp = user_token.expire_timestamp expire_timestamp = user_token.expire_timestamp
@ -310,17 +311,29 @@ def clean_user_token(token, user=None, session=None):
) )
def _get_user(user_id, session=None, **kwargs):
"""Get user object by user id."""
if isinstance(user_id, (int, long)):
return utils.get_db_object(
session, models.User, id=user_id, **kwargs
)
raise exception.InvalidParameter(
'user id %s type is not int compatible' % user_id
)
@utils.supported_filters() @utils.supported_filters()
@check_user_admin_or_owner()
@database.run_in_session() @database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def get_user( def get_user(
user_id, exception_when_missing=True, user_id, exception_when_missing=True,
user=None, session=None, **kwargs user=None, session=None, **kwargs
): ):
"""get field dict of a user.""" """get a user."""
return utils.get_db_object( return _get_user(
session, models.User, exception_when_missing, id=user_id user_id, session=session,
exception_when_missing=exception_when_missing
) )
@ -331,20 +344,21 @@ def get_current_user(
exception_when_missing=True, user=None, exception_when_missing=True, user=None,
session=None, **kwargs session=None, **kwargs
): ):
"""get field dict of a user.""" """get current user."""
return utils.get_db_object( return _get_user(
session, models.User, exception_when_missing, id=user.id user.id, session=session,
exception_when_missing=exception_when_missing
) )
@utils.supported_filters( @utils.supported_filters(
optional_support_keys=SUPPORTED_FIELDS optional_support_keys=SUPPORTED_FIELDS
) )
@check_user_admin()
@database.run_in_session() @database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def list_users(user=None, session=None, **filters): def list_users(user=None, session=None, **filters):
"""List fields of all users by some fields.""" """List all users."""
return utils.list_db_objects( return utils.list_db_objects(
session, models.User, **filters session, models.User, **filters
) )
@ -356,27 +370,34 @@ def list_users(user=None, session=None, **filters):
optional_support_keys=OPTIONAL_ADDED_FIELDS, optional_support_keys=OPTIONAL_ADDED_FIELDS,
ignore_support_keys=IGNORE_FIELDS ignore_support_keys=IGNORE_FIELDS
) )
@check_user_admin()
@database.run_in_session() @database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def add_user( def add_user(
exception_when_existing=True, user=None, exception_when_existing=True, user=None,
session=None, **kwargs session=None, email=None, **kwargs
): ):
"""Create a user and return created user object.""" """Create a user and return created user object."""
return add_user_internal( add_user = utils.add_db_object(
session, exception_when_existing, **kwargs session, models.User,
exception_when_existing, email,
**kwargs)
_add_user_permissions(
add_user,
session=session,
name=setting.COMPASS_DEFAULT_PERMISSIONS
) )
return add_user
@utils.supported_filters() @utils.supported_filters()
@check_user_admin()
@database.run_in_session() @database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def del_user(user_id, user=None, session=None, **kwargs): def del_user(user_id, user=None, session=None, **kwargs):
"""delete a user and return the deleted user object.""" """delete a user and return the deleted user object."""
user = utils.get_db_object(session, models.User, id=user_id) del_user = _get_user(user_id, session=session)
return utils.del_db_object(session, user) return utils.del_db_object(session, del_user)
@utils.supported_filters( @utils.supported_filters(
@ -388,13 +409,13 @@ def del_user(user_id, user=None, session=None, **kwargs):
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def update_user(user_id, user=None, session=None, **kwargs): def update_user(user_id, user=None, session=None, **kwargs):
"""Update a user and return the updated user object.""" """Update a user and return the updated user object."""
user = utils.get_db_object( update_user = _get_user(
session, models.User, id=user_id user_id, session=session,
) )
allowed_fields = set() allowed_fields = set()
if user.is_admin: if user.is_admin:
allowed_fields |= set(ADMIN_UPDATED_FIELDS) allowed_fields |= set(ADMIN_UPDATED_FIELDS)
if user.id == user_id: if user.id == update_user.id:
allowed_fields |= set(SELF_UPDATED_FIELDS) allowed_fields |= set(SELF_UPDATED_FIELDS)
unsupported_fields = set(kwargs) - allowed_fields unsupported_fields = set(kwargs) - allowed_fields
if unsupported_fields: if unsupported_fields:
@ -404,47 +425,67 @@ def update_user(user_id, user=None, session=None, **kwargs):
user.email, user.email, unsupported_fields user.email, user.email, unsupported_fields
) )
) )
return utils.update_db_object(session, user, **kwargs) return utils.update_db_object(session, update_user, **kwargs)
@utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS) @utils.supported_filters(optional_support_keys=PERMISSION_SUPPORTED_FIELDS)
@check_user_admin_or_owner()
@database.run_in_session() @database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS) @utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def get_permissions(user_id, user=None, session=None, **kwargs): def get_permissions(
user_id, user=None, exception_when_missing=True,
session=None, **kwargs
):
"""List permissions of a user.""" """List permissions of a user."""
get_user = _get_user(
user_id, session=session,
exception_when_missing=exception_when_missing
)
return utils.list_db_objects( return utils.list_db_objects(
session, models.UserPermission, user_id=user_id, **kwargs session, models.UserPermission, user_id=get_user.id, **kwargs
)
def _get_permission(user_id, permission_id, session=None, **kwargs):
"""Get user permission by user id and permission id."""
user = _get_user(user_id, session=session)
from compass.db.api import permission as permission_api
permission = permission_api.get_permission_internal(
permission_id, session=session
)
return utils.get_db_object(
session, models.UserPermission,
user_id=user.id, permission_id=permission.id,
**kwargs
) )
@utils.supported_filters() @utils.supported_filters()
@check_user_admin_or_owner()
@database.run_in_session() @database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS) @utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def get_permission( def get_permission(
user_id, permission_id, exception_when_missing=True, user_id, permission_id, exception_when_missing=True,
user=None, session=None, **kwargs user=None, session=None, **kwargs
): ):
"""Get a specific user permission.""" """Get a permission of a user."""
return utils.get_db_object( return _get_permission(
session, models.UserPermission, user_id, permission_id,
exception_when_missing, exception_when_missing=exception_when_missing,
user_id=user_id, permission_id=permission_id, session=session,
**kwargs **kwargs
) )
@utils.supported_filters() @utils.supported_filters()
@check_user_admin_or_owner()
@database.run_in_session() @database.run_in_session()
@check_user_admin_or_owner()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS) @utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def del_permission(user_id, permission_id, user=None, session=None, **kwargs): def del_permission(user_id, permission_id, user=None, session=None, **kwargs):
"""Delete a specific user permission.""" """Delete a permission from a user."""
user_permission = utils.get_db_object( user_permission = _get_permission(
session, models.UserPermission, user_id, permission_id,
user_id=user_id, permission_id=permission_id, session=session, **kwargs
**kwargs
) )
return utils.del_db_object(session, user_permission) return utils.del_db_object(session, user_permission)
@ -453,21 +494,27 @@ def del_permission(user_id, permission_id, user=None, session=None, **kwargs):
PERMISSION_ADDED_FIELDS, PERMISSION_ADDED_FIELDS,
ignore_support_keys=IGNORE_FIELDS ignore_support_keys=IGNORE_FIELDS
) )
@check_user_admin()
@database.run_in_session() @database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS) @utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def add_permission( def add_permission(
user_id, exception_when_missing=True, user_id, permission_id=None, exception_when_existing=True,
permission_id=None, user=None, session=None user=None, session=None
): ):
"""Add an user permission.""" """Add a permission to a user."""
get_user = _get_user(user_id, session=session)
from compass.db.api import permission as permission_api
get_permission = permission_api.get_permission_internal(
permission_id, session=session
)
return utils.add_db_object( return utils.add_db_object(
session, models.UserPermission, exception_when_missing, session, models.UserPermission, exception_when_existing,
user_id, permission_id get_user.id, get_permission.id
) )
def _get_permission_filters(permission_ids): def _get_permission_filters(permission_ids):
"""Helper function to filter permissions."""
if permission_ids == 'all': if permission_ids == 'all':
return {} return {}
else: else:
@ -479,28 +526,28 @@ def _get_permission_filters(permission_ids):
'add_permissions', 'remove_permissions', 'set_permissions' 'add_permissions', 'remove_permissions', 'set_permissions'
] ]
) )
@check_user_admin()
@database.run_in_session() @database.run_in_session()
@check_user_admin()
@utils.wrap_to_dict(PERMISSION_RESP_FIELDS) @utils.wrap_to_dict(PERMISSION_RESP_FIELDS)
def update_permissions( def update_permissions(
user_id, add_permissions=[], remove_permissions=[], user_id, add_permissions=[], remove_permissions=[],
set_permissions=None, user=None, session=None, **kwargs set_permissions=None, user=None, session=None, **kwargs
): ):
"""update user permissions.""" """update user permissions."""
user = utils.get_db_object(session, models.User, id=user_id) update_user = _get_user(user_id, session=session)
if remove_permissions: if remove_permissions:
_remove_user_permissions( _remove_user_permissions(
session, user, update_user, session=session,
**_get_permission_filters(remove_permissions) **_get_permission_filters(remove_permissions)
) )
if add_permissions: if add_permissions:
_add_user_permissions( _add_user_permissions(
session, user, update_user, session=session,
**_get_permission_filters(add_permissions) **_get_permission_filters(add_permissions)
) )
if set_permissions is not None: if set_permissions is not None:
_set_user_permissions( _set_user_permissions(
session, user, update_user, session=session,
**_get_permission_filters(set_permissions) **_get_permission_filters(set_permissions)
) )
return user.user_permissions return update_user.user_permissions


@ -36,14 +36,15 @@ def log_user_action(user_id, action, session=None):
@utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS) @utils.supported_filters(optional_support_keys=USER_SUPPORTED_FIELDS)
@user_api.check_user_admin_or_owner()
@database.run_in_session() @database.run_in_session()
@user_api.check_user_admin_or_owner()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def list_user_actions(user_id, user=None, session=None, **filters): def list_user_actions(user_id, user=None, session=None, **filters):
"""list user actions.""" """list user actions of a user."""
list_user = user_api.get_user(user_id, user=user, session=session)
return utils.list_db_objects( return utils.list_db_objects(
session, models.UserLog, order_by=['timestamp'], session, models.UserLog, order_by=['timestamp'],
user_id=user_id, **filters user_id=list_user['id'], **filters
) )
@ -52,29 +53,30 @@ def list_user_actions(user_id, user=None, session=None, **filters):
@database.run_in_session() @database.run_in_session()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def list_actions(user=None, session=None, **filters): def list_actions(user=None, session=None, **filters):
"""list actions.""" """list actions of all users."""
return utils.list_db_objects( return utils.list_db_objects(
session, models.UserLog, order_by=['timestamp'], **filters session, models.UserLog, order_by=['timestamp'], **filters
) )
@utils.supported_filters() @utils.supported_filters()
@user_api.check_user_admin_or_owner()
@database.run_in_session() @database.run_in_session()
@user_api.check_user_admin_or_owner()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def del_user_actions(user_id, user=None, session=None, **filters): def del_user_actions(user_id, user=None, session=None, **filters):
"""delete user actions.""" """delete actions of a user."""
del_user = user_api.get_user(user_id, user=user, session=session)
return utils.del_db_objects( return utils.del_db_objects(
session, models.UserLog, user_id=user_id, **filters session, models.UserLog, user_id=del_user['id'], **filters
) )
@utils.supported_filters() @utils.supported_filters()
@user_api.check_user_admin()
@database.run_in_session() @database.run_in_session()
@user_api.check_user_admin()
@utils.wrap_to_dict(RESP_FIELDS) @utils.wrap_to_dict(RESP_FIELDS)
def del_actions(user=None, session=None, **filters): def del_actions(user=None, session=None, **filters):
"""delete actions.""" """delete actions of all users."""
return utils.del_db_objects( return utils.del_db_objects(
session, models.UserLog, **filters session, models.UserLog, **filters
) )


@ -30,7 +30,10 @@ from compass.utils import util
def model_query(session, model): def model_query(session, model):
"""model query.""" """model query.
Return sqlalchemy query object.
"""
if not issubclass(model, models.BASE): if not issubclass(model, models.BASE):
raise exception.DatabaseException("model should be sublass of BASE!") raise exception.DatabaseException("model should be sublass of BASE!")
@ -38,6 +41,23 @@ def model_query(session, model):
def _default_list_condition_func(col_attr, value, condition_func): def _default_list_condition_func(col_attr, value, condition_func):
"""The default condition func for a list of data.
Given the condition func for single item of data, this function
wrap the condition_func and return another condition func using
or_ to merge the conditions of each single item to deal with a
list of data item.
Args:
col_attr: the colomn name
value: the column value need to be compared.
condition_func: the sqlalchemy condition object like ==
Examples:
col_attr is name, value is ['a', 'b', 'c'] and
condition_func is ==, the returned condition is
name == 'a' or name == 'b' or name == 'c'
"""
conditions = [] conditions = []
for sub_value in value: for sub_value in value:
condition = condition_func(col_attr, sub_value) condition = condition_func(col_attr, sub_value)
@ -50,6 +70,11 @@ def _default_list_condition_func(col_attr, value, condition_func):
def _one_item_list_condition_func(col_attr, value, condition_func): def _one_item_list_condition_func(col_attr, value, condition_func):
"""The wrapper condition func to deal with one item data list.
For simplification, it is used to reduce generating too complex
sql conditions.
"""
if value: if value:
return condition_func(col_attr, value[0]) return condition_func(col_attr, value[0])
else: else:
@ -61,6 +86,7 @@ def _model_condition_func(
item_condition_func, item_condition_func,
list_condition_func=_default_list_condition_func list_condition_func=_default_list_condition_func
): ):
"""Return sql condition based on value type."""
if isinstance(value, list): if isinstance(value, list):
if not value: if not value:
return None return None
@ -74,6 +100,7 @@ def _model_condition_func(
def _between_condition(col_attr, value): def _between_condition(col_attr, value):
"""Return sql range condition."""
if value[0] is not None and value[1] is not None: if value[0] is not None and value[1] is not None:
return col_attr.between(value[0], value[1]) return col_attr.between(value[0], value[1])
if value[0] is not None: if value[0] is not None:
@ -84,6 +111,7 @@ def _between_condition(col_attr, value):
def model_order_by(query, model, order_by): def model_order_by(query, model, order_by):
"""append order by into sql query model."""
if not order_by: if not order_by:
return query return query
order_by_cols = [] order_by_cols = []
@ -107,11 +135,39 @@ def model_order_by(query, model, order_by):
def _model_condition(col_attr, value): def _model_condition(col_attr, value):
"""Generate condition for one column.
Example for col_attr is name:
value is 'a': name == 'a'
value is ['a']: name == 'a'
value is ['a', 'b']: name == 'a' or name == 'b'
value is {'eq': 'a'}: name == 'a'
value is {'lt': 'a'}: name < 'a'
value is {'le': 'a'}: name <= 'a'
value is {'gt': 'a'}: name > 'a'
value is {'ge': 'a'}: name >= 'a'
value is {'ne': 'a'}: name != 'a'
value is {'in': ['a', 'b']}: name in ['a', 'b']
value is {'notin': ['a', 'b']}: name not in ['a', 'b']
value is {'startswith': 'abc'}: name like 'abc%'
value is {'endswith': 'abc'}: name like '%abc'
value is {'like': 'abc'}: name like '%abc%'
value is {'between': ('a', 'c')}: name >= 'a' and name <= 'c'
value is [{'lt': 'a'}]: name < 'a'
value is [{'lt': 'a'}, {'gt': 'c'}]: name < 'a' or name > 'c'
value is {'lt': 'c', 'gt': 'a'}: name > 'a' and name < 'c'
If value is a list, the conditions of its items are combined
with the or relationship.
If value is a dict with multiple keys, the conditions of its keys
are combined with the and relationship.
Otherwise the condition compares the column with the value.
"""
if isinstance(value, list): if isinstance(value, list):
basetype_values = [] basetype_values = []
composite_values = [] composite_values = []
for item in value: for item in value:
if util.is_instance(item, [list, dict]): if isinstance(item, (list, dict)):
composite_values.append(item) composite_values.append(item)
else: else:
basetype_values.append(item) basetype_values.append(item)
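For readers unfamiliar with the filter grammar documented above, this standalone snippet (requires a recent SQLAlchemy; the Host table and names here are hypothetical) shows what a few of the filter forms resolve to:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Host(Base):
    __tablename__ = 'host'
    id = Column(Integer, primary_key=True)
    name = Column(String(80))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    query = session.query(Host)
    # {'name': ['a', 'b']} -> name == 'a' OR name == 'b'
    print(query.filter((Host.name == 'a') | (Host.name == 'b')).count())
    # {'name': {'startswith': 'ab'}} -> name LIKE 'ab%'
    print(query.filter(Host.name.like('ab%')).count())
    # {'id': {'gt': 1, 'le': 5}} -> id > 1 AND id <= 5
    print(query.filter((Host.id > 1) & (Host.id <= 5)).count())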
@ -209,6 +265,7 @@ def _model_condition(col_attr, value):
def model_filter(query, model, **filters): def model_filter(query, model, **filters):
"""Append conditons to query for each possible column."""
for key, value in filters.items(): for key, value in filters.items():
if isinstance(key, basestring): if isinstance(key, basestring):
if hasattr(model, key): if hasattr(model, key):
@ -224,6 +281,10 @@ def model_filter(query, model, **filters):
def replace_output(**output_mapping): def replace_output(**output_mapping):
"""Decorator to recursively relace output by output mapping.
The replacement detail is described in _replace_output.
"""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
@ -235,12 +296,34 @@ def replace_output(**output_mapping):
def _replace_output(data, **output_mapping): def _replace_output(data, **output_mapping):
"""Helper to replace output data.""" """Helper to replace output data.
Example:
data = {'a': 'hello'}
output_mapping = {'a': 'b'}
returns: {'b': 'hello'}
data = {'a': {'b': 'hello'}}
output_mapping = {'a': 'b'}
returns: {'b': {'b': 'hello'}}
data = {'a': {'b': 'hello'}}
output_mapping = {'a': {'b': 'c'}}
returns: {'a': {'c': 'hello'}}
data = [{'a': 'hello'}, {'a': 'hi'}]
output_mapping = {'a': 'b'}
returns: [{'b': 'hello'}, {'b': 'hi'}]
"""
if isinstance(data, list): if isinstance(data, list):
return [ return [
_replace_output(item, **output_mapping) _replace_output(item, **output_mapping)
for item in data for item in data
] ]
if not isinstance(data, dict):
raise exception.InvalidResponse(
'%s type is not dict' % data
)
info = {} info = {}
for key, value in data.items(): for key, value in data.items():
if key in output_mapping: if key in output_mapping:
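The docstring examples above can be reproduced with a compact standalone version of the same recursion (a sketch, not the module's exact code):

def replace_output(data, **mapping):
    if isinstance(data, list):
        return [replace_output(item, **mapping) for item in data]
    info = {}
    for key, value in data.items():
        if key in mapping:
            if isinstance(mapping[key], dict):
                # nested mapping: recurse into the sub dict, keep the key
                info[key] = replace_output(value, **mapping[key])
            else:
                # plain mapping: rename the key
                info[mapping[key]] = value
        else:
            info[key] = value
    return info

assert replace_output({'a': 'hello'}, a='b') == {'b': 'hello'}
assert replace_output({'a': {'b': 'hi'}}, a={'b': 'c'}) == {'a': {'c': 'hi'}}
assert replace_output([{'a': 'hello'}, {'a': 'hi'}], a='b') == [
    {'b': 'hello'}, {'b': 'hi'}]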
@ -257,7 +340,23 @@ def _replace_output(data, **output_mapping):
def get_wrapped_func(func): def get_wrapped_func(func):
"""Get wrapped function instance.""" """Get wrapped function instance.
Example:
@dec1
@dec2
myfunc(*args, **kwargs)
get_wrapped_func(myfunc) returns the original function object with
the following attributes:
__name__: 'myfunc'
args: args
kwargs: kwargs
otherwise myfunc is a wrapped function object with attributes like:
__name__: partial object ...
args: ...
kwargs: ...
"""
if func.func_closure: if func.func_closure:
for closure in func.func_closure: for closure in func.func_closure:
if isfunction(closure.cell_contents): if isfunction(closure.cell_contents):
@ -268,6 +367,10 @@ def get_wrapped_func(func):
def wrap_to_dict(support_keys=[], **filters): def wrap_to_dict(support_keys=[], **filters):
"""Decrator to convert returned object to dict.
The details is decribed in _wrapper_dict.
"""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
@ -279,7 +382,31 @@ def wrap_to_dict(support_keys=[], **filters):
def _wrapper_dict(data, support_keys, **filters): def _wrapper_dict(data, support_keys, **filters):
"""Helper for warpping db object into dictionary.""" """Helper for warpping db object into dictionary.
If data is list, convert it to a list of dict
If data is Base model, convert it to dict
for the data as a dict, filter it with the supported keys.
For each filter_key, filter_value in filters, also filter
data[filter_key] by filter_value recursively if it exists.
Example:
data is models.Switch, it will be converted to
{
'id': 1, 'ip': '10.0.0.1', 'ip_int': 123456,
'credentials': {'version': 2, 'password': 'abc'}
}
Then if support_keys are ['id', 'ip', 'credentials'],
it will be filtered to {
'id': 1, 'ip': '10.0.0.1',
'credentials': {'version': 2, 'password': 'abc'}
}
Then if filters is {'credentials': ['version']},
it will be filtered to {
'id': 1, 'ip': '10.0.0.1',
'credentials': {'version': 2}
}
"""
logging.debug( logging.debug(
'wrap dict %s by support_keys=%s filters=%s', 'wrap dict %s by support_keys=%s filters=%s',
data, support_keys, filters data, support_keys, filters
@ -296,32 +423,46 @@ def _wrapper_dict(data, support_keys, **filters):
'response %s type is not dict' % data 'response %s type is not dict' % data
) )
info = {} info = {}
for key in support_keys: try:
if key in data: for key in support_keys:
if key in filters: if key in data and data[key] is not None:
filter_keys = filters[key] if key in filters:
if isinstance(filter_keys, dict): filter_keys = filters[key]
info[key] = _wrapper_dict( if isinstance(filter_keys, dict):
data[key], filter_keys.keys(), info[key] = _wrapper_dict(
**filter_keys data[key], filter_keys.keys(),
) **filter_keys
)
else:
info[key] = _wrapper_dict(
data[key], filter_keys
)
else: else:
info[key] = _wrapper_dict( info[key] = data[key]
data[key], filter_keys return info
) except Exception as error:
else: logging.exception(error)
info[key] = data[key] raise error
return info
def replace_input_types(**kwarg_mapping): def replace_filters(**kwarg_mapping):
"""Decorator to replace kwargs.
Examples:
kwargs: {'a': 'b'}, kwarg_mapping: {'a': 'c'}
replaced kwargs to decorated func:
{'c': 'b'}
replace_filters is used to replace caller's input
to make it understandable by models.py.
"""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
replaced_kwargs = {} replaced_kwargs = {}
for key, value in kwargs.items(): for key, value in kwargs.items():
if key in kwarg_mapping: if key in kwarg_mapping:
replaced_kwargs[key] = kwarg_mapping[key](value) replaced_kwargs[kwarg_mapping[key]] = value
else: else:
replaced_kwargs[key] = value replaced_kwargs[key] = value
return func(*args, **replaced_kwargs) return func(*args, **replaced_kwargs)
@ -329,52 +470,115 @@ def replace_input_types(**kwarg_mapping):
return decorator return decorator
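A standalone sketch of the renamed-kwargs behavior (os_name and list_oses are made-up names for illustration):

import functools

def replace_filters(**kwarg_mapping):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            replaced_kwargs = {}
            for key, value in kwargs.items():
                # rename keys the caller uses into keys models.py expects
                replaced_kwargs[kwarg_mapping.get(key, key)] = value
            return func(*args, **replaced_kwargs)
        return wrapper
    return decorator

@replace_filters(os_name='name')
def list_oses(**filters):
    return filters

print(list_oses(os_name='CentOS'))  # {'name': 'CentOS'}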
def replace_filters(**filter_mapping):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **filters):
replaced_filters = {}
for key, value in filters.items():
if key in filter_mapping:
replaced_filters[filter_mapping[key]] = value
else:
replaced_filters[key] = value
return func(*args, **replaced_filters)
return wrapper
return decorator
def supported_filters( def supported_filters(
support_keys=[], support_keys=[],
optional_support_keys=[], optional_support_keys=[],
ignore_support_keys=[], ignore_support_keys=[],
): ):
"""Decorator to check kwargs keys.
keys in kwargs and in ignore_support_keys will be removed.
If any unsupported keys found, a InvalidParameter
exception raises.
Args:
support_keys: keys that must exist.
optional_support_keys: keys that may exist.
ignore_support_keys: keys should be ignored.
Assumption: args without default value is supposed to exist.
You can add them in support_keys or not but we will make sure
it appears when we call the decorated function.
We do best match on both args and kwargs to make sure if the
key appears or not.
Examples:
decorated func: func(a, b, c=3, d=4, **kwargs)
support_keys=['e'] and call func(e=5):
raises: InvalidParameter: missing declared arg
support_keys=['e'] and call func(1,2,3,4,5,e=6):
raises: InvalidParameter: caller sending more args
support_keys=['e'] and call func(1,2):
raises: InvalidParameter: supported keys ['e'] missing
support_keys=['d', 'e'] and call func(1,2,e=3):
raises: InvalidParameter: supported keys ['d'] missing
support_keys=['d', 'e'] and call func(1,2,d=4, e=3):
passed
support_keys=['d'], optional_support_keys=['e']
and call func(1,2, d=3):
passed
support_keys=['d'], optional_support_keys=['e']
and call func(1,2, d=3, e=4, f=5):
raises: InvalidParameter: unsupported keys ['f']
support_keys=['d'], optional_support_keys=['e'],
ignore_support_keys=['f']
and call func(1,2, d=3, e=4, f=5):
passed to decorated keys: func(1,2, d=3, e=4)
"""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **filters): def wrapper(*args, **filters):
wrapped_func = get_wrapped_func(func) wrapped_func = get_wrapped_func(func)
argspec = inspect.getargspec(wrapped_func) argspec = inspect.getargspec(wrapped_func)
wrapped_args = argspec.args wrapped_args = argspec.args
args_defaults = argspec.defaults
# wrapped_must_args are positional args caller must pass in.
if args_defaults:
wrapped_must_args = wrapped_args[:-len(args_defaults)]
else:
wrapped_must_args = wrapped_args[:]
# make sure any positional args without default value in
# decorated function should appear in args or filters.
if len(args) < len(wrapped_must_args):
remain_args = wrapped_must_args[len(args):]
for remain_arg in remain_args:
if remain_arg not in filters:
raise exception.InvalidParameter(
'function missing declared arg %s '
'while caller sends args %s' % (
remain_arg, args
)
)
# make sure args should be no more than positional args
# declared in decorated function.
if len(args) > len(wrapped_args):
raise exception.InvalidParameter(
'function definition args %s while the caller '
'sends args %s' % (
wrapped_args, args
)
)
# exist_args are positional args caller has given.
exist_args = dict(zip(wrapped_args, args)).keys()
must_support_keys = set(support_keys) must_support_keys = set(support_keys)
all_support_keys = must_support_keys | set(optional_support_keys) all_support_keys = must_support_keys | set(optional_support_keys)
filter_keys = set(filters) - set(wrapped_args) wrapped_supported_keys = set(filters) | set(exist_args)
wrapped_support_keys = set(filters) | set(wrapped_args)
unsupported_keys = ( unsupported_keys = (
filter_keys - all_support_keys - set(ignore_support_keys) set(filters) - set(wrapped_args) -
all_support_keys - set(ignore_support_keys)
) )
# unsupported_keys are the keys that are not in support_keys,
# optional_support_keys, ignore_support_keys and are not passed in
# by positional args. It means the decorated function may
# not understand these parameters.
if unsupported_keys: if unsupported_keys:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'filter keys %s are not supported' % str( 'filter keys %s are not supported for %s' % (
list(unsupported_keys) list(unsupported_keys), wrapped_func
) )
) )
missing_keys = must_support_keys - wrapped_support_keys # missing_keys are the keys that must exist but are missing
# from both positional args and kwargs.
missing_keys = must_support_keys - wrapped_supported_keys
if missing_keys: if missing_keys:
raise exception.InvalidParameter( raise exception.InvalidParameter(
'filter keys %s not found' % str( 'filter keys %s not found for %s' % (
list(missing_keys) list(missing_keys), wrapped_func
) )
) )
# We filter kwargs to eliminate ignore_support_keys in kwargs
# passed to decorated function.
filtered_filters = dict([ filtered_filters = dict([
(key, value) (key, value)
for key, value in filters.items() for key, value in filters.items()
@ -385,61 +589,198 @@ def supported_filters(
return decorator return decorator
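For reference, a minimal usage sketch of supported_filters; it assumes these helpers live in compass.db.api.utils as elsewhere in this tree, and list_hosts is an illustrative function, not one defined in this commit:

    from compass.db import exception
    from compass.db.api import utils

    @utils.supported_filters(
        support_keys=['name'],              # must be present
        optional_support_keys=['os_name'],  # may be present
        ignore_support_keys=['session']     # silently stripped
    )
    def list_hosts(**filters):              # hypothetical function
        return filters

    list_hosts(name='host1')                    # -> {'name': 'host1'}
    list_hosts(name='host1', session=object())  # 'session' is stripped
    try:
        list_hosts(name='host1', owner='x')     # 'owner' is unsupported
    except exception.InvalidParameter as error:
        print(error)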
def _obj_equal(check, obj): def input_filters(
**filters
):
"""Decorator to filter kwargs.
For each key in kwargs, if the key also exists in filters
and the call of filters[key] returns False, the key
will be removed from kwargs.
The function definition of filters[key] is
func(value, *args, **kwargs), where *args and **kwargs are
the arguments of the decorated function func(*args, **kwargs).
The function is used to filter kwargs in case some
kwargs should be removed conditionally, depending on the
related filters.
Examples:
filters={'a': func(value, *args, **kwargs)}
@input_filters(**filters)
decorated_func(*args, **kwargs)
where func returns False.
Then calling decorated_func(a=1, b=2)
actually calls the decorated function with
b=2 only; a=1 is removed since it does not pass filtering.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
filtered_kwargs = {}
for key, value in kwargs.items():
if key in filters:
if filters[key](value, *args, **kwargs):
filtered_kwargs[key] = value
else:
logging.debug(
'ignore filtered input key %s' % key
)
else:
filtered_kwargs[key] = value
return func(*args, **filtered_kwargs)
return wrapper
return decorator
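A short sketch of input_filters in use (the predicate and function names are illustrative):

    from compass.db.api import utils

    def _non_empty(value, *args, **kwargs):
        # keep the key only when its value is truthy
        return bool(value)

    @utils.input_filters(location=_non_empty)
    def add_machine(**kwargs):  # hypothetical function
        return kwargs

    add_machine(mac='28:6e:d4:46:c4:25', location='')
    # -> {'mac': '28:6e:d4:46:c4:25'}; the empty location is dropped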
def _obj_equal_or_subset(check, obj):
"""Used by output filter to check if obj is in check."""
if check == obj: if check == obj:
return True return True
if not issubclass(obj.__class__, check.__class__): if not issubclass(obj.__class__, check.__class__):
return False return False
if isinstance(obj, dict): if isinstance(obj, dict):
return _dict_equal(check, obj) return _dict_equal_or_subset(check, obj)
elif isinstance(obj, list): elif isinstance(obj, list):
return _list_equal(check, obj) return _list_equal_or_subset(check, obj)
else: else:
return False return False
def _list_equal(check_list, obj_list): def _list_equal_or_subset(check_list, obj_list):
"""Used by output filter to check if obj_list is in check_list"""
if not isinstance(check_list, list):
return False
return set(check_list).issubset(set(obj_list)) return set(check_list).issubset(set(obj_list))
def _dict_equal(check_dict, obj_dict): def _dict_equal_or_subset(check_dict, obj_dict):
"""Used by output filter to check if obj_dict in check_dict."""
if not isinstance(check_dict, dict):
return False
for key, value in check_dict.items(): for key, value in check_dict.items():
if ( if (
key not in obj_dict or key not in obj_dict or
not _obj_equal(check_dict[key], obj_dict[key]) not _obj_equal_or_subset(check_dict[key], obj_dict[key])
): ):
return False return False
return True return True
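Concretely, the first argument is the expected (filter) value and the second the actual response value; extra keys or items in the response are allowed. A few illustrative calls:

    _obj_equal_or_subset('b', 'b')                               # True
    _obj_equal_or_subset(['a'], ['a', 'b'])                      # True
    _obj_equal_or_subset({'os': 'CentOS6.5'},
                         {'os': 'CentOS6.5', 'arch': 'x86_64'})  # True
    _obj_equal_or_subset({'os': 'Ubuntu12.04'},
                         {'os': 'CentOS6.5'})                    # False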
def general_filter_callback(general_filter, obj): def general_filter_callback(general_filter, obj):
if 'resp_eq' in general_filter: """General filter function to filter output.
return _obj_equal(general_filter['resp_eq'], obj)
elif 'resp_in' in general_filter: Since some fields stored in the database are json encoded and
in_filters = general_filter['resp_in'] we want to do a deep match on the json encoded fields when
if not in_filters: doing the filtering in some cases, we introduce output_filters
and general_filter_callback to deal with this kind of case.
We do special treatment for key 'resp_eq' to check if
general_filter['resp_eq'] equals obj or is recursively contained in it.
Example:
obj: 'b'
general_filter: {}
returns: True
obj: 'b'
general_filter: {'resp_in': ['a', 'b']}
returns: True
obj: 'b'
general_filter: {'resp_in': ['a']}
returns: False
obj: 'b'
general_filter: {'resp_eq': 'b'}
returns: True
obj: 'b'
general_filter: {'resp_eq': 'a'}
returns: False
obj: 'b'
general_filter: {'resp_range': ('a', 'c')}
returns: True
obj: 'd'
general_filter: {'resp_range': ('a', 'c')}
returns: False
If there are multiple keys in the dict, the output is filtered
with AND semantics.
If the general_filter is a list, the output is filtered
with OR semantics.
Supported general filters: [
'resp_eq', 'resp_in', 'resp_lt',
'resp_le', 'resp_gt', 'resp_ge',
'resp_match', 'resp_range'
]
"""
if isinstance(general_filter, list):
if not general_filter:
return True return True
for in_filer in in_filters: return any([
if _obj_equal(in_filer, obj): general_filter_callback(item, obj)
return True for item in general_filter
return False ])
elif 'resp_lt' in general_filter: elif isinstance(general_filter, dict):
return obj < general_filter['resp_lt'] if 'resp_eq' in general_filter:
elif 'resp_le' in general_filter: if not _obj_equal_or_subset(
return obj <= general_filter['resp_le'] general_filter['resp_eq'], obj
elif 'resp_gt' in general_filter: ):
return obj > general_filter['resp_gt'] return False
elif 'resp_ge' in general_filter: if 'resp_in' in general_filter:
return obj >= general_filter['resp_gt'] in_filters = general_filter['resp_in']
elif 'resp_match' in general_filter: if not any([
return bool(re.match(general_filter['resp_match'], obj)) _obj_equal_or_subset(in_filter, obj)
for in_filter in in_filters
]):
return False
if 'resp_lt' in general_filter:
if obj >= general_filter['resp_lt']:
return False
if 'resp_le' in general_filter:
if obj > general_filter['resp_le']:
return False
if 'resp_gt' in general_filter:
if obj <= general_filter['resp_gt']:
return False
if 'resp_ge' in general_filter:
if obj < general_filter['resp_ge']:
return False
if 'resp_match' in general_filter:
if not re.match(general_filter['resp_match'], obj):
return False
if 'resp_range' in general_filter:
resp_range = general_filter['resp_range']
if not isinstance(resp_range, list):
resp_range = [resp_range]
in_range = False
for range_start, range_end in resp_range:
if range_start <= obj <= range_end:
in_range = True
if not in_range:
return False
return True
else: else:
return True return True
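A few concrete calls showing these semantics (values illustrative):

    general_filter_callback({'resp_in': ['a', 'b']}, 'b')           # True
    general_filter_callback({'resp_range': ('a', 'c')}, 'b')        # True
    general_filter_callback({'resp_match': '^host'}, 'host1')       # True
    general_filter_callback({'resp_gt': 'a', 'resp_lt': 'c'}, 'b')  # AND: True
    general_filter_callback([{'resp_eq': 'a'}, {'resp_eq': 'b'}],
                            'b')                                    # OR: True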
def filter_output(filter_callbacks, filters, obj, missing_ok=False): def filter_output(filter_callbacks, kwargs, obj, missing_ok=False):
"""Filter ouput.
For each key in filter_callbacks, if it exists in kwargs,
kwargs[key] tells what we need to filter. If the call of
filter_callbacks[key] returns False, the obj should be
filtered out of the output.
"""
for callback_key, callback_value in filter_callbacks.items(): for callback_key, callback_value in filter_callbacks.items():
if callback_key not in filters: if callback_key not in kwargs:
continue continue
if callback_key not in obj: if callback_key not in obj:
if missing_ok: if missing_ok:
@ -449,21 +790,26 @@ def filter_output(filter_callbacks, filters, obj, missing_ok=False):
'%s is not in %s' % (callback_key, obj) '%s is not in %s' % (callback_key, obj)
) )
if not callback_value( if not callback_value(
filters[callback_key], obj[callback_key] kwargs[callback_key], obj[callback_key]
): ):
return False return False
return True return True
def output_filters(missing_ok=False, **filter_callbacks): def output_filters(missing_ok=False, **filter_callbacks):
"""Decorator to filter output list.
Each filter_callback should have a definition like:
func({'resp_eq': 'a'}, 'a')
"""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **filters): def wrapper(*args, **kwargs):
filtered_obj_list = [] filtered_obj_list = []
obj_list = func(*args, **filters) obj_list = func(*args, **kwargs)
for obj in obj_list: for obj in obj_list:
if filter_output( if filter_output(
filter_callbacks, filters, obj, missing_ok filter_callbacks, kwargs, obj, missing_ok
): ):
filtered_obj_list.append(obj) filtered_obj_list.append(obj)
return filtered_obj_list return filtered_obj_list
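Combining output_filters with general_filter_callback, a sketch (list_users here is a hypothetical stand-in returning canned rows, not a real db query):

    from compass.db.api import utils

    @utils.output_filters(email=utils.general_filter_callback)
    def list_users(**kwargs):  # hypothetical function
        return [{'email': 'admin@huawei.com'},
                {'email': 'guest@huawei.com'}]

    list_users(email={'resp_match': '^admin'})
    # -> [{'email': 'admin@huawei.com'}]
    list_users()
    # -> both rows; without an 'email' kwarg no filtering happens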
@ -472,6 +818,7 @@ def output_filters(missing_ok=False, **filter_callbacks):
def _input_validates(args_validators, kwargs_validators, *args, **kwargs): def _input_validates(args_validators, kwargs_validators, *args, **kwargs):
"""Used by input_validators to validate inputs."""
for i, value in enumerate(args): for i, value in enumerate(args):
if i < len(args_validators) and args_validators[i]: if i < len(args_validators) and args_validators[i]:
args_validators[i](value) args_validators[i](value)
@ -481,6 +828,11 @@ def _input_validates(args_validators, kwargs_validators, *args, **kwargs):
def input_validates(*args_validators, **kwargs_validators): def input_validates(*args_validators, **kwargs_validators):
"""Decorator to validate input.
Each validator should have a definition like:
func('00:01:02:03:04:05')
"""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
@ -493,7 +845,103 @@ def input_validates(*args_validators, **kwargs_validators):
return decorator return decorator
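For instance, input_validates pairs naturally with the check_* helpers defined later in this module (add_machine is illustrative):

    from compass.db.api import utils

    @utils.input_validates(mac=utils.check_mac)
    def add_machine(mac=None, **kwargs):  # hypothetical function
        return mac

    add_machine(mac='28:6e:d4:46:c4:25')  # passes validation
    add_machine(mac='not-a-mac')          # raises InvalidParameter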
def _input_validates_with_args(
args_validators, kwargs_validators, *args, **kwargs
):
"""Validate input with validators.
Each validator takes the arguments of the decorated function
as its arguments. The function definition is like:
func(value, *args, **kwargs) compared with the decorated
function func(*args, **kwargs).
"""
for i, value in enumerate(args):
if i < len(args_validators) and args_validators[i]:
args_validators[i](value, *args, **kwargs)
for key, value in kwargs.items():
if kwargs_validators.get(key):
kwargs_validators[key](value, *args, **kwargs)
def input_validates_with_args(
*args_validators, **kwargs_validators
):
"""Decorator to validate input."""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
_input_validates_with_args(
args_validators, kwargs_validators,
*args, **kwargs
)
return func(*args, **kwargs)
return wrapper
return decorator
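The _with_args variant lets a validator see the whole call, which enables cross-field checks; a sketch with illustrative names:

    from compass.db import exception
    from compass.db.api import utils

    def _requires_cluster(value, *args, **kwargs):
        # flavor_id only makes sense when a cluster_id is also given
        if 'cluster_id' not in kwargs:
            raise exception.InvalidParameter(
                'flavor_id %s given without cluster_id' % value
            )

    @utils.input_validates_with_args(flavor_id=_requires_cluster)
    def update_cluster(**kwargs):  # hypothetical function
        return kwargs

    update_cluster(cluster_id=1, flavor_id=3)  # passes
    update_cluster(flavor_id=3)                # raises InvalidParameter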
def _output_validates_with_args(
kwargs_validators, obj, *args, **kwargs
):
"""Validate output with validators.
Each validator takes the arguments of the decorated function
as its arguments. The function definition is like:
func(value, *args, **kwargs) compared with the decorated
function func(*args, **kwargs).
"""
if isinstance(obj, list):
for item in obj:
_output_validates_with_args(
kwargs_validators, item, *args, **kwargs
)
return
if isinstance(obj, models.HelperMixin):
obj = obj.to_dict()
if not isinstance(obj, dict):
raise exception.InvalidResponse(
'response %s type is not dict' % str(obj)
)
try:
for key, value in obj.items():
if key in kwargs_validators:
kwargs_validators[key](value, *args, **kwargs)
except Exception as error:
logging.exception(error)
raise error
def output_validates_with_args(**kwargs_validators):
"""Decorator to validate output.
The validator can take the arguments of the decorated
function as its arguments.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
obj = func(*args, **kwargs)
if isinstance(obj, list):
for obj_item in obj:
_output_validates_with_args(
kwargs_validators, obj_item,
*args, **kwargs
)
else:
_output_validates_with_args(
kwargs_validators, obj,
*args, **kwargs
)
return obj
return wrapper
return decorator
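Symmetrically for output: the expected value can be threaded through the caller's kwargs (names illustrative):

    from compass.db import exception
    from compass.db.api import utils

    def _state_matches(value, *args, **kwargs):
        expected = kwargs.get('expected_state')
        if expected and value != expected:
            raise exception.InvalidResponse(
                'state %s != expected %s' % (value, expected)
            )

    @utils.output_validates_with_args(state=_state_matches)
    def get_host_state(host_id, **kwargs):  # hypothetical function
        return {'id': host_id, 'state': 'INSTALLING'}

    get_host_state(1, expected_state='INSTALLING')  # passes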
def _output_validates(kwargs_validators, obj): def _output_validates(kwargs_validators, obj):
"""Validate output.
Each validator has the following signature:
func(value)
"""
if isinstance(obj, list): if isinstance(obj, list):
for item in obj: for item in obj:
_output_validates(kwargs_validators, item) _output_validates(kwargs_validators, item)
@ -504,12 +952,17 @@ def _output_validates(kwargs_validators, obj):
raise exception.InvalidResponse( raise exception.InvalidResponse(
'response %s type is not dict' % str(obj) 'response %s type is not dict' % str(obj)
) )
for key, value in obj.items(): try:
if key in kwargs_validators: for key, value in obj.items():
kwargs_validators[key](value) if key in kwargs_validators:
kwargs_validators[key](value)
except Exception as error:
logging.exception(error)
raise error
def output_validates(**kwargs_validators): def output_validates(**kwargs_validators):
"""Decorator to validate output."""
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
@ -525,7 +978,13 @@ def output_validates(**kwargs_validators):
def get_db_object(session, table, exception_when_missing=True, **kwargs): def get_db_object(session, table, exception_when_missing=True, **kwargs):
"""Get db object.""" """Get db object.
If not exception_when_missing and the db object cannot be found,
return None instead of raising exception.
"""
if not session:
raise exception.DatabaseException('session param is None')
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug( logging.debug(
'session %s get db object %s from table %s', 'session %s get db object %s from table %s',
@ -551,7 +1010,13 @@ def get_db_object(session, table, exception_when_missing=True, **kwargs):
def add_db_object(session, table, exception_when_existing=True, def add_db_object(session, table, exception_when_existing=True,
*args, **kwargs): *args, **kwargs):
"""Create db object.""" """Create db object.
If not exception_when_existing and the db object exists,
the existing db object is updated instead of raising an exception.
"""
if not session:
raise exception.DatabaseException('session param is None')
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug( logging.debug(
'session %s add object %s attributes %s to table %s', 'session %s add object %s attributes %s to table %s',
@ -602,7 +1067,12 @@ def add_db_object(session, table, exception_when_existing=True,
def list_db_objects(session, table, order_by=[], **filters): def list_db_objects(session, table, order_by=[], **filters):
"""List db objects.""" """List db objects.
If order_by is given, the db objects are sorted by the given keys.
"""
if not session:
raise exception.DatabaseException('session param is None')
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug( logging.debug(
'session %s list db objects by filters %s in table %s', 'session %s list db objects by filters %s in table %s',
@ -626,6 +1096,8 @@ def list_db_objects(session, table, order_by=[], **filters):
def del_db_objects(session, table, **filters): def del_db_objects(session, table, **filters):
"""delete db objects.""" """delete db objects."""
if not session:
raise exception.DatabaseException('session param is None')
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug( logging.debug(
'session %s delete db objects by filters %s in table %s', 'session %s delete db objects by filters %s in table %s',
@ -642,8 +1114,10 @@ def del_db_objects(session, table, **filters):
return db_objects return db_objects
def update_db_objects(session, table, **filters): def update_db_objects(session, table, updates={}, **filters):
"""Update db objects.""" """Update db objects."""
if not session:
raise exception.DatabaseException('session param is None')
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug( logging.debug(
'session %s update db objects by filters %s in table %s', 'session %s update db objects by filters %s in table %s',
@ -652,10 +1126,8 @@ def update_db_objects(session, table, **filters):
model_query(session, table), table, **filters model_query(session, table), table, **filters
).all() ).all()
for db_object in db_objects: for db_object in db_objects:
logging.debug('update db object %s', db_object) logging.debug('update db object %s: %s', db_object, updates)
session.flush() update_db_object(session, db_object, **updates)
db_object.update()
db_object.validate()
logging.debug( logging.debug(
'session %s db objects %s updated', 'session %s db objects %s updated',
id(session), db_objects id(session), db_objects
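With the new signature, bulk updates pass the new values via updates= and select rows via **filters; a sketch assuming models.Host and a session context as used elsewhere in this tree (column values illustrative):

    from compass.db import models
    from compass.db.api import database
    from compass.db.api import utils

    with database.session() as session:
        utils.update_db_objects(
            session, models.Host,
            updates={'name': 'host1-renamed'},  # values to set
            name='host1'                        # row filter
        )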
@ -665,6 +1137,8 @@ def update_db_objects(session, table, **filters):
def update_db_object(session, db_object, **kwargs): def update_db_object(session, db_object, **kwargs):
"""Update db object.""" """Update db object."""
if not session:
raise exception.DatabaseException('session param is None')
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug( logging.debug(
'session %s update db object %s by value %s', 'session %s update db object %s by value %s',
@ -684,6 +1158,8 @@ def update_db_object(session, db_object, **kwargs):
def del_db_object(session, db_object): def del_db_object(session, db_object):
"""Delete db object.""" """Delete db object."""
if not session:
raise exception.DatabaseException('session param is None')
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
logging.debug( logging.debug(
'session %s delete db object %s', 'session %s delete db object %s',
@ -698,6 +1174,7 @@ def del_db_object(session, db_object):
def check_ip(ip): def check_ip(ip):
"""Check ip is ip address formatted."""
try: try:
netaddr.IPAddress(ip) netaddr.IPAddress(ip)
except Exception as error: except Exception as error:
@ -708,6 +1185,7 @@ def check_ip(ip):
def check_mac(mac): def check_mac(mac):
"""Check mac is mac address formatted."""
try: try:
netaddr.EUI(mac) netaddr.EUI(mac)
except Exception as error: except Exception as error:
@ -721,6 +1199,7 @@ NAME_PATTERN = re.compile(r'[a-zA-Z0-9][a-zA-Z0-9_-]*')
def check_name(name): def check_name(name):
"""Check name meeting name format requirement."""
if not NAME_PATTERN.match(name): if not NAME_PATTERN.match(name):
raise exception.InvalidParameter( raise exception.InvalidParameter(
'name %s does not match the pattern %s' % ( 'name %s does not match the pattern %s' % (
@ -734,6 +1213,7 @@ def _check_ipmi_credentials_ip(ip):
def check_ipmi_credentials(ipmi_credentials): def check_ipmi_credentials(ipmi_credentials):
"""Check ipmi credentials format is correct."""
if not ipmi_credentials: if not ipmi_credentials:
return return
if not isinstance(ipmi_credentials, dict): if not isinstance(ipmi_credentials, dict):
@ -775,6 +1255,7 @@ def _check_switch_credentials_version(version):
def check_switch_credentials(credentials): def check_switch_credentials(credentials):
"""Check switch credentials format is correct."""
if not credentials: if not credentials:
return return
if not isinstance(credentials, dict): if not isinstance(credentials, dict):


@ -17,6 +17,7 @@ import traceback
class DatabaseException(Exception): class DatabaseException(Exception):
"""Base class for all database exceptions."""
def __init__(self, message): def __init__(self, message):
super(DatabaseException, self).__init__(message) super(DatabaseException, self).__init__(message)
self.traceback = traceback.format_exc() self.traceback = traceback.format_exc()

File diff suppressed because it is too large


@ -226,7 +226,7 @@ class CobblerInstaller(OSInstaller):
os.path.join(self.tmpl_dir, os_version), self.SYS_TMPL_NAME os.path.join(self.tmpl_dir, os_version), self.SYS_TMPL_NAME
) )
if not os.path.exists(tmpl_path): if not os.path.exists(tmpl_path):
err_msg = "Template '%s' does not exists!" % self.SYS_TMPL_NAME err_msg = "Template '%s' does not exists!" % tmpl_path
logging.error(err_msg) logging.error(err_msg)
raise Exception(err_msg) raise Exception(err_msg)


@ -30,7 +30,6 @@ USERNAME = 'username'
# Adapter info related keywords # Adapter info related keywords
DIST_SYS_NAME = 'distributed_system_name'
FLAVOR = 'flavor' FLAVOR = 'flavor'
FLAVORS = 'flavors' FLAVORS = 'flavors'
PLAYBOOK = 'playbook' PLAYBOOK = 'playbook'


@ -96,27 +96,27 @@ class PackageMatcher(object):
"""Progress matcher for package installer.""" """Progress matcher for package installer."""
def __init__( def __init__(
self, package_installer_name, distributed_system_pattern, self, package_installer_name, adapter_pattern,
item_matcher, file_reader_factory item_matcher, file_reader_factory
): ):
self.name_ = re.compile(package_installer_name) self.name_ = re.compile(package_installer_name)
self.ds_regex_ = re.compile(distributed_system_pattern) self.adapter_regex_ = re.compile(adapter_pattern)
self.matcher_ = item_matcher self.matcher_ = item_matcher
self.file_reader_factory_ = file_reader_factory self.file_reader_factory_ = file_reader_factory
def __repr__(self): def __repr__(self):
return '%s[name:%s, ds_pattern:%s, matcher:%s]' % ( return '%s[name:%s, adapter_pattern:%s, matcher:%s]' % (
self.__class__.__name__, self.name_.pattern, self.__class__.__name__, self.name_.pattern,
self.ds_regex_.pattern, self.matcher_) self.adapter_regex_.pattern, self.matcher_)
def match(self, package_installer_name, distributed_system_name): def match(self, package_installer_name, adapter_name):
"""Check if the package matcher is acceptable.""" """Check if the package matcher is acceptable."""
if package_installer_name is None: if package_installer_name is None:
return False return False
else: else:
return all([ return all([
self.name_.match(package_installer_name), self.name_.match(package_installer_name),
self.ds_regex_.match(distributed_system_name) self.adapter_regex_.match(adapter_name)
]) ])
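Since matching now keys on the adapter name rather than the distributed system name, a quick sketch of the semantics (a real instance takes an AdapterItemMatcher and a FileReaderFactory; None stands in for both here):

    matcher = PackageMatcher(
        package_installer_name='chef_installer',
        adapter_pattern='openstack.*',
        item_matcher=None,
        file_reader_factory=None
    )
    matcher.match('chef_installer', 'openstack_icehouse')  # True
    matcher.match('chef_installer', 'ceph(chef)')          # False
    matcher.match(None, 'openstack_icehouse')              # False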
def update_progress(self, name, state, log_history_mapping): def update_progress(self, name, state, log_history_mapping):


@ -0,0 +1,29 @@
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""module to provide environment to load progress calculator configurations.
.. moduleauthor:: Xiaodong Wang <xiaodongwang@huawei.com>
"""
from compass.log_analyzor.adapter_matcher import AdapterItemMatcher
from compass.log_analyzor.file_matcher import FileMatcher
from compass.log_analyzor.file_matcher import FileReaderFactory
from compass.log_analyzor.line_matcher import IncrementalProgress
from compass.log_analyzor.line_matcher import LineMatcher
from compass.utils import setting_wrapper as setting
from compass.utils import util
ENV_GLOBALS = globals()
ENV_LOCALS = locals()


@ -158,7 +158,7 @@ class LineMatcher(object):
self.progress_ = SameProgress() self.progress_ = SameProgress()
elif isinstance(progress, ProgressCalculator): elif isinstance(progress, ProgressCalculator):
self.progress_ = progress self.progress_ = progress
elif util.is_instance(progress, [int, float]): elif isinstance(progress, (int, long, float)):
self.progress_ = RelativeProgress(progress) self.progress_ = RelativeProgress(progress)
else: else:
raise TypeError( raise TypeError(


@ -20,6 +20,8 @@ import logging
from compass.log_analyzor.adapter_matcher import OSMatcher from compass.log_analyzor.adapter_matcher import OSMatcher
from compass.log_analyzor.adapter_matcher import PackageMatcher from compass.log_analyzor.adapter_matcher import PackageMatcher
from compass.log_analyzor.environment import ENV_GLOBALS
from compass.log_analyzor.environment import ENV_LOCALS
from compass.log_analyzor.file_matcher import FileReaderFactory from compass.log_analyzor.file_matcher import FileReaderFactory
from compass.utils import setting_wrapper as setting from compass.utils import setting_wrapper as setting
@ -30,90 +32,102 @@ PACKAGE_ADAPTER_CONFIGURATIONS = None
PROGRESS_CALCULATOR_CONFIGURATIONS = None PROGRESS_CALCULATOR_CONFIGURATIONS = None
def _load_calculator_configurations(): def _load_calculator_configurations(force=False):
global PROGRESS_CALCULATOR_CONFIGURATIONS global PROGRESS_CALCULATOR_CONFIGURATIONS
if PROGRESS_CALCULATOR_CONFIGURATIONS is None: if force or PROGRESS_CALCULATOR_CONFIGURATIONS is None:
env_locals = {}
env_locals.update(ENV_GLOBALS)
env_locals.update(ENV_LOCALS)
PROGRESS_CALCULATOR_CONFIGURATIONS = util.load_configs( PROGRESS_CALCULATOR_CONFIGURATIONS = util.load_configs(
setting.PROGRESS_CALCULATOR_DIR setting.PROGRESS_CALCULATOR_DIR,
env_locals=env_locals
) )
progress_calculator_configuration = ( if not PROGRESS_CALCULATOR_CONFIGURATIONS:
PROGRESS_CALCULATOR_CONFIGURATIONS[0] logging.info('No configuration found for progress calculator.')
)
os_installer_configurations = None global OS_ADAPTER_CONFIGURATIONS
package_installer_configurations = None if force or OS_ADAPTER_CONFIGURATIONS is None:
if progress_calculator_configuration is not None: OS_ADAPTER_CONFIGURATIONS = []
if 'OS_INSTALLER_CONFIGURATIONS' in ( for progress_calculator_configuration in (
PROGRESS_CALCULATOR_CONFIGURATIONS
):
if 'OS_LOG_CONFIGURATIONS' in (
progress_calculator_configuration progress_calculator_configuration
): ):
os_installer_configurations = ( os_installer_configurations = (
(progress_calculator_configuration[ progress_calculator_configuration['OS_LOG_CONFIGURATIONS']
'OS_INSTALLER_CONFIGURATIONS'])
) )
if 'PACKAGE_INSTALLER_CONFIGURATIONS' in ( for os_installer_configuration in os_installer_configurations:
OS_ADAPTER_CONFIGURATIONS.append(OSMatcher(
os_installer_name=(
os_installer_configuration['os_installer_name']
),
os_pattern=os_installer_configuration['os_pattern'],
item_matcher=(
os_installer_configuration['item_matcher']
),
file_reader_factory=FileReaderFactory(
os_installer_configuration['logdir']
)
))
if not OS_ADAPTER_CONFIGURATIONS:
logging.info(
'no OS_LOG_CONFIGURATIONS section found '
'in progress calculator.'
)
else:
logging.debug(
'OS_ADAPTER_CONFIGURATIONS is\n%s',
OS_ADAPTER_CONFIGURATIONS
)
global PACKAGE_ADAPTER_CONFIGURATIONS
if force or PACKAGE_ADAPTER_CONFIGURATIONS is None:
PACKAGE_ADAPTER_CONFIGURATIONS = []
for progress_calculator_configuration in (
PROGRESS_CALCULATOR_CONFIGURATIONS
):
if 'ADAPTER_LOG_CONFIGURATIONS' in (
progress_calculator_configuration progress_calculator_configuration
): ):
package_installer_configurations = ( package_installer_configurations = (
(progress_calculator_configuration[ progress_calculator_configuration[
'PACKAGE_INSTALLER_CONFIGURATIONS']) 'ADAPTER_LOG_CONFIGURATIONS'
]
) )
for package_installer_configuration in (
package_installer_configurations
):
PACKAGE_ADAPTER_CONFIGURATIONS.append(PackageMatcher(
package_installer_name=(
package_installer_configuration[
'package_installer_name'
]
),
adapter_pattern=(
package_installer_configuration['adapter_pattern']
),
item_matcher=(
package_installer_configuration['item_matcher']
),
file_reader_factory=FileReaderFactory(
package_installer_configuration['logdir']
)
))
if not PACKAGE_ADAPTER_CONFIGURATIONS:
logging.info(
'no ADAPTER_LOG_CONFIGURATIONS section found '
'in progress calculator.'
)
else: else:
logging.debug('No configuration found for progress calculator.') logging.debug(
'PACKAGE_ADAPTER_CONFIGURATIONS is\n%s',
PACKAGE_ADAPTER_CONFIGURATIONS
)
global OS_ADAPTER_CONFIGURATIONS
if OS_ADAPTER_CONFIGURATIONS is None:
if os_installer_configurations is not None:
OS_ADAPTER_CONFIGURATIONS = [
OSMatcher(
os_installer_name='cobbler',
os_pattern='CentOS-6.*',
item_matcher=(
(os_installer_configurations[
'cobbler']['CentOS6'])
),
file_reader_factory=FileReaderFactory(
setting.INSTALLATION_LOGDIR['CobblerInstaller']
)
),
OSMatcher(
os_installer_name='cobbler',
os_pattern='CentOS-7.*',
item_matcher=(
(os_installer_configurations[
'cobbler']['CentOS7'])
),
file_reader_factory=FileReaderFactory(
setting.INSTALLATION_LOGDIR['CobblerInstaller']
)
),
OSMatcher(
os_installer_name='cobbler',
os_pattern='Ubuntu.*',
item_matcher=(
(os_installer_configurations[
'cobbler']['Ubuntu'])
),
file_reader_factory=FileReaderFactory(
setting.INSTALLATION_LOGDIR['CobblerInstaller']
)
)
]
global PACKAGE_ADAPTER_CONFIGURATIONS def load_calculator_configurations(force_reload=False):
if PACKAGE_ADAPTER_CONFIGURATIONS is None: _load_calculator_configurations(force=force_reload)
if package_installer_configurations is not None:
PACKAGE_ADAPTER_CONFIGURATIONS = [
PackageMatcher(
package_installer_name='chef_installer',
distributed_system_pattern='openstack.*',
item_matcher=(
(package_installer_configurations[
'chef_installer']['openstack'])
),
file_reader_factory=FileReaderFactory(
setting.INSTALLATION_LOGDIR['ChefInstaller']
)
)
]
def _get_os_matcher(os_installer_name, os_name): def _get_os_matcher(os_installer_name, os_name):
@ -131,22 +145,22 @@ def _get_os_matcher(os_installer_name, os_name):
def _get_package_matcher( def _get_package_matcher(
package_installer_name, distributed_system_name package_installer_name, adapter_name
): ):
"""Get package adapter matcher by pacakge name and installer name.""" """Get package adapter matcher by adapter name and installer name."""
_load_calculator_configurations() _load_calculator_configurations()
for configuration in PACKAGE_ADAPTER_CONFIGURATIONS: for configuration in PACKAGE_ADAPTER_CONFIGURATIONS:
if configuration.match( if configuration.match(
package_installer_name, package_installer_name,
distributed_system_name adapter_name
): ):
return configuration return configuration
else: else:
logging.debug('configuration %s does not match %s and %s', logging.debug('configuration %s does not match %s and %s',
configuration, distributed_system_name, configuration, adapter_name,
package_installer_name) package_installer_name)
logging.error('No configuration found for package installer %s os %s', logging.error('No configuration found for package installer %s adapter %s',
package_installer_name, distributed_system_name) package_installer_name, adapter_name)
return None return None
@ -174,11 +188,11 @@ def update_clusterhost_progress(clusterhost_mapping):
) in ( ) in (
clusterhost_mapping.items() clusterhost_mapping.items()
): ):
distributed_system_name = clusterhost['distributed_system_name'] adapter_name = clusterhost['adapter_name']
package_installer_name = clusterhost['package_installer']['name'] package_installer_name = clusterhost['package_installer']['name']
package_matcher = _get_package_matcher( package_matcher = _get_package_matcher(
package_installer_name, package_installer_name,
distributed_system_name adapter_name
) )
if not package_matcher: if not package_matcher:
continue continue

View File

@ -30,6 +30,7 @@ from compass.actions import update_progress
from compass.db.api import adapter_holder as adapter_api from compass.db.api import adapter_holder as adapter_api
from compass.db.api import database from compass.db.api import database
from compass.db.api import metadata_holder as metadata_api from compass.db.api import metadata_holder as metadata_api
from compass.log_analyzor import progress_calculator
from compass.tasks.client import celery from compass.tasks.client import celery
from compass.utils import flags from compass.utils import flags
@ -46,6 +47,8 @@ def global_celery_init(**_):
database.init() database.init()
adapter_api.load_adapters() adapter_api.load_adapters()
metadata_api.load_metadatas() metadata_api.load_metadatas()
adapter_api.load_flavors()
progress_calculator.load_calculator_configurations()
@setup_logging.connect() @setup_logging.connect()


@ -24,12 +24,16 @@ import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true' os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.actions import deploy
from compass.actions import util
from compass.utils import setting_wrapper as setting from compass.utils import setting_wrapper as setting
reload(setting) reload(setting)
from compass.actions import deploy
from compass.actions import util
from compass.utils import flags
from compass.utils import logsetting
class TestDeployAction(unittest2.TestCase): class TestDeployAction(unittest2.TestCase):
"""Test deploy moudle functions in actions.""" """Test deploy moudle functions in actions."""
def setUp(self): def setUp(self):
@ -169,3 +173,9 @@ class TestDeployAction(unittest2.TestCase):
output = util.ActionHelper.get_hosts_info(1, [1], None) output = util.ActionHelper.get_hosts_info(1, [1], None)
self.maxDiff = None self.maxDiff = None
self.assertDictEqual(expected_output, output) self.assertDictEqual(expected_output, output)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()


@ -27,8 +27,6 @@ os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting from compass.utils import setting_wrapper as setting
reload(setting) reload(setting)
@ -60,6 +58,7 @@ from compass.log_analyzor import progress_calculator
from compass.utils import flags from compass.utils import flags
from compass.utils import logsetting from compass.utils import logsetting
ADAPTER_NAME = 'openstack_icehouse' ADAPTER_NAME = 'openstack_icehouse'
OS_NAME = 'CentOS-6.5-x86_64' OS_NAME = 'CentOS-6.5-x86_64'
SWITCH_IP = '172.29.8.40' SWITCH_IP = '172.29.8.40'
@ -72,8 +71,9 @@ class TestProgressCalculator(unittest2.TestCase):
"""Test end to end.""" """Test end to end."""
def _prepare_database(self): def _prepare_database(self):
adapter.load_adapters() adapter.load_adapters(force_reload=True)
metadata.load_metadatas() metadata.load_metadatas(force_reload=True)
adapter.load_flavors(force_reload=True)
self.user_object = ( self.user_object = (
user_api.get_user_object( user_api.get_user_object(
@ -87,25 +87,24 @@ class TestProgressCalculator(unittest2.TestCase):
# get adapter information # get adapter information
list_adapters = adapter.list_adapters(user=self.user_object) list_adapters = adapter.list_adapters(user=self.user_object)
for adptr in list_adapters: for adpt in list_adapters:
self.adapter_id = None self.adapter_id = None
if adptr['name'] != ADAPTER_NAME: if adpt['name'] != ADAPTER_NAME:
continue continue
self.adapter_id = adptr['id'] self.adapter_id = adpt['id']
self.os_id = None self.os_id = None
for supported_os in adptr['supported_oses']: for supported_os in adpt['supported_oses']:
if supported_os['name'] == OS_NAME: if supported_os['name'] == OS_NAME:
self.os_id = supported_os['os_id'] self.os_id = supported_os['os_id']
break break
if not self.os_id: if not self.os_id:
continue continue
if ( if (
'package_installer' in adptr.keys() and 'package_installer' in adpt.keys() and
adptr['flavors'] != [] and adpt['flavors'] != []
adptr['distributed_system_name'] == 'openstack'
): ):
self.flavor_id = None self.flavor_id = None
for flavor in adptr['flavors']: for flavor in adpt['flavors']:
if flavor['name'] == 'allinone': if flavor['name'] == 'allinone':
self.flavor_id = flavor['id'] self.flavor_id = flavor['id']
break break
@ -401,7 +400,7 @@ class TestProgressCalculator(unittest2.TestCase):
with open(target_log, 'w') as f: with open(target_log, 'w') as f:
for single_line in raw_file: for single_line in raw_file:
f.write(single_line + '\n') f.write(single_line + '\n')
f.close f.close()
def _mock_lock(self): def _mock_lock(self):
@contextmanager @contextmanager
@ -419,10 +418,15 @@ class TestProgressCalculator(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestProgressCalculator, self).setUp() super(TestProgressCalculator, self).setUp()
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
parent_path = os.path.abspath(os.path.join( parent_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), "../../../.." os.path.dirname(__file__), "../../../.."
)) ))
setting.CONFIG_DIR = os.path.join(parent_path, 'conf') os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
parent_path,
'conf'
)
reload(setting)
logsetting.init() logsetting.init()
self._mock_lock() self._mock_lock()
database.init('sqlite://') database.init('sqlite://')
@ -439,7 +443,7 @@ class TestProgressCalculator(unittest2.TestCase):
'CobblerInstaller': setting.COBBLER_INSTALLATION_LOGDIR, 'CobblerInstaller': setting.COBBLER_INSTALLATION_LOGDIR,
'ChefInstaller': setting.CHEF_INSTALLATION_LOGDIR 'ChefInstaller': setting.CHEF_INSTALLATION_LOGDIR
} }
reload(progress_calculator) progress_calculator.load_calculator_configurations(force_reload=True)
def tearDown(self): def tearDown(self):
super(TestProgressCalculator, self).tearDown() super(TestProgressCalculator, self).tearDown()


@ -1,3 +1,2 @@
NAME = 'ceph' NAME = 'ceph'
PARENT = 'general' PARENT = 'general'
DISTRIBUTED_SYSTEM = 'ceph'


@ -1,4 +1,3 @@
NAME = 'openstack' NAME = 'openstack'
PARENT = 'general' PARENT = 'general'
DISTRIBUTED_SYSTEM = 'openstack'
SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04'] SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04']


@ -4,20 +4,4 @@ FLAVORS = [{
'display_name': 'allinone', 'display_name': 'allinone',
'template': 'allinone.tmpl', 'template': 'allinone.tmpl',
'roles': ['allinone-compute'] 'roles': ['allinone-compute']
}, {
'flavor': 'multiroles',
'display_name': 'multiroles',
'template': 'multiroles.tmpl',
'roles': [
'os-compute-worker', 'os-network', 'os-block-storage-worker',
'os-image', 'os-compute-vncproxy', 'os-controller',
'os-ops-messaging', 'os-ops-database', 'ha-proxy'
]
},{
'flavor': 'single-contoller-multi-compute',
'display_name': 'Single Controller, Multi-compute',
'template': 'base.tmpl',
'roles': [
'os-controller', 'os-compute-worker', 'os-network'
]
}] }]


@ -1,4 +1,6 @@
allinone = { ADAPTER = 'openstack_icehouse'
FLAVOR = 'allinone'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {


@ -1,2 +1,3 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'allinone' FLAVOR = 'allinone'
METADATA = {} METADATA = {}


@ -1,4 +1,5 @@
OS_CONFIG_MAPPING = { OS = 'general'
CONFIG_MAPPING = {
"mapped_name": "os_global_config", "mapped_name": "os_global_config",
"mapped_children": [{ "mapped_children": [{
"server_credentials":{ "server_credentials":{


@ -57,15 +57,17 @@ class ApiTestCase(unittest2.TestCase):
def setUp(self): def setUp(self):
super(ApiTestCase, self).setUp() super(ApiTestCase, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
adapter_api.load_adapters() adapter_api.load_adapters(force_reload=True)
metadata_api.load_metadatas() metadata_api.load_metadatas(force_reload=True)
adapter_api.load_flavors(force_reload=True)
from compass.api import api as compass_api from compass.api import api as compass_api
application = compass_api.app application = compass_api.app
@ -168,6 +170,14 @@ class ApiTestCase(unittest2.TestCase):
for flavor in adapter['flavors']: for flavor in adapter['flavors']:
flavor_id = flavor['id'] flavor_id = flavor['id']
break break
if not adapter_name:
raise Exception('adapter name not found')
if not adapter_id:
raise Exception('adapter id not found')
if not os_id:
raise Exception('os id not found')
if not flavor_id:
raise Exception('flavor id not found')
return (adapter_name, adapter_id, os_id, flavor_id) return (adapter_name, adapter_id, os_id, flavor_id)
@ -336,9 +346,18 @@ class TestClusterAPI(ApiTestCase):
data['name'] = 'cluster_invalid' data['name'] = 'cluster_invalid'
data['adapter_id'] = 9 data['adapter_id'] = 9
data['os_id'] = 1 data['os_id'] = 1
data['flavor_id'] = flavor_id
return_value = self.post(url, data)
self.assertEqual(return_value.status_code, 404)
# add a cluster with a non-existed flavor-id
data = {}
data['name'] = 'cluster_invalid'
data['adapter_id'] = adapter_id
data['os_id'] = 1
data['flavor_id'] = 1 data['flavor_id'] = 1
return_value = self.post(url, data) return_value = self.post(url, data)
self.assertEqual(return_value.status_code, 400) self.assertEqual(return_value.status_code, 404)
def test_update_cluster(self): def test_update_cluster(self):
# update a cluster successfully # update a cluster successfully
@ -403,8 +422,7 @@ class TestClusterAPI(ApiTestCase):
# give a non-existed cluster_id # give a non-existed cluster_id
url = '/clusters/99/hosts' url = '/clusters/99/hosts'
return_value = self.get(url) return_value = self.get(url)
resp = json.loads(return_value.get_data()) self.assertEqual(return_value.status_code, 404)
self.assertEqual(resp, [])
def test_show_cluster_host(self): def test_show_cluster_host(self):
# show a cluster_host successfully # show a cluster_host successfully
@ -951,8 +969,7 @@ class TestSwitchMachines(ApiTestCase):
# give a non-existed switch_id # give a non-existed switch_id
url = '/switches/99/machines' url = '/switches/99/machines'
return_value = self.get(url) return_value = self.get(url)
resp = json.loads(return_value.get_data()) self.assertEqual(return_value.status_code, 404)
self.assertEqual(resp, [])
def test_add_switch_machine(self): def test_add_switch_machine(self):
# add a switch machine successfully # add a switch machine successfully
@ -978,12 +995,12 @@ class TestSwitchMachines(ApiTestCase):
self.assertEqual(return_value.status_code, 409) self.assertEqual(return_value.status_code, 409)
# add an invalid switch machine # add an invalid switch machine
url = '/switches/2/machines' url = 's/witchedes'
data = { data = {
'mac': 'xxx' 'mac': 'xxx'
} }
return_value = self.post(url, data) return_value = self.post(url, data)
self.assertEqual(return_value.status_code, 400) self.assertEqual(return_value.status_code, 404)
def test_add_switch_machines(self): def test_add_switch_machines(self):
# batch switch machines # batch switch machines
@ -1030,7 +1047,7 @@ class TestSwitchMachines(ApiTestCase):
'port': '200', 'port': '200',
'mac': 'b1:b2:c3:d4:e5:f6' 'mac': 'b1:b2:c3:d4:e5:f6'
}] }]
expect_duplicate = {'mac': 'a1:b2:c3:d4:e5:f6', 'port': '101'} expect_duplicate = [{'mac': 'a1:b2:c3:d4:e5:f6', 'port': '101'}]
expect_failed = [ expect_failed = [
{'mac': 'a1:b2:f3:d4:e5:f6', 'port': '100'}, {'mac': 'a1:b2:f3:d4:e5:f6', 'port': '100'},
{'mac': 'a1:b2:c3:d4:e5:f6', 'port': '102'} {'mac': 'a1:b2:c3:d4:e5:f6', 'port': '102'}
@ -1049,18 +1066,21 @@ class TestSwitchMachines(ApiTestCase):
if k == 'fail_switches_machines': if k == 'fail_switches_machines':
for item in v: for item in v:
res_fail.append(item) res_fail.append(item)
self.assertEqual(len(res), len(expected))
for i, v in enumerate(res): for i, v in enumerate(res):
self.assertTrue( self.assertDictContainsSubset(
all(item in res[i].items() for item in expected[i].items()) expected[i], res[i]
) )
self.assertEqual(len(res_fail), len(expect_failed))
for i, v in enumerate(res_fail): for i, v in enumerate(res_fail):
self.assertTrue( self.assertDictContainsSubset(
all(item in res_fail[i].items() for expect_failed[i], res_fail[i]
item in expect_failed[i].items()) )
self.assertEqual(len(res_du), len(expect_duplicate))
for i, v in enumerate(res_du):
self.assertDictContainsSubset(
expect_duplicate[i], res_du[i]
) )
self.assertTrue(
all(item in res_du[0].items() for item in expect_duplicate.items())
)
def test_show_switch_machine(self): def test_show_switch_machine(self):
# show a switch_machine successfully # show a switch_machine successfully


@ -17,17 +17,23 @@
import os import os
import simplejson as json import simplejson as json
import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true' os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting from compass.utils import setting_wrapper as setting
reload(setting) reload(setting)
from test_api import ApiTestCase
from compass.db.api import cluster as cluster_db from compass.db.api import cluster as cluster_db
from compass.db.api import health_check_report as health_check_db from compass.db.api import health_check_report as health_check_db
from compass.db import models from compass.utils import flags
from compass.tests.api.test_api import ApiTestCase from compass.utils import logsetting
report_sample = { report_sample = {
@ -152,9 +158,14 @@ class TestHealthCheckAPI(ApiTestCase):
self.assertEqual(403, return_value.status_code) self.assertEqual(403, return_value.status_code)
# Cluster has been deployed successfully. # Cluster has been deployed successfully.
user = models.User.query.filter_by(email='admin@huawei.com').first()
cluster_db.update_cluster_state( cluster_db.update_cluster_state(
self.cluster_id, user=user, state='SUCCESSFUL' self.cluster_id, state='SUCCESSFUL'
) )
return_value = self.test_client.post(url, data=request_data) return_value = self.test_client.post(url, data=request_data)
self.assertEqual(202, return_value.status_code) self.assertEqual(202, return_value.status_code)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()


@ -41,15 +41,17 @@ class BaseTest(unittest2.TestCase):
def setUp(self): def setUp(self):
super(BaseTest, self).setUp() super(BaseTest, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
adapter_api.load_adapters() adapter_api.load_adapters(force_reload=True)
metadata_api.load_metadatas() metadata_api.load_metadatas(force_reload=True)
adapter_api.load_flavors(force_reload=True)
self.user_object = ( self.user_object = (
user_api.get_user_object( user_api.get_user_object(
setting.COMPASS_ADMIN_EMAIL setting.COMPASS_ADMIN_EMAIL


@ -1,3 +1,2 @@
NAME = 'ceph' NAME = 'ceph'
PARENT = 'general' PARENT = 'general'
DISTRIBUTED_SYSTEM = 'ceph'


@ -1,4 +1,3 @@
NAME = 'openstack' NAME = 'openstack'
PARENT = 'general' PARENT = 'general'
DISTRIBUTED_SYSTEM = 'openstack'
SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04'] SUPPORTED_OSES = ['CentOS6.5', 'Ubuntu12.04']


@ -1,4 +1,6 @@
HA_MULTINODES = { ADAPTER = 'openstack-icehouse'
FLAVOR = 'HA-multinodes'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {


@ -1,3 +1,4 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'HA-multinodes' FLAVOR = 'HA-multinodes'
METADATA = { METADATA = {
'ha_proxy': { 'ha_proxy': {


@ -1,2 +1,3 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'allinone' FLAVOR = 'allinone'
METADATA = {} METADATA = {}


@ -1,2 +1,3 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'single-contoller-multi-compute' FLAVOR = 'single-contoller-multi-compute'
METADATA = {} METADATA = {}


@ -1,4 +1,5 @@
OS_CONFIG_MAPPING = { OS = 'general'
CONFIG_MAPPING = {
"mapped_name": "os_global_config", "mapped_name": "os_global_config",
"mapped_children": [{ "mapped_children": [{
"server_credentials":{ "server_credentials":{


@ -43,13 +43,100 @@ from compass.utils import util
class AdapterTestCase(unittest2.TestCase): class AdapterTestCase(unittest2.TestCase):
"""Adapter base test case.""" """Adapter base test case."""
def _mock_load_configs(self, config_dir):
if config_dir == setting.OS_INSTALLER_DIR:
return [{
'NAME': 'cobbler',
'INSTANCE_NAME': 'cobbler',
'SETTINGS': {
'cobbler_url': 'http://127.0.0.1/cobbler_api',
'credentials': {
'username': 'cobbler',
'password': 'cobbler'
}
}
}]
elif config_dir == setting.PACKAGE_INSTALLER_DIR:
return [{
'NAME': 'chef_installer',
'INSTANCE_NAME': 'chef_installer',
'SETTINGS': {
'chef_url': 'https://127.0.0.1',
'key_dir': '',
'client_name': '',
'databags': [
'user_passwords', 'db_passwords',
'service_passwords', 'secrets'
]
}
}]
elif config_dir == setting.ADAPTER_DIR:
return [{
'NAME': 'openstack_icehouse',
'DISPLAY_NAME': 'Test OpenStack Icehouse',
'PACKAGE_INSTALLER': 'chef_installer',
'OS_INSTALLER': 'cobbler',
'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'],
'DEPLOYABLE': True
}, {
'NAME': 'ceph(chef)',
'DISPLAY_NAME': 'ceph(ceph)',
'PACKAGE_INSTALLER': 'chef_installer',
'OS_INSTALLER': 'cobbler',
'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'],
'DEPLOYABLE': True
}, {
'NAME': 'os_only',
'OS_INSTALLER': 'cobbler',
'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'],
'DEPLOYABLE': True
}]
elif config_dir == setting.ADAPTER_ROLE_DIR:
return [{
'ADAPTER_NAME': 'openstack_icehouse',
'ROLES': [{
'role': 'allinone-compute',
'display_name': 'all in one compute',
'description': 'all in one compute',
'optional': True
}]
}]
elif config_dir == setting.ADAPTER_FLAVOR_DIR:
return [{
'ADAPTER_NAME': 'openstack_icehouse',
'FLAVORS': [{
'flavor': 'allinone',
'display_name': 'allinone',
'template': 'allinone.tmpl',
'roles': ['allinone-compute']
}, {
'flavor': 'multiroles',
'display_name': 'multiroles',
'template': 'multiroles.tmpl',
'roles': ['allinone-compute']
}, {
'flavor': 'HA-multinodes',
'display_name': 'Multi-node Cluster with HA',
'template': 'ha_multinodes.tmpl',
'roles': ['allinone-compute']
}, {
'flavor': 'single-contoller-multi-compute',
'display_name': 'Single Controller, Multi-compute',
'template': 'base.tmpl',
'roles': ['allinone-compute']
}]
}]
else:
return []
def setUp(self): def setUp(self):
super(AdapterTestCase, self).setUp() super(AdapterTestCase, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
self.user_object = ( self.user_object = (
@ -58,27 +145,26 @@ class AdapterTestCase(unittest2.TestCase):
) )
) )
mock_config = mock.Mock() mock_config = mock.Mock(side_effect=self._mock_load_configs)
self.backup_adapter_configs = util.load_configs self.backup_adapter_configs = util.load_configs
util.load_configs = mock_config util.load_configs = mock_config
configs = [{ adapter.load_adapters(force_reload=True)
'NAME': 'openstack_test', adapter.load_flavors(force_reload=True)
'DISLAY_NAME': 'Test OpenStack Icehouse',
'PACKAGE_INSTALLER': 'chef_installer',
'OS_INSTALLER': 'cobbler',
'SUPPORTED_OS_PATTERNS': ['(?i)centos.*', '(?i)ubuntu.*'],
'DEPLOYABLE': True
}]
util.load_configs.return_value = configs
with database.session() as session:
adapter_api.add_adapters_internal(session)
adapter.load_adapters()
self.adapter_object = adapter.list_adapters(user=self.user_object) self.adapter_object = adapter.list_adapters(user=self.user_object)
self.adapter_obj = None
self.adapter_id = None
self.flavor_id = None
for adapter_obj in self.adapter_object: for adapter_obj in self.adapter_object:
if adapter_obj['name'] == 'openstack_icehouse': if adapter_obj['name'] == 'openstack_icehouse':
self.adapter_obj = adapter_obj
self.adapter_id = adapter_obj['id'] self.adapter_id = adapter_obj['id']
break break
for flavor in self.adapter_obj['flavors']:
if flavor['name'] == 'HA-multinodes':
self.flavor_id = flavor['id']
break
def tearDown(self): def tearDown(self):
super(AdapterTestCase, self).tearDown() super(AdapterTestCase, self).tearDown()
util.load_configs = self.backup_adapter_configs util.load_configs = self.backup_adapter_configs
@ -106,7 +192,6 @@ class TestListAdapters(AdapterTestCase):
'openstack_icehouse', 'openstack_icehouse',
'os_only', 'os_only',
'ceph(chef)', 'ceph(chef)',
'openstack_test'
] ]
self.assertIsNotNone(adapters) self.assertIsNotNone(adapters)
for expect in expects: for expect in expects:
@ -143,6 +228,55 @@ class TestGetAdapter(AdapterTestCase):
) )
class TestListFlavors(AdapterTestCase):
def setUp(self):
super(TestListFlavors, self).setUp()
def tearDown(self):
super(TestListFlavors, self).tearDown()
def test_list_flavors(self):
"""Test list flavors."""
flavors = adapter.list_flavors(
user=self.user_object
)
flavor_name = []
for flavor in flavors:
flavor_name.append(flavor['name'])
expected = [
'allinone',
'multiroles',
'HA-multinodes',
'single-contoller-multi-compute'
]
for expect in expected:
self.assertIn(expect, flavor_name)
class TestGetFlavors(AdapterTestCase):
def setUp(self):
super(TestGetFlavors, self).setUp()
def tearDown(self):
super(TestGetFlavors, self).tearDown()
def test_get_flavor(self):
"""Test get a flavor."""
flavor = adapter.get_flavor(
self.flavor_id,
user=self.user_object
)
expected = {
'display_name': 'Multi-node Cluster with HA',
'id': 'openstack_icehouse:HA-multinodes',
'template': 'ha_multinodes.tmpl',
'name': 'HA-multinodes'
}
self.assertTrue(
all(item in flavor.items() for item in expected.items())
)
if __name__ == '__main__': if __name__ == '__main__':
flags.init() flags.init()
logsetting.init() logsetting.init()


@ -51,15 +51,17 @@ class ClusterTestCase(unittest2.TestCase):
def setUp(self): def setUp(self):
super(ClusterTestCase, self).setUp() super(ClusterTestCase, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
adapter.load_adapters() adapter.load_adapters(force_reload=True)
metadata.load_metadatas() metadata.load_metadatas(force_reload=True)
adapter.load_flavors(force_reload=True)
self.user_object = ( self.user_object = (
user_api.get_user_object( user_api.get_user_object(
@ -1771,7 +1773,7 @@ class TestGetClusterHostSelfState(ClusterTestCase):
def test_get_cluster_host_self_state(self): def test_get_cluster_host_self_state(self):
cluster_host_self_state = cluster.get_cluster_host_self_state( cluster_host_self_state = cluster.get_cluster_host_self_state(
self.cluster_id, self.cluster_id,
self.host_id, self.host_id[0],
user=self.user_object, user=self.user_object,
) )
self.assertEqual(cluster_host_self_state['state'], 'UNINITIALIZED') self.assertEqual(cluster_host_self_state['state'], 'UNINITIALIZED')
@ -1823,13 +1825,13 @@ class TestUpdateClusterHostState(ClusterTestCase):
def test_update_cluster_host_state(self): def test_update_cluster_host_state(self):
cluster.update_cluster_host_state( cluster.update_cluster_host_state(
self.cluster_id, self.cluster_id,
self.host_id, self.host_id[0],
user=self.user_object, user=self.user_object,
state='INSTALLING' state='INSTALLING'
) )
update_state = cluster.get_cluster_host_state( update_state = cluster.get_cluster_host_state(
self.cluster_id, self.cluster_id,
self.host_id, self.host_id[0],
user=self.user_object, user=self.user_object,
) )
self.assertEqual(update_state['state'], 'INSTALLING') self.assertEqual(update_state['state'], 'INSTALLING')


@ -51,15 +51,17 @@ class HostTestCase(unittest2.TestCase):
def setUp(self): def setUp(self):
super(HostTestCase, self).setUp() super(HostTestCase, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
adapter.load_adapters() adapter.load_adapters(force_reload=True)
metadata.load_metadatas() metadata.load_metadatas(force_reload=True)
adapter.load_flavors(force_reload=True)
self.user_object = ( self.user_object = (
user_api.get_user_object( user_api.get_user_object(


@ -45,15 +45,17 @@ class MetadataTestCase(unittest2.TestCase):
def setUp(self): def setUp(self):
super(MetadataTestCase, self).setUp() super(MetadataTestCase, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
adapter.load_adapters() adapter.load_adapters(force_reload=True)
metadata.load_metadatas() metadata.load_metadatas(force_reload=True)
adapter.load_flavors(force_reload=True)
# Get a os_id and adapter_id # Get a os_id and adapter_id
self.user_object = ( self.user_object = (
@ -249,6 +251,7 @@ class TestGetFlavorMetadata(MetadataTestCase):
config_dir, *args, **kwargs config_dir, *args, **kwargs
) )
config = { config = {
'ADAPTER': 'openstack_icehouse',
'FLAVOR': 'HA-multinodes', 'FLAVOR': 'HA-multinodes',
'METADATA': { 'METADATA': {
'test_ha_proxy': { 'test_ha_proxy': {
@ -279,7 +282,7 @@ class TestGetFlavorMetadata(MetadataTestCase):
) )
self.assertIsNotNone(flavor_metadata) self.assertIsNotNone(flavor_metadata)
self.assertTrue( self.assertTrue(
'test_ha_proxy' in flavor_metadata['flavor_config'].keys() 'test_ha_proxy' in flavor_metadata['package_config'].keys()
) )
@ -310,55 +313,6 @@ class TestGetPackageOsMetadata(MetadataTestCase):
) )
class TestListFlavors(MetadataTestCase):
def setUp(self):
super(TestListFlavors, self).setUp()
def tesrDown(self):
super(TestListFlavors, self).tearDown()
def test_list_flavors(self):
"""Test list flavors."""
flavors = metadata.list_flavors(
user=self.user_object
)
flavor_name = []
for flavor in flavors:
flavor_name.append(flavor['name'])
expected = [
'allinone',
'multiroles',
'HA-multinodes',
'single-contoller-multi-compute'
]
for expect in expected:
self.assertIn(expect, flavor_name)
class TestGetFlavors(MetadataTestCase):
def setUp(self):
super(TestGetFlavors, self).setUp()
def tearDown(self):
super(TestGetFlavors, self).tearDown()
def test_get_flavor(self):
"""Test get a flavor."""
flavor = metadata.get_flavor(
self.flavor_id,
user=self.user_object
)
expected = {
'display_name': 'Multi-node Cluster with HA',
'id': 3,
'template': 'ha_multinodes.tmpl',
'name': 'HA-multinodes'
}
self.assertTrue(
all(item in flavor.items() for item in expected.items())
)
if __name__ == '__main__': if __name__ == '__main__':
flags.init() flags.init()
logsetting.init() logsetting.init()

View File

@ -234,7 +234,7 @@ class TestPatchSwitch(BaseTest):
switch.patch_switch( switch.patch_switch(
1, 1,
user=self.user_object, user=self.user_object,
patched_credentials={ credentials={
'version': '2c', 'version': '2c',
'community': 'public' 'community': 'public'
} }
@ -316,7 +316,7 @@ class TestUpdateSwitchFilters(BaseTest):
switch.update_switch_filters( switch.update_switch_filters(
1, 1,
user=self.user_object, user=self.user_object,
filters=[ machine_filters=[
{ {
'filter_type': 'allow' 'filter_type': 'allow'
} }
@ -352,7 +352,7 @@ class TestPatchSwitchFilter(BaseTest):
switch.patch_switch_filter( switch.patch_switch_filter(
2, 2,
user=self.user_object, user=self.user_object,
patched_filters=[ machine_filters=[
{ {
'filter_type': 'allow' 'filter_type': 'allow'
} }
@ -811,7 +811,7 @@ class TestPatchSwitchMachine(BaseTest):
def tearDown(self): def tearDown(self):
super(TestPatchSwitchMachine, self).tearDown() super(TestPatchSwitchMachine, self).tearDown()
def test_pathc_switch_machine(self): def test_patch_switch_machine(self):
switch.add_switch_machine( switch.add_switch_machine(
1, 1,
mac='28:6e:d4:46:c4:25', mac='28:6e:d4:46:c4:25',
@ -822,7 +822,7 @@ class TestPatchSwitchMachine(BaseTest):
1, 1,
1, 1,
user=self.user_object, user=self.user_object,
patched_tag={ tag={
'patched_tag': 'test_patched_tag' 'patched_tag': 'test_patched_tag'
} }
) )
@ -858,7 +858,7 @@ class TestPatchSwitchmachine(BaseTest):
switch.patch_switchmachine( switch.patch_switchmachine(
1, 1,
user=self.user_object, user=self.user_object,
patched_location={ location={
'patched_location': 'test_location' 'patched_location': 'test_location'
} }
) )
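The switch test hunks above all encode one API change: the `patch_*` and update helpers drop their `patched_`-prefixed keyword names for the plain names (`credentials`, `machine_filters`, `tag`, `location`) the update paths already use. A consolidated sketch of the renamed call sites as they appear inside the test methods, with values taken from the tests:

switch.patch_switch(
    1,
    user=self.user_object,
    credentials={'version': '2c', 'community': 'public'},
)
switch.update_switch_filters(
    1,
    user=self.user_object,
    machine_filters=[{'filter_type': 'allow'}],
)
switch.patch_switch_machine(
    1, 1,
    user=self.user_object,
    tag={'patched_tag': 'test_patched_tag'},
)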

View File

@ -38,11 +38,12 @@ class TestGetUserObject(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestGetUserObject, self).setUp() super(TestGetUserObject, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -322,8 +323,8 @@ class TestAddDelUserPermission(BaseTest):
def test_add_permission_position(self): def test_add_permission_position(self):
user_api.add_permission( user_api.add_permission(
self.user_object.id, self.user_object.id,
True,
2, 2,
True,
user=self.user_object, user=self.user_object,
) )
permissions = user_api.get_permissions( permissions = user_api.get_permissions(

View File

@ -65,11 +65,11 @@ class TestListUserActions(BaseTest):
self.user_object.id, self.user_object.id,
action='/testaction' action='/testaction'
) )
user_action = user_log.list_user_actions( self.assertRaises(
2, exception.RecordNotExists,
user=self.user_object user_log.list_user_actions,
2, user=self.user_object
) )
self.assertEqual([], user_action)
class TestListActions(BaseTest): class TestListActions(BaseTest):
@ -92,7 +92,6 @@ class TestListActions(BaseTest):
'action': '/testaction', 'action': '/testaction',
'user_id': 1 'user_id': 1
} }
print action
self.assertTrue( self.assertTrue(
all(item in action[0].items() all(item in action[0].items()
for item in expected.items())) for item in expected.items()))
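The behavioural change here: `list_user_actions` now raises `exception.RecordNotExists` for a user without matching records instead of returning an empty list. A sketch of a defensive caller under that contract; the import path is assumed from the test suite's conventions:

from compass.db import exception

try:
    actions = user_log.list_user_actions(2, user=self.user_object)
except exception.RecordNotExists:
    actions = []  # treat a missing record as no actions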

View File

@ -38,11 +38,12 @@ class TestModelQuery(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestModelQuery, self).setUp() super(TestModelQuery, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
def tearDown(self): def tearDown(self):
@ -70,11 +71,12 @@ class TestModelFilter(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestModelFilter, self).setUp() super(TestModelFilter, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -275,11 +277,12 @@ class TestModelFilter(unittest2.TestCase):
class TestGetDbObject(unittest2.TestCase): class TestGetDbObject(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestGetDbObject, self).setUp() super(TestGetDbObject, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -322,11 +325,12 @@ class TestGetDbObject(unittest2.TestCase):
class TestAddDbObject(unittest2.TestCase): class TestAddDbObject(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestAddDbObject, self).setUp() super(TestAddDbObject, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -408,26 +412,40 @@ class TestAddDbObject(unittest2.TestCase):
def test_add_with_multiple_args(self): def test_add_with_multiple_args(self):
with database.session() as session: with database.session() as session:
db_permission = utils.add_db_object(
session,
models.Permission,
False,
'test',
alias='test'
)
db_user = utils.add_db_object(
session,
models.User,
False,
'test@huawei.com',
password='test'
)
db_objs = utils.add_db_object( db_objs = utils.add_db_object(
session, session,
models.AdapterRole, models.UserPermission,
True, True,
'test1', db_user.id,
1, db_permission.id
name='test1',
alias='test1'
) )
self.assertEqual('test1', db_objs.alias) self.assertEqual(db_user.id, db_objs.user_id)
self.assertEqual(db_permission.id, db_objs.permission_id)
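The rewritten test reflects `add_db_object`'s calling convention: after the session and model comes a flag controlling whether an existing row raises, then the model's unique key fields positionally, then ordinary column values as keyword arguments. A hedged sketch of that convention, with the signature inferred from the calls in the diff:

# add_db_object(session, model, exception_when_existing, *keys, **values)
with database.session() as session:
    permission = utils.add_db_object(
        session, models.Permission, False,  # False: reuse an existing row
        'test',                             # unique key: permission name
        alias='test',
    )
    user = utils.add_db_object(
        session, models.User, False,
        'test@huawei.com',                  # unique key: email
        password='test',
    )
    utils.add_db_object(
        session, models.UserPermission, True,  # True: raise if already there
        user.id, permission.id,                # composite unique key
    )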
class TestListDbObjects(unittest2.TestCase): class TestListDbObjects(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestListDbObjects, self).setUp() super(TestListDbObjects, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -484,11 +502,12 @@ class TestListDbObjects(unittest2.TestCase):
class TestDelDbObjects(unittest2.TestCase): class TestDelDbObjects(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestDelDbObjects, self).setUp() super(TestDelDbObjects, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -527,11 +546,12 @@ class TestDelDbObjects(unittest2.TestCase):
class TestUpdateDbObject(unittest2.TestCase): class TestUpdateDbObject(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestUpdateDbObject, self).setUp() super(TestUpdateDbObject, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -575,11 +595,12 @@ class TestUpdateDbObject(unittest2.TestCase):
class TestDelDbObject(unittest2.TestCase): class TestDelDbObject(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestDelDbObject, self).setUp() super(TestDelDbObject, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -610,11 +631,12 @@ class TestDelDbObject(unittest2.TestCase):
class TestCheckIp(unittest2.TestCase): class TestCheckIp(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestCheckIp, self).setUp() super(TestCheckIp, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()
@ -639,11 +661,12 @@ class TestCheckIp(unittest2.TestCase):
class TestCheckMac(unittest2.TestCase): class TestCheckMac(unittest2.TestCase):
def setUp(self): def setUp(self):
super(TestCheckMac, self).setUp() super(TestCheckMac, self).setUp()
reload(setting) os.environ['COMPASS_IGNORE_SETTING'] = 'true'
setting.CONFIG_DIR = os.path.join( os.environ['COMPASS_CONFIG_DIR'] = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.path.dirname(os.path.abspath(__file__)),
'data' 'data'
) )
reload(setting)
database.init('sqlite://') database.init('sqlite://')
database.create_db() database.create_db()

View File

@ -28,12 +28,16 @@ import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true' os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as compass_setting
reload(compass_setting)
from compass.deployment.installers.config_manager import BaseConfigManager from compass.deployment.installers.config_manager import BaseConfigManager
from compass.deployment.installers.os_installers.cobbler.cobbler \ from compass.deployment.installers.os_installers.cobbler.cobbler \
import CobblerInstaller import CobblerInstaller
from compass.tests.deployment.test_data import config_data from compass.tests.deployment.test_data import config_data
from compass.utils import setting_wrapper as compass_setting from compass.utils import flags
reload(compass_setting) from compass.utils import logsetting
class TestCobblerInstaller(unittest2.TestCase): class TestCobblerInstaller(unittest2.TestCase):
@ -291,3 +295,9 @@ class TestCobblerInstaller(unittest2.TestCase):
} }
output = self.test_cobbler._check_and_set_system_impi(3, "test_sys_id") output = self.test_cobbler._check_and_set_system_impi(3, "test_sys_id")
self.assertTrue(output) self.assertTrue(output)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()
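Several deployment test modules gain this same runner stanza so they can be executed directly; `flags.init()` must run before `logsetting.init()` because the log setup reads its level from the parsed flags. The added block as a standalone pattern:

import unittest2

from compass.utils import flags
from compass.utils import logsetting

if __name__ == '__main__':
    flags.init()       # parse command-line options first
    logsetting.init()  # logging level comes from the parsed flags
    unittest2.main()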

View File

@ -27,14 +27,17 @@ import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true' os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.deployment.installers.config_manager import BaseConfigManager
from compass.tests.deployment.test_data import config_data
from compass.utils import setting_wrapper as compass_setting from compass.utils import setting_wrapper as compass_setting
reload(compass_setting) reload(compass_setting)
from compass.deployment.installers.config_manager import BaseConfigManager
from compass.deployment.installers.pk_installers.chef_installer.chef_installer\ from compass.deployment.installers.pk_installers.chef_installer.chef_installer\
import ChefInstaller import ChefInstaller
from compass.tests.deployment.test_data import config_data
from compass.utils import flags
from compass.utils import logsetting
class TestChefInstaller(unittest2.TestCase): class TestChefInstaller(unittest2.TestCase):
@ -816,3 +819,9 @@ class TestChefInstaller(unittest2.TestCase):
output = self.test_chef.generate_installer_config() output = self.test_chef.generate_installer_config()
self.maxDiff = None self.maxDiff = None
self.assertDictEqual(entry["excepted_output"], output) self.assertDictEqual(entry["excepted_output"], output)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

View File

@ -19,11 +19,16 @@ import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true' os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as compass_setting
reload(compass_setting)
from compass.deployment.installers.config_manager import BaseConfigManager from compass.deployment.installers.config_manager import BaseConfigManager
from compass.deployment.utils import constants as const from compass.deployment.utils import constants as const
from compass.tests.deployment.test_data import config_data from compass.tests.deployment.test_data import config_data
from compass.utils import setting_wrapper as compass_setting from compass.utils import flags
reload(compass_setting) from compass.utils import logsetting
class TestConfigManager(unittest2.TestCase): class TestConfigManager(unittest2.TestCase):
@ -225,3 +230,9 @@ class TestConfigManager(unittest2.TestCase):
self.maxDiff = None self.maxDiff = None
output = self.test_config_manager.get_host_roles_mapping(3) output = self.test_config_manager.get_host_roles_mapping(3)
self.assertEqual(expected_output, output) self.assertEqual(expected_output, output)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

View File

@ -19,12 +19,17 @@ import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true' os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.deployment.installers.installer import BaseInstaller
from compass.tests.deployment.test_data import config_data
from compass.utils import setting_wrapper as compass_setting from compass.utils import setting_wrapper as compass_setting
reload(compass_setting) reload(compass_setting)
from compass.deployment.installers.installer import BaseInstaller
from compass.tests.deployment.test_data import config_data
from compass.utils import flags
from compass.utils import logsetting
class TestBaseInstaller(unittest2.TestCase): class TestBaseInstaller(unittest2.TestCase):
"""Test base installer.""" """Test base installer."""
def setUp(self): def setUp(self):
@ -48,3 +53,9 @@ class TestBaseInstaller(unittest2.TestCase):
self.maxDiff = None self.maxDiff = None
self.assertDictEqual(expected_output, output) self.assertDictEqual(expected_output, output)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

View File

@ -32,7 +32,6 @@ test_client = compass_setting.TEST_CLIENT_NAME
adapter_test_config = { adapter_test_config = {
"name": "openstack_icehouse", "name": "openstack_icehouse",
"distributed_system_name": "openstack_icehouse",
"flavors": [ "flavors": [
{ {
"falvor_name": "test_flavor", "falvor_name": "test_flavor",

View File

@ -34,6 +34,8 @@ reload(setting)
from compass.deployment.deploy_manager import DeployManager from compass.deployment.deploy_manager import DeployManager
from compass.tests.deployment.test_data import config_data from compass.tests.deployment.test_data import config_data
from compass.utils import flags
from compass.utils import logsetting
class TestDeployManager(unittest2.TestCase): class TestDeployManager(unittest2.TestCase):
@ -54,3 +56,9 @@ class TestDeployManager(unittest2.TestCase):
test_manager = DeployManager(adapter_info, cluster_info, hosts_info) test_manager = DeployManager(adapter_info, cluster_info, hosts_info)
self.assertIsNotNone(test_manager) self.assertIsNotNone(test_manager)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()

View File

@ -251,7 +251,7 @@ class TestPackageMatcher(unittest2.TestCase):
) )
self.package_matcher = adapter_matcher.PackageMatcher( self.package_matcher = adapter_matcher.PackageMatcher(
package_installer_name='chef', package_installer_name='chef',
distributed_system_pattern=r'openstack', adapter_pattern=r'openstack',
item_matcher=self.item_matcher, item_matcher=self.item_matcher,
file_reader_factory=self.file_reader_factory file_reader_factory=self.file_reader_factory
) )
@ -262,7 +262,7 @@ class TestPackageMatcher(unittest2.TestCase):
def test_match_none(self): def test_match_none(self):
test_match_none = { test_match_none = {
'package_installer_name': None, 'package_installer_name': None,
'distributed_system_name': 'openstack' 'adapter_name': 'openstack'
} }
matcher = self.package_matcher.match(**test_match_none) matcher = self.package_matcher.match(**test_match_none)
self.assertFalse(matcher) self.assertFalse(matcher)
@ -270,7 +270,7 @@ class TestPackageMatcher(unittest2.TestCase):
def test_match(self): def test_match(self):
test_match = { test_match = {
'package_installer_name': 'chef', 'package_installer_name': 'chef',
'distributed_system_name': 'openstack' 'adapter_name': 'openstack'
} }
matcher = self.package_matcher.match(**test_match) matcher = self.package_matcher.match(**test_match)
self.assertTrue(matcher) self.assertTrue(matcher)
@ -278,7 +278,7 @@ class TestPackageMatcher(unittest2.TestCase):
def test_installer_unmatch(self): def test_installer_unmatch(self):
test_unmatch = { test_unmatch = {
'package_installer_name': 'dummy', 'package_installer_name': 'dummy',
'distributed_system_name': 'openstack' 'adapter_name': 'openstack'
} }
matcher = self.package_matcher.match(**test_unmatch) matcher = self.package_matcher.match(**test_unmatch)
self.assertFalse(matcher) self.assertFalse(matcher)
@ -286,7 +286,7 @@ class TestPackageMatcher(unittest2.TestCase):
def test_name_unmatch(self): def test_name_unmatch(self):
test_unmatch = { test_unmatch = {
'package_installer_name': 'chef', 'package_installer_name': 'chef',
'distributed_system_name': 'dummy' 'adapter_name': 'dummy'
} }
matcher = self.package_matcher.match(**test_unmatch) matcher = self.package_matcher.match(**test_unmatch)
self.assertFalse(matcher) self.assertFalse(matcher)
@ -294,7 +294,7 @@ class TestPackageMatcher(unittest2.TestCase):
def test_both_unmatch(self): def test_both_unmatch(self):
test_unmatch = { test_unmatch = {
'package_installer_name': 'dummy', 'package_installer_name': 'dummy',
'distributed_system_name': 'dummy' 'adapter_name': 'dummy'
} }
matcher = self.package_matcher.match(**test_unmatch) matcher = self.package_matcher.match(**test_unmatch)
self.assertFalse(matcher) self.assertFalse(matcher)

View File

@ -42,6 +42,7 @@ flags.add('log_format',
flags.add('log_backup_count', type='int', flags.add('log_backup_count', type='int',
help='log backup count', default=setting.DEFAULT_LOGBACKUPCOUNT) help='log backup count', default=setting.DEFAULT_LOGBACKUPCOUNT)
# mapping str setting in flag --loglevel to logging level. # mapping str setting in flag --loglevel to logging level.
LOGLEVEL_MAPPING = { LOGLEVEL_MAPPING = {
'finest': logging.DEBUG - 2, # more detailed log. 'finest': logging.DEBUG - 2, # more detailed log.
@ -53,13 +54,20 @@ LOGLEVEL_MAPPING = {
'critical': logging.CRITICAL, 'critical': logging.CRITICAL,
} }
logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine') logging.addLevelName(LOGLEVEL_MAPPING['fine'], 'fine')
logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest') logging.addLevelName(LOGLEVEL_MAPPING['finest'], 'finest')
# disable logging when logsetting.init not called # disable logging when logsetting.init not called
logging.getLogger().setLevel(logging.CRITICAL) logging.getLogger().setLevel(logging.CRITICAL)
def getLevelByName(level_name):
"""Get log level by level name."""
return LOGLEVEL_MAPPING[level_name]
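`getLevelByName` is a thin lookup over `LOGLEVEL_MAPPING`, handy wherever a log level arrives as a string option; a one-line usage sketch:

import logging

# resolve a string option such as 'fine' to its numeric logging level
logging.getLogger().setLevel(getLevelByName('fine'))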
def init(): def init():
"""Init loggsetting. It should be called after flags.init.""" """Init loggsetting. It should be called after flags.init."""
loglevel = flags.OPTIONS.loglevel.lower() loglevel = flags.OPTIONS.loglevel.lower()

View File

@ -24,7 +24,7 @@ import os.path
# default setting # default setting
CONFIG_DIR = '/etc/compass' CONFIG_DIR = os.environ.get('COMPASS_CONFIG_DIR', '/etc/compass')
SQLALCHEMY_DATABASE_URI = 'sqlite://' SQLALCHEMY_DATABASE_URI = 'sqlite://'
SQLALCHEMY_DATABASE_POOL_TYPE = 'static' SQLALCHEMY_DATABASE_POOL_TYPE = 'static'
COBBLER_INSTALLATION_LOGDIR = '/var/log/cobbler/anamon' COBBLER_INSTALLATION_LOGDIR = '/var/log/cobbler/anamon'
@ -77,68 +77,29 @@ TEST_CLIENT_NAME = "graceyu"
PROGRESS_UPDATE_PID_FILE = '/var/run/progress_update.pid' PROGRESS_UPDATE_PID_FILE = '/var/run/progress_update.pid'
OS_INSTALLER_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'os_installer')
)
PACKAGE_INSTALLER_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'package_installer')
)
OS_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'os')
)
DISTRIBUTED_SYSTEM_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'distributed_system')
)
ADAPTER_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'adapter')
)
OS_METADATA_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'os_metadata')
)
PACKAGE_METADATA_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'package_metadata')
)
FLAVOR_METADATA_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'flavor_metadata')
)
OS_FIELD_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'os_field')
)
PACKAGE_FIELD_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'package_field')
)
FLAVOR_FIELD_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'flavor_field')
)
ADAPTER_ROLE_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'role')
)
ADAPTER_FLAVOR_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'flavor')
)
VALIDATOR_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'validator')
)
CALLBACK_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'callback')
)
TMPL_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'templates')
)
MACHINE_LIST_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'machine_list')
)
PROGRESS_CALCULATOR_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'progress_calculator')
)
OS_MAPPING_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'os_mapping')
)
FLAVOR_MAPPING_DIR = lazypy.delay(
lambda: os.path.join(CONFIG_DIR, 'flavor_mapping')
)
PROXY_URL_PREFIX = 'http://10.145.81.205:5000' PROXY_URL_PREFIX = 'http://10.145.81.205:5000'
OS_INSTALLER_DIR = ''
PACKAGE_INSTALLER_DIR = ''
OS_DIR = ''
ADAPTER_DIR = ''
OS_METADATA_DIR = ''
PACKAGE_METADATA_DIR = ''
FLAVOR_METADATA_DIR = ''
OS_FIELD_DIR = ''
PACKAGE_FIELD_DIR = ''
FLAVOR_FIELD_DIR = ''
ADAPTER_ROLE_DIR = ''
ADAPTER_FLAVOR_DIR = ''
VALIDATOR_DIR = ''
CALLBACK_DIR = ''
TMPL_DIR = ''
MACHINE_LIST_DIR = ''
PROGRESS_CALCULATOR_DIR = ''
OS_MAPPING_DIR = ''
FLAVOR_MAPPING_DIR = ''
if ( if (
'COMPASS_IGNORE_SETTING' in os.environ and 'COMPASS_IGNORE_SETTING' in os.environ and
os.environ['COMPASS_IGNORE_SETTING'] os.environ['COMPASS_IGNORE_SETTING']
@ -156,3 +117,60 @@ else:
except Exception as error: except Exception as error:
logging.exception(error) logging.exception(error)
raise error raise error
if not OS_INSTALLER_DIR:
OS_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'os_installer')
if not PACKAGE_INSTALLER_DIR:
PACKAGE_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'package_installer')
if not OS_DIR:
OS_DIR = os.path.join(CONFIG_DIR, 'os')
if not ADAPTER_DIR:
ADAPTER_DIR = os.path.join(CONFIG_DIR, 'adapter')
if not OS_METADATA_DIR:
OS_METADATA_DIR = os.path.join(CONFIG_DIR, 'os_metadata')
if not PACKAGE_METADATA_DIR:
PACKAGE_METADATA_DIR = os.path.join(CONFIG_DIR, 'package_metadata')
if not FLAVOR_METADATA_DIR:
FLAVOR_METADATA_DIR = os.path.join(CONFIG_DIR, 'flavor_metadata')
if not OS_FIELD_DIR:
OS_FIELD_DIR = os.path.join(CONFIG_DIR, 'os_field')
if not PACKAGE_FIELD_DIR:
PACKAGE_FIELD_DIR = os.path.join(CONFIG_DIR, 'package_field')
if not FLAVOR_FIELD_DIR:
FLAVOR_FIELD_DIR = os.path.join(CONFIG_DIR, 'flavor_field')
if not ADAPTER_ROLE_DIR:
ADAPTER_ROLE_DIR = os.path.join(CONFIG_DIR, 'role')
if not ADAPTER_FLAVOR_DIR:
ADAPTER_FLAVOR_DIR = os.path.join(CONFIG_DIR, 'flavor')
if not VALIDATOR_DIR:
VALIDATOR_DIR = os.path.join(CONFIG_DIR, 'validator')
if not CALLBACK_DIR:
CALLBACK_DIR = os.path.join(CONFIG_DIR, 'callback')
if not TMPL_DIR:
TMPL_DIR = os.path.join(CONFIG_DIR, 'templates')
if not MACHINE_LIST_DIR:
MACHINE_LIST_DIR = os.path.join(CONFIG_DIR, 'machine_list')
if not PROGRESS_CALCULATOR_DIR:
PROGRESS_CALCULATOR_DIR = os.path.join(CONFIG_DIR, 'progress_calculator')
if not OS_MAPPING_DIR:
OS_MAPPING_DIR = os.path.join(CONFIG_DIR, 'os_mapping')
if not FLAVOR_MAPPING_DIR:
FLAVOR_MAPPING_DIR = os.path.join(CONFIG_DIR, 'flavor_mapping')
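The `lazypy.delay` wrappers existed so the derived paths tracked later changes to `CONFIG_DIR`; with `CONFIG_DIR` fixed from the environment at import time they reduce to plain strings, defaulted only after the optional settings file has had its chance to override them. The resolution order, condensed:

import os

CONFIG_DIR = os.environ.get('COMPASS_CONFIG_DIR', '/etc/compass')
OS_INSTALLER_DIR = ''  # a settings file may set this explicitly

# ... the optional settings file under CONFIG_DIR is executed here ...

if not OS_INSTALLER_DIR:  # fill the default only if still unset
    OS_INSTALLER_DIR = os.path.join(CONFIG_DIR, 'os_installer')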

View File

@ -24,10 +24,32 @@ import os
import os.path import os.path
import re import re
import sys import sys
import warnings
def deprecated(func):
"""This is a decorator which can be used to mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
def new_func(*args, **kwargs):
warnings.warn(
"Call to deprecated function %s." % func.__name__,
category=DeprecationWarning
)
return func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
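A usage sketch for the new `deprecated` decorator; `old_lookup` is a hypothetical function, not part of the change:

import warnings

# assumes `deprecated` from compass.utils.util is in scope
@deprecated
def old_lookup():
    """A superseded helper kept only for compatibility."""
    return 42

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    old_lookup()  # emits "Call to deprecated function old_lookup."
    assert caught[0].category is DeprecationWarning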
def parse_datetime(date_time, exception_class=Exception): def parse_datetime(date_time, exception_class=Exception):
"""Parse datetime str to get datetime object.""" """Parse datetime str to get datetime object.
The date time format is %Y-%m-%d %H:%M:%S
"""
try: try:
return datetime.datetime.strptime( return datetime.datetime.strptime(
date_time, '%Y-%m-%d %H:%M:%S' date_time, '%Y-%m-%d %H:%M:%S'
@ -40,7 +62,10 @@ def parse_datetime(date_time, exception_class=Exception):
def parse_datetime_range(date_time_range, exception_class=Exception): def parse_datetime_range(date_time_range, exception_class=Exception):
"""parse datetime range str to pair of datetime objects.""" """parse datetime range str to pair of datetime objects.
The date time range format is %Y-%m-%d %H:%M:%S,%Y-%m-%d %H:%M:%S
"""
try: try:
start, end = date_time_range.split(',') start, end = date_time_range.split(',')
except Exception as error: except Exception as error:
@ -60,7 +85,11 @@ def parse_datetime_range(date_time_range, exception_class=Exception):
def parse_request_arg_dict(arg, exception_class=Exception): def parse_request_arg_dict(arg, exception_class=Exception):
"""parse string to dict.""" """parse string to dict.
The str is formatted like a=b;c=d and parsed to
{'a': 'b', 'c': 'd'}
"""
arg_dict = {} arg_dict = {}
arg_pairs = arg.split(';') arg_pairs = arg.split(';')
for arg_pair in arg_pairs: for arg_pair in arg_pairs:
@ -105,6 +134,16 @@ def merge_dict(lhs, rhs, override=True):
return lhs return lhs
def recursive_merge_dict(name, all_dicts, parents):
"""Recursively merge parent dict into base dict."""
parent_name = parents.get(name, None)
base_dict = all_dicts.get(name, {})
if not parent_name:
return base_dict
merged = recursive_merge_dict(parent_name, all_dicts, parents)
return merge_dict(base_dict, merged, override=False)
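`recursive_merge_dict` climbs the `parents` chain and folds each ancestor's dict into the child's with `override=False`, so keys the child defines win; this is what lets the flattened adapter configs inherit from their `PARENT` entries. A worked example with illustrative contents:

all_dicts = {
    'general': {'os_installer': 'cobbler', 'flavors': []},
    'openstack': {'flavors': ['HA-multinodes']},
}
parents = {'general': None, 'openstack': 'general'}

merged = recursive_merge_dict('openstack', all_dicts, parents)
# child keys win over inherited ones:
# {'os_installer': 'cobbler', 'flavors': ['HA-multinodes']}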
def encrypt(value, crypt_method=None): def encrypt(value, crypt_method=None):
"""Get encrypted value.""" """Get encrypted value."""
if not crypt_method: if not crypt_method:
@ -129,6 +168,12 @@ def encrypt(value, crypt_method=None):
def parse_time_interval(time_interval_str): def parse_time_interval(time_interval_str):
"""parse string of time interval to time interval.
supported time interval unit: ['d', 'w', 'h', 'm', 's']
Examples:
time_interval_str: '3d 2h' time interval to 3 days and 2 hours.
"""
if not time_interval_str: if not time_interval_str:
return 0 return 0
@ -171,10 +216,11 @@ def load_configs(
config_dir, config_name_suffix='.conf', config_dir, config_name_suffix='.conf',
env_globals={}, env_locals={} env_globals={}, env_locals={}
): ):
"""Load configurations from config dir."""
configs = [] configs = []
config_dir = str(config_dir) config_dir = str(config_dir)
if not os.path.exists(config_dir): if not os.path.exists(config_dir):
logging.debug('path %s does not exist', config_dir) logging.error('path %s does not exist', config_dir)
return configs return configs
for component in os.listdir(config_dir): for component in os.listdir(config_dir):
if not component.endswith(config_name_suffix): if not component.endswith(config_name_suffix):
@ -194,22 +240,6 @@ def load_configs(
return configs return configs
def is_instance(instance, expected_types):
"""Check instance type is in one of expected types.
:param instance: instance to check the type.
:param expected_types: types to check if instance type is in them.
:type expected_types: list of type
:returns: True if instance type is in expect_types.
"""
for expected_type in expected_types:
if isinstance(instance, expected_type):
return True
return False
def pretty_print(*contents): def pretty_print(*contents):
"""pretty print contents.""" """pretty print contents."""
if len(contents) == 0: if len(contents) == 0:

View File

@ -1,3 +1,2 @@
NAME = 'ceph' NAME = 'ceph'
PARENT = 'general' PARENT = 'general'
DISTRIBUTED_SYSTEM = 'ceph'

View File

@ -1,5 +1,5 @@
NAME = 'ceph_firefly' NAME = 'ceph_firefly'
DSPLAY_NAME = 'Ceph Firefly' DISPLAY_NAME = 'Ceph Firefly'
PARENT = 'ceph' PARENT = 'ceph'
PACKAGE_INSTALLER = 'chef_installer' PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler' OS_INSTALLER = 'cobbler'

View File

@ -1,7 +1,6 @@
NAME = 'ceph_openstack_icehouse' NAME = 'ceph_openstack_icehouse'
DISPLAY_NAME = 'Ceph + OpenStack Icehouse' DISPLAY_NAME = 'Ceph + OpenStack Icehouse'
PARENT = 'openstack' PARENT = 'openstack'
DISTRIBUTED_SYSTEM = 'openstack_ceph'
PACKAGE_INSTALLER = 'chef_installer' PACKAGE_INSTALLER = 'chef_installer'
OS_INSTALLER = 'cobbler' OS_INSTALLER = 'cobbler'
SUPPORTED_OS_PATTERNS = ['(?i)centos-6\.5.*', '(?i)ubuntu-12\.04.*'] SUPPORTED_OS_PATTERNS = ['(?i)centos-6\.5.*', '(?i)ubuntu-12\.04.*']

View File

@ -1,3 +1,2 @@
NAME = 'openstack' NAME = 'openstack'
PARENT = 'general' PARENT = 'general'
DISTRIBUTED_SYSTEM = 'openstack'

View File

@ -1,4 +1,6 @@
allinone = { ADAPTER = 'openstack-icehouse'
FLAVOR = 'allinone'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {

View File

@ -1,4 +1,6 @@
ceph_firefly = { ADAPTER = 'ceph_firefly'
FLAVOR = 'ceph_firefly'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"ceph_config": { "ceph_config": {

View File

@ -1,4 +1,6 @@
ceph_openstack_multinodes = { ADAPTER = 'ceph_openstack_icehouse'
FLAVOR = 'ceph-openstack-multinodes'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {

View File

@ -1,4 +1,6 @@
ceph_openstack_single_controller = { ADAPTER = 'ceph_openstack_icehouse'
FLAVOR = 'ceph-openstack-single-controller'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {

View File

@ -1,4 +1,6 @@
HA_multinodes = { ADAPTER = 'openstack-icehouse'
FLAVOR = 'HA-multinodes'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {

View File

@ -1,4 +1,6 @@
multinodes = { ADAPTER = 'openstack-icehouse'
FLAVOR = 'multinodes'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {

View File

@ -1,4 +1,6 @@
single_contoller_multi_compute = { ADAPTER = 'openstack-icehouse'
FLAVOR = 'single-contoller-multi-compute'
CONFIG_MAPPING = {
"mapped_name": "flavor_config", "mapped_name": "flavor_config",
"mapped_children": [{ "mapped_children": [{
"security": { "security": {

View File

@ -1,3 +1,4 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'HA-multinodes' FLAVOR = 'HA-multinodes'
METADATA = { METADATA = {
'ha_proxy': { 'ha_proxy': {

View File

@ -1,2 +1,3 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'allinone' FLAVOR = 'allinone'
METADATA = {} METADATA = {}

View File

@ -1,2 +1,3 @@
ADAPTER = 'ceph_firefly'
FLAVOR = 'ceph_firefly' FLAVOR = 'ceph_firefly'
METADATA = {} METADATA = {}

View File

@ -1,2 +1,3 @@
ADAPTER = 'ceph_openstack_icehouse'
FLAVOR = 'ceph_openstack_multinodes' FLAVOR = 'ceph_openstack_multinodes'
METADATA = {} METADATA = {}

View File

@ -1,2 +1,3 @@
ADAPTER = 'ceph_openstack_icehouse'
FLAVOR = 'ceph_openstack_single_controller' FLAVOR = 'ceph_openstack_single_controller'
METADATA = {} METADATA = {}

View File

@ -1,2 +1,3 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'multinodes' FLAVOR = 'multinodes'
METADATA = {} METADATA = {}

View File

@ -1,2 +1,3 @@
ADAPTER = 'openstack_icehouse'
FLAVOR = 'single-contoller-multi-compute' FLAVOR = 'single-contoller-multi-compute'
METADATA = {} METADATA = {}

View File

@ -1,4 +1,5 @@
OS_CONFIG_MAPPING = { OS = 'general'
CONFIG_MAPPING = {
"mapped_name": "os_global_config", "mapped_name": "os_global_config",
"mapped_children": [{ "mapped_children": [{
"server_credentials":{ "server_credentials":{

View File

@ -1,10 +1,3 @@
from compass.log_analyzor.adapter_matcher import AdapterItemMatcher
from compass.log_analyzor.file_matcher import FileMatcher
from compass.log_analyzor.file_matcher import FileReaderFactory
from compass.log_analyzor.line_matcher import IncrementalProgress
from compass.log_analyzor.line_matcher import LineMatcher
OS_INSTALLER_CONFIGURATIONS = { OS_INSTALLER_CONFIGURATIONS = {
'cobbler': { 'cobbler': {
'Ubuntu': AdapterItemMatcher( 'Ubuntu': AdapterItemMatcher(
@ -503,10 +496,26 @@ OS_INSTALLER_CONFIGURATIONS = {
} }
} }
OS_LOG_CONFIGURATIONS = [{
'os_installer_name': 'cobbler',
'os_pattern': 'CentOS-6.*',
'item_matcher': OS_INSTALLER_CONFIGURATIONS['cobbler']['CentOS6'],
'logdir': setting.INSTALLATION_LOGDIR['CobblerInstaller']
}, {
'os_installer_name': 'cobbler',
'os_pattern': 'CentOS-7.*',
'item_matcher': OS_INSTALLER_CONFIGURATIONS['cobbler']['CentOS7'],
'logdir': setting.INSTALLATION_LOGDIR['CobblerInstaller']
}, {
'os_installer_name': 'cobbler',
'os_pattern': 'Ubuntu.*',
'item_matcher': OS_INSTALLER_CONFIGURATIONS['cobbler']['Ubuntu'],
'logdir': setting.INSTALLATION_LOGDIR['CobblerInstaller']
}]
PACKAGE_INSTALLER_CONFIGURATIONS = { PACKAGE_INSTALLER_CONFIGURATIONS = {
'chef_installer': { 'chef_installer': {
'openstack': AdapterItemMatcher( 'default': AdapterItemMatcher(
file_matchers=[ file_matchers=[
FileMatcher( FileMatcher(
filename='chef-client.log', filename='chef-client.log',
@ -538,3 +547,12 @@ PACKAGE_INSTALLER_CONFIGURATIONS = {
), ),
} }
} }
ADAPTER_LOG_CONFIGURATIONS = [{
'package_installer_name': 'chef_installer',
'adapter_pattern': '.*',
'item_matcher': PACKAGE_INSTALLER_CONFIGURATIONS['chef_installer']['default'],
'logdir': setting.INSTALLATION_LOGDIR['ChefInstaller']
}]
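The hard-coded per-OS keys give way to ordered lists of declarative entries, each tying an installer name and a regular-expression pattern to an `item_matcher` and a log directory. A sketch of how such a list can be scanned; the `find_item_matcher` helper is illustrative, not part of the change:

import re

def find_item_matcher(log_configurations, installer_name, name):
    """Return the first item_matcher whose installer and pattern match."""
    for config in log_configurations:
        if installer_name not in (
            config.get('os_installer_name'),
            config.get('package_installer_name')
        ):
            continue
        pattern = config.get('os_pattern') or config.get('adapter_pattern')
        if re.match(pattern, name):
            return config['item_matcher']
    return None

# find_item_matcher(OS_LOG_CONFIGURATIONS, 'cobbler', 'CentOS-6.5') picks
# the CentOS6 matcher; the single ADAPTER_LOG_CONFIGURATIONS entry matches
# any adapter name via '.*'.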

View File

@ -59,11 +59,9 @@ ROLES = [{
}, { }, {
'role': 'os-ha', 'role': 'os-ha',
'display_name': 'ha proxy node', 'display_name': 'ha proxy node',
'description': 'ha proxy node', 'description': 'ha proxy node'
'optional': True
}, { }, {
'role': 'allinone-compute', 'role': 'allinone-compute',
'display_name': 'all in one compute', 'display_name': 'all in one compute',
'description': 'all in one compute', 'description': 'all in one compute'
'optional': True
}] }]

View File

@ -5,7 +5,7 @@ echo "Installing Ansible"
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $DIR/install.conf source $DIR/install.conf
if [ -f $DIR/env.conf ]; then if [ -f $DIR/env.conf ]; then
source env.conf source $DIR/env.conf
else else
echo "failed to load environment" echo "failed to load environment"
exit 1 exit 1

View File

@ -27,7 +27,7 @@ fi
echo "reconfigure chef server" echo "reconfigure chef server"
# configure chef-server # configure chef-server
sudo chef-server-ctl cleanse sudo chef-server-ctl cleanse
mkdir -p /etc/chef-server sudo mkdir -p /etc/chef-server
sudo cp -rn /etc/chef-server/chef-server.rb /root/backup/chef/ sudo cp -rn /etc/chef-server/chef-server.rb /root/backup/chef/
sudo rm -f /etc/chef-server/chef-server.rb sudo rm -f /etc/chef-server/chef-server.rb
sudo cp -rf $COMPASSDIR/misc/chef-server/chef-server.rb /etc/chef-server/chef-server.rb sudo cp -rf $COMPASSDIR/misc/chef-server/chef-server.rb /etc/chef-server/chef-server.rb
@ -60,10 +60,10 @@ if [ ! -f /etc/chef-server/chef-validator.pem ]; then
fi fi
sudo knife configure -y -i --defaults -r ~/chef-repo -s https://$IPADDR:443 -u $USER --admin-client-name admin --admin-client-key /etc/chef-server/admin.pem --validation-client-name chef-validator --validation-key /etc/chef-server/chef-validator.pem <<EOF sudo knife configure -y -i --defaults -r ~/chef-repo -s https://$IPADDR:443 -u $USER --admin-client-name admin --admin-client-key /etc/chef-server/admin.pem --validation-client-name chef-validator --validation-key /etc/chef-server/chef-validator.pem <<EOF
$CHEF_PASSWORD $CHEF_PASSWD
EOF EOF
sudo sed -i "/node_name/c\node_name \'admin\'" /$USER/.chef/knife.rb sudo sed -i "/node_name/c\node_name \'admin\'" /$HOME/.chef/knife.rb
sudo sed -i "/client_key/c\client_key \'\/etc\/chef-server\/admin.pem\'" /$USER/.chef/knife.rb sudo sed -i "/client_key/c\client_key \'\/etc\/chef-server\/admin.pem\'" /$HOME/.chef/knife.rb
sudo rm -rf /var/chef sudo rm -rf /var/chef

View File

@ -17,7 +17,6 @@ export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' #export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
export ADAPTER_NAME="openstack_juno" export ADAPTER_NAME="openstack_juno"
export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
export ADAPTER_FLAVOR_PATTERN="allinone" export ADAPTER_FLAVOR_PATTERN="allinone"
export HOST_ROLES="host1=allinone-compute" export HOST_ROLES="host1=allinone-compute"
export DEFAULT_ROLES="allinone" export DEFAULT_ROLES="allinone"

View File

@ -17,7 +17,6 @@ export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' #export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
export ADAPTER_NAME="openstack_juno" export ADAPTER_NAME="openstack_juno"
export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
export ADAPTER_FLAVOR_PATTERN="single-controller" export ADAPTER_FLAVOR_PATTERN="single-controller"
export HOST_ROLES="host1=controller;host2=network;host3=compute;host4=storage" export HOST_ROLES="host1=controller;host2=network;host3=compute;host4=storage"
export DEFAULT_ROLES="controller" export DEFAULT_ROLES="controller"

View File

@ -17,7 +17,6 @@ export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' #export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
export ADAPTER_NAME="os_only" export ADAPTER_NAME="os_only"
export ADAPTER_TARGET_SYSTEM_PATTERN=""
export ADAPTER_FLAVOR_PATTERN="" export ADAPTER_FLAVOR_PATTERN=""
export PROXY="" export PROXY=""
export IGNORE_PROXY="" export IGNORE_PROXY=""
