diff --git a/etc/valet/api/config.py b/etc/valet/api/config.py
index 65a6c88..b0e48e1 100644
--- a/etc/valet/api/config.py
+++ b/etc/valet/api/config.py
@@ -15,9 +15,10 @@
 from oslo_config import cfg
 from pecan.hooks import TransactionHook
 
-from valet.api.db import models
-from valet.api.common.hooks import NotFoundHook, MessageNotificationHook
+from valet.api.common.hooks import MessageNotificationHook
+from valet.api.common.hooks import NotFoundHook
+from valet.api.db import models
 
 CONF = cfg.CONF
diff --git a/etc/valet/openstack/notification_listener/notification_listener.py b/etc/valet/openstack/notification_listener/notification_listener.py
index 6fdc2b2..764fd89 100644
--- a/etc/valet/openstack/notification_listener/notification_listener.py
+++ b/etc/valet/openstack/notification_listener/notification_listener.py
@@ -40,6 +40,7 @@
 transport = oslo_messaging.get_transport(cfg.CONF)
 targets = [oslo_messaging.Target(topic='notifications')]
 endpoints = [NotificationEndpoint()]
-server = oslo_messaging.get_notification_listener(transport, targets, endpoints)
+server = oslo_messaging.get_notification_listener(
+    transport, targets, endpoints)
 server.start()
 server.wait()
diff --git a/plugins/setup.py b/plugins/setup.py
index a68a83c..47e505d 100644
--- a/plugins/setup.py
+++ b/plugins/setup.py
@@ -1,21 +1,15 @@
-# -*- encoding: utf-8 -*-
 #
-# Copyright (c) 2014-2016 AT&T
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-'''Setup'''
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
 
 import setuptools
diff --git a/plugins/valet_plugins/common/valet_api.py b/plugins/valet_plugins/common/valet_api.py
index 4130f98..7bc245c 100644
--- a/plugins/valet_plugins/common/valet_api.py
+++ b/plugins/valet_plugins/common/valet_api.py
@@ -1,18 +1,16 @@
-# -*- encoding: utf-8 -*-
 #
-# Copyright (c) 2014-2016 AT&T
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
 
 '''Valet API Wrapper'''
 
@@ -44,7 +42,8 @@ def _exception(exc, exc_info, req):
         # FIXME(GJ): if Valet returns error
         error = response.get('error')
         msg = "%(explanation)s (valet-api: %(message)s)" % {
-            'explanation': response.get('explanation', _('No remediation available')),
+            'explanation': response.get('explanation',
+                                        _('No remediation available')),
             'message': error.get('message', _('Unknown error'))
         }
         LOG.error("Response with error: " + msg)
@@ -52,7 +51,9 @@ def _exception(exc, exc_info, req):
     else:
         # TODO(JD): Re-evaluate if this clause is necessary.
         exc_class, exc, traceback = exc_info  # pylint: disable=W0612
-        msg = _("%(exc)s for %(method)s %(url)s with body %(body)s") % {'exc': exc, 'method': exc.request.method, 'url': exc.request.url, 'body': exc.request.body}
+        msg = (_("%(exc)s for %(method)s %(url)s with body %(body)s") %
+               {'exc': exc, 'method': exc.request.method,
+                'url': exc.request.url, 'body': exc.request.body})
         LOG.error("Response is none: " + msg)
         return "error"
 
@@ -89,7 +90,10 @@ class ValetAPIWrapper(object):
             raise  # exception.Error(_('API Endpoint not defined.'))
 
     def _get_timeout(self):
-        '''Returns Valet plugin API request timeout tuple (conn_timeout, read_timeout)'''
+        '''Returns Valet plugin API request timeout.
+
+        Returns the timeout values tuple (conn_timeout, read_timeout)
+        '''
         read_timeout = 600
         try:
             opt = getattr(cfg.CONF, self.opt_group_str)
@@ -97,18 +101,24 @@
             read_timeout = opt[self.opt_read_timeout]
         except Exception:
             pass
-        # Timeout accepts tupple on 'requests' version 2.4.0 and above - adding *connect* timeouts
+        # Timeout accepts tuple on 'requests' version 2.4.0 and above -
+        # adding *connect* timeouts
         # return conn_timeout, read_timeout
         return read_timeout
 
     def _register_opts(self):
         '''Register options'''
         opts = []
-        option = cfg.StrOpt(self.opt_name_str, default=None, help=_('Valet API endpoint'))
+        option = cfg.StrOpt(
+            self.opt_name_str, default=None, help=_('Valet API endpoint'))
         opts.append(option)
-        option = cfg.IntOpt(self.opt_conn_timeout, default=3, help=_('Valet Plugin Connect Timeout'))
+        option = cfg.IntOpt(
+            self.opt_conn_timeout, default=3,
+            help=_('Valet Plugin Connect Timeout'))
        opts.append(option)
-        option = cfg.IntOpt(self.opt_read_timeout, default=5, help=_('Valet Plugin Read Timeout'))
+        option = cfg.IntOpt(
+            self.opt_read_timeout, default=5,
+            help=_('Valet Plugin Read Timeout'))
         opts.append(option)
 
         opt_group = cfg.OptGroup(self.opt_group_str)
@@ -116,7 +126,7 @@
         cfg.CONF.register_opts(opts, group=opt_group)
 
     # TODO(JD): Keep stack param for now. We may need it again.
-    def plans_create(self, stack, plan, auth_token=None):  # pylint: disable=W0613
+    def plans_create(self, stack, plan, auth_token=None):
         '''Create a plan'''
         response = None
         try:
@@ -125,11 +135,12 @@
             url = self._api_endpoint() + '/plans/'
             payload = json.dumps(plan)
             self.headers['X-Auth-Token'] = auth_token
-            req = requests.post(url, data=payload, headers=self.headers, timeout=timeout)
+            req = requests.post(
+                url, data=payload, headers=self.headers, timeout=timeout)
             req.raise_for_status()
             response = json.loads(req.text)
-        except (requests.exceptions.HTTPError, requests.exceptions.Timeout, requests.exceptions.ConnectionError)\
-                as exc:
+        except (requests.exceptions.HTTPError, requests.exceptions.Timeout,
+                requests.exceptions.ConnectionError) as exc:
             return _exception(exc, sys.exc_info(), req)
         except Exception as e:
             LOG.error("Exception (at plans_create) is: %s" % e)
@@ -145,8 +156,8 @@
             url = self._api_endpoint() + '/plans/' + stack.id
             self.headers['X-Auth-Token'] = auth_token
             req = requests.delete(url, headers=self.headers, timeout=timeout)
-        except (requests.exceptions.HTTPError, requests.exceptions.Timeout, requests.exceptions.ConnectionError)\
-                as exc:
+        except (requests.exceptions.HTTPError, requests.exceptions.Timeout,
+                requests.exceptions.ConnectionError) as exc:
             return _exception(exc, sys.exc_info(), req)
         except Exception as e:
             LOG.error("Exception (plans_delete) is: %s" % e)
@@ -167,7 +178,8 @@
                     "resource_id": res_id
                 }
                 payload = json.dumps(kwargs)
-                req = requests.post(url, data=payload, headers=self.headers, timeout=timeout)
+                req = requests.post(
+                    url, data=payload, headers=self.headers, timeout=timeout)
             else:
                 req = requests.get(url, headers=self.headers, timeout=timeout)
 
@@ -175,8 +187,8 @@
             # req.raise_for_status()
             response = json.loads(req.text)
 
-        except (requests.exceptions.HTTPError, requests.exceptions.Timeout, requests.exceptions.ConnectionError)\
-                as exc:
+        except (requests.exceptions.HTTPError, requests.exceptions.Timeout,
+                requests.exceptions.ConnectionError) as exc:
             return _exception(exc, sys.exc_info(), req)
         except Exception as e:  # pylint: disable=W0702
             LOG.error("Exception (placement) is: %s" % e)
diff --git a/plugins/valet_plugins/heat/GroupAssignment.py b/plugins/valet_plugins/heat/GroupAssignment.py
index 2f4cf88..b330886 100644
--- a/plugins/valet_plugins/heat/GroupAssignment.py
+++ b/plugins/valet_plugins/heat/GroupAssignment.py
@@ -1,39 +1,37 @@
-# -*- encoding: utf-8 -*-
 #
-# Copyright (c) 2014-2016 AT&T
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+#    http://www.apache.org/licenses/LICENSE-2.0
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing permissions and
-# limitations under the License.
- -'''GroupAssignment Heat Resource Plugin''' +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. from heat.common.i18n import _ from heat.engine import constraints from heat.engine import properties from heat.engine import resource - from oslo_log import log as logging LOG = logging.getLogger(__name__) class GroupAssignment(resource.Resource): - ''' A Group Assignment describes one or more resources assigned to a particular type of group. + '''Group assignment + + A Group Assignment describes one or more resources assigned to a particular + type of group. Assignments can reference other assignments, so long as + there are no circular references. - Assignments can reference other assignments, so long as there are no circular references. There are three types of groups: affinity, diversity, and exclusivity. Exclusivity groups have a unique name, assigned through Valet. - This resource is purely informational in nature and makes no changes to heat, nova, or cinder. + This resource is purely informational in nature and makes no changes to + heat, nova, or cinder. The Valet Heat Lifecycle Plugin passes this information to the optimizer. ''' @@ -91,7 +89,7 @@ class GroupAssignment(resource.Resource): '''Create resource''' self.resource_id_set(self.physical_resource_name()) - def handle_update(self, json_snippet, templ_diff, prop_diff): # pylint: disable=W0613 + def handle_update(self, json_snippet, templ_diff, prop_diff): '''Update resource''' self.resource_id_set(self.physical_resource_name()) diff --git a/plugins/valet_plugins/plugins/heat/plugins.py b/plugins/valet_plugins/plugins/heat/plugins.py index d7c37c1..c01a393 100644 --- a/plugins/valet_plugins/plugins/heat/plugins.py +++ b/plugins/valet_plugins/plugins/heat/plugins.py @@ -1,37 +1,31 @@ -# -*- encoding: utf-8 -*- +# Copyright 2014-2017 AT&T Intellectual Property # -# Copyright (c) 2014-2016 AT&T +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 # -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing permissions and -# limitations under the License. - -'''Valet Plugins for Heat''' +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import string +import uuid from heat.engine import lifecycle_plugin - -from valet_plugins.common import valet_api - from oslo_config import cfg from oslo_log import log as logging -import string -import uuid +from valet_plugins.common import valet_api CONF = cfg.CONF LOG = logging.getLogger(__name__) def validate_uuid4(uuid_string): - ''' Validate that a UUID string is in fact a valid uuid4. + '''Validate that a UUID string is in fact a valid uuid4. Happily, the uuid module does the actual checking for us. It is vital that the 'version' kwarg be passed to the @@ -55,7 +49,7 @@ def validate_uuid4(uuid_string): class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): - ''' Base class for pre-op and post-op work on a stack. + '''Base class for pre-op and post-op work on a stack. Implementations should extend this class and override the methods. ''' @@ -68,9 +62,10 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): self.hints_enabled = cfg.CONF.stack_scheduler_hints def _parse_stack_preview(self, dest, preview): - ''' Walk the preview list (possibly nested) + '''Walk the preview list (possibly nested) - extracting parsed template dicts and storing modified versions in a flat dict. + extracting parsed template dicts and storing modified + versions in a flat dict. ''' # The preview is either a list or not. if not isinstance(preview, list): @@ -100,7 +95,7 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): self._parse_stack_preview(dest, item) def do_pre_op(self, cnxt, stack, current_stack=None, action=None): - ''' Method to be run by heat before stack operations. ''' + '''Method to be run by heat before stack operations. ''' if not self.hints_enabled or stack.status != 'IN_PROGRESS': return @@ -131,9 +126,9 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): self.api.plans_create(stack, plan, auth_token=cnxt.auth_token) - def do_post_op(self, cnxt, stack, current_stack=None, action=None, # pylint: disable=R0913 + def do_post_op(self, cnxt, stack, current_stack=None, action=None, is_stack_failure=False): - ''' Method to be run by heat after stack operations, including failures. + '''Method to be run by heat after stack operations, including failures. On failure to execute all the registered pre_ops, this method will be called if and only if the corresponding pre_op was successfully called. @@ -143,7 +138,7 @@ class ValetLifecyclePlugin(lifecycle_plugin.LifecyclePlugin): pass def get_ordinal(self): - ''' An ordinal used to order class instances for pre and post operation execution. + '''An ordinal used to order instances for pre/post operation execution. The values returned by get_ordinal are used to create a partial order for pre and post operation method invocations. The default ordinal diff --git a/plugins/valet_plugins/plugins/nova/valet_filter.py b/plugins/valet_plugins/plugins/nova/valet_filter.py index 5b75af0..2c0753c 100644 --- a/plugins/valet_plugins/plugins/nova/valet_filter.py +++ b/plugins/valet_plugins/plugins/nova/valet_filter.py @@ -1,33 +1,28 @@ -# -*- encoding: utf-8 -*- +# Copyright 2014-2017 AT&T Intellectual Property # -# Copyright (c) 2014-2016 AT&T +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 # -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing permissions and -# limitations under the License. - -'''Valet Nova Scheduler Filter''' +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import time from keystoneclient.v2_0 import client - from nova.i18n import _ -from nova.i18n import _LI, _LW, _LE +from nova.i18n import _LE +from nova.i18n import _LI +from nova.i18n import _LW from nova.scheduler import filters - -from valet_plugins.common import valet_api - from oslo_config import cfg from oslo_log import log as logging -import time +from valet_plugins.common import valet_api CONF = cfg.CONF LOG = logging.getLogger(__name__) @@ -80,16 +75,23 @@ class ValetFilter(filters.BaseHostFilter): def _register_opts(self): '''Register Options''' opts = [] - option = cfg.StrOpt(self.opt_failure_mode_str, choices=['reject', 'yield'], default='reject', - help=_('Mode to operate in if Valet planning fails for any reason.')) + option = cfg.StrOpt( + self.opt_failure_mode_str, + choices=['reject', 'yield'], default='reject', + help=_('Mode to operate in if Valet planning fails.')) opts.append(option) - option = cfg.StrOpt(self.opt_project_name_str, default=None, help=_('Valet Project Name')) + option = cfg.StrOpt(self.opt_project_name_str, + default=None, help=_('Valet Project Name')) opts.append(option) - option = cfg.StrOpt(self.opt_username_str, default=None, help=_('Valet Username')) + option = cfg.StrOpt(self.opt_username_str, + default=None, help=_('Valet Username')) opts.append(option) - option = cfg.StrOpt(self.opt_password_str, default=None, help=_('Valet Password')) + option = cfg.StrOpt(self.opt_password_str, + default=None, help=_('Valet Password')) opts.append(option) - option = cfg.StrOpt(self.opt_auth_uri_str, default=None, help=_('Keystone Authorization API Endpoint')) + option = cfg.StrOpt(self.opt_auth_uri_str, + default=None, + help=_('Keystone Authorization API Endpoint')) opts.append(option) opt_group = cfg.OptGroup(self.opt_group_str) @@ -127,7 +129,8 @@ class ValetFilter(filters.BaseHostFilter): except Exception as ex: failed = ex if failed: - LOG.warn("Failed to filter the hosts, failure mode is %s" % failure_mode) + msg = _LW("Failed to filter the hosts, failure mode is %s") + LOG.warn(msg % failure_mode) if failure_mode == 'yield': LOG.warn(failed) yield_all = True @@ -135,7 +138,9 @@ class ValetFilter(filters.BaseHostFilter): LOG.error(failed) # if not filter_properties.get(hints_key, {}).has_key(orch_id_key): elif orch_id_key not in filter_properties.get(hints_key, {}): - LOG.info(_LW("Valet: Heat Stack Lifecycle Scheduler Hints not found. Performing ad-hoc placement.")) + msg = _LW("Valet: Heat Stack Lifecycle Scheduler Hints not found. " + "Performing ad-hoc placement.") + LOG.info(msg) ad_hoc = True # We'll need the flavor. 
@@ -175,14 +180,18 @@ class ValetFilter(filters.BaseHostFilter): response = None while count < self.retries: try: - response = self.api.plans_create(None, plan, auth_token=self._auth_token) + response = self.api.plans_create( + None, plan, auth_token=self._auth_token) except Exception: # TODO(JD): Get context from exception - LOG.error(_LE("Raise exception for ad hoc placement request.")) + msg = _LE("Raise exception for ad hoc placement request.") + LOG.error(msg) response = None if response is None: count += 1 - LOG.warn("Valet conn error for plan_create, Retry " + str(count) + " for stack = " + res_id) + msg = ("Valet conn error for plan_create Retry {0} " + "for stack = {1}.") + LOG.warn(msg.format(str(count), res_id)) time.sleep(self.interval) else: break @@ -196,9 +205,13 @@ class ValetFilter(filters.BaseHostFilter): location = placement['location'] if not location: - LOG.error(_LE("Valet ad-hoc placement unknown for resource id %s.") % res_id) + msg = _LE("Valet ad-hoc placement unknown " + "for resource id {0}.") + LOG.error(msg.format(res_id)) if failure_mode == 'yield': - LOG.warn(_LW("Valet will yield to Nova for placement decisions.")) + msg = _LW("Valet will yield to Nova for " + "placement decisions.") + LOG.warn(msg) yield_all = True else: yield_all = False @@ -209,14 +222,18 @@ class ValetFilter(filters.BaseHostFilter): response = None while count < self.retries: try: - response = self.api.placement(orch_id, res_id, hosts=hosts, auth_token=self._auth_token) + response = self.api.placement( + orch_id, res_id, hosts=hosts, + auth_token=self._auth_token) except Exception: LOG.error(_LW("Raise exception for placement request.")) response = None if response is None: count += 1 - LOG.warn("Valet conn error for placement Retry " + str(count) + " for stack = " + orch_id) + msg = _LW("Valet conn error for placement Retry {0} " + "for stack = {1}.") + LOG.warn(msg.format(str(count), orch_id)) time.sleep(self.interval) else: break @@ -228,9 +245,13 @@ class ValetFilter(filters.BaseHostFilter): if not location: # TODO(JD): Get context from exception - LOG.error(_LE("Valet placement unknown for resource id {0}, orchestration id {1}.").format(res_id, orch_id)) + msg = _LE("Valet placement unknown for resource id {0}, " + "orchestration id {1}.") + LOG.error(msg.format(res_id, orch_id)) if failure_mode == 'yield': - LOG.warn(_LW("Valet will yield to Nova for placement decisions.")) + msg = _LW("Valet will yield to Nova for " + "placement decisions.") + LOG.warn(msg) yield_all = True else: yield_all = False @@ -243,15 +264,19 @@ class ValetFilter(filters.BaseHostFilter): match = self._is_same_host(obj.host, location) if match: if ad_hoc: - LOG.info(_LI("Valet ad-hoc placement for resource id {0}: {1}.").format(res_id, obj.host)) + msg = _LI("Valet ad-hoc placement for resource id " + "{0}: {1}.") + LOG.info(msg.format(res_id, obj.host)) else: - LOG.info(_LI("Valet placement for resource id %s, orchestration id {0}: {1}.").format(res_id, orch_id, obj.host)) + msg = _LI("Valet placement for resource id {0}, " + "orchestration id {1}: {2}.") + LOG.info(msg.format(res_id, orch_id, obj.host)) else: match = None if yield_all or match: yield obj - def host_passes(self, host_state, filter_properties): # pylint: disable=W0613,R0201 + def host_passes(self, host_state, filter_properties): '''Individual host pass check''' # Intentionally let filter_all() handle in one swell foop. 
return False diff --git a/plugins/valet_plugins/tests/base.py b/plugins/valet_plugins/tests/base.py index 26665b8..3d2461a 100644 --- a/plugins/valet_plugins/tests/base.py +++ b/plugins/valet_plugins/tests/base.py @@ -27,7 +27,6 @@ class Base(BaseTestCase): """Test case base class for all unit tests.""" def __init__(self, *args, **kwds): - ''' ''' super(Base, self).__init__(*args, **kwds) self.CONF = self.useFixture(fixture_config.Config()).conf @@ -36,7 +35,6 @@ class Base(BaseTestCase): super(Base, self).setUp() def run_test(self, stack_name, template_path): - ''' main function ''' pass def validate(self, result): diff --git a/plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py b/plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py index 8044105..8f8ac40 100644 --- a/plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py +++ b/plugins/valet_plugins/tests/unit/mocks/heat/engine/lifecycle_plugin.py @@ -11,15 +11,8 @@ # License for the specific language governing permissions and limitations # under the License. -''' -Created on Sep 14, 2016 - -@author: stack -''' - class LifecyclePlugin(object): - ''' classdocs ''' def __init__(self, params): - ''' Constructor ''' + pass diff --git a/plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py b/plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py index ad629e1..8c889e2 100644 --- a/plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py +++ b/plugins/valet_plugins/tests/unit/mocks/nova/scheduler/filters.py @@ -10,15 +10,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -''' -Created on Sep 15, 2016 - -@author: stack -''' class BaseHostFilter(object): - ''' classdocs ''' def __init__(self, params): - ''' Constructor ''' + pass diff --git a/plugins/valet_plugins/tests/unit/test_plugins.py b/plugins/valet_plugins/tests/unit/test_plugins.py index bb9d8db..99888f0 100644 --- a/plugins/valet_plugins/tests/unit/test_plugins.py +++ b/plugins/valet_plugins/tests/unit/test_plugins.py @@ -1,15 +1,15 @@ # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import mock from valet_plugins.plugins.heat.plugins import ValetLifecyclePlugin @@ -50,8 +50,10 @@ class TestPlugins(Base): self.valet_life_cycle_plugin.hints_enabled = True stack.status = "IN_PROGRESS" self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="DELETE") - self.validate_test("plans_delete" in self.valet_life_cycle_plugin.api.method_calls[0]) + self.validate_test( + "plans_delete" in self.valet_life_cycle_plugin.api.method_calls[0]) # action create self.valet_life_cycle_plugin.do_pre_op(cnxt, stack, action="CREATE") - self.validate_test("plans_create" in self.valet_life_cycle_plugin.api.method_calls[1]) + self.validate_test( + "plans_create" in self.valet_life_cycle_plugin.api.method_calls[1]) diff --git a/plugins/valet_plugins/tests/unit/test_valet_api.py b/plugins/valet_plugins/tests/unit/test_valet_api.py index cd1383b..c1b46a5 100644 --- a/plugins/valet_plugins/tests/unit/test_valet_api.py +++ b/plugins/valet_plugins/tests/unit/test_valet_api.py @@ -12,8 +12,10 @@ # under the License. import mock + +from valet_plugins.common.valet_api import requests +from valet_plugins.common.valet_api import ValetAPIWrapper from valet_plugins.tests.base import Base -from valet_plugins.common.valet_api import ValetAPIWrapper, requests class TestValetApi(Base): diff --git a/plugins/valet_plugins/tests/unit/test_valet_filter.py b/plugins/valet_plugins/tests/unit/test_valet_filter.py index 7219265..a764521 100644 --- a/plugins/valet_plugins/tests/unit/test_valet_filter.py +++ b/plugins/valet_plugins/tests/unit/test_valet_filter.py @@ -45,27 +45,36 @@ class TestValetFilter(Base): mock_create.return_value = None with mock.patch('oslo_config.cfg.CONF') as config: - setattr(config, "valet", {self.valet_filter.opt_failure_mode_str: "yield", - self.valet_filter.opt_project_name_str: "test_admin_tenant_name", - self.valet_filter.opt_username_str: "test_admin_username", - self.valet_filter.opt_password_str: "test_admin_password", - self.valet_filter.opt_auth_uri_str: "test_admin_auth_url"}) + setattr( + config, "valet", + {self.valet_filter.opt_failure_mode_str: "yield", + self.valet_filter.opt_project_name_str: + "test_admin_tenant_name", + self.valet_filter.opt_username_str: "test_admin_username", + self.valet_filter.opt_password_str: "test_admin_password", + self.valet_filter.opt_auth_uri_str: "test_admin_auth_url"}) - filter_properties = {'request_spec': {'instance_properties': {'uuid': ""}}, - 'scheduler_hints': {'heat_resource_uuid': "123456"}, - 'instance_type': {'name': "instance_name"}} + filter_properties = { + 'request_spec': {'instance_properties': {'uuid': ""}}, + 'scheduler_hints': {'heat_resource_uuid': "123456"}, + 'instance_type': {'name': "instance_name"}} - resources = self.valet_filter.filter_all([TestResources("first_host"), TestResources("second_host")], filter_properties) + resources = self.valet_filter.filter_all( + [TestResources("first_host"), TestResources("second_host")], + filter_properties) for resource in resources: self.validate_test(resource.host in "first_host, second_host") self.validate_test(mock_placement.called) - filter_properties = {'request_spec': {'instance_properties': {'uuid': ""}}, - 'scheduler_hints': "scheduler_hints", - 'instance_type': {'name': "instance_name"}} + filter_properties = { + 'request_spec': {'instance_properties': {'uuid': ""}}, + 'scheduler_hints': "scheduler_hints", + 'instance_type': {'name': "instance_name"}} - resources = self.valet_filter.filter_all([TestResources("first_host"), TestResources("second_host")], filter_properties) + 
resources = self.valet_filter.filter_all( + [TestResources("first_host"), TestResources("second_host")], + filter_properties) for _ in resources: self.validate_test(mock_create.called) diff --git a/tox.ini b/tox.ini index 09e030a..64df0a8 100644 --- a/tox.ini +++ b/tox.ini @@ -16,10 +16,9 @@ whitelist_externals = find [testenv:pep8] -# TODO(lamt) Making this gate noop until other gates are fixed. Need to revert -# when the other gates are fixed. -# commands = flake8 -commands = /bin/true +basepython = python2.7 +deps = {[testenv]deps} +commands = flake8 [testenv:venv] commands = {posargs} @@ -46,9 +45,14 @@ whitelist_externals = bash [flake8] -# E123, E125 skipped as they are invalid PEP-8. -show-source = True -ignore = E123,E125,E501,H401,H105,H301 +filename = *.py +show-source = true +# D100: Missing docstring in public module +# D101: Missing docstring in public class +# D102: Missing docstring in public method +# D103: Missing docstring in public function +# D104: Missing docstring in public package +# D203: 1 blank line required before class docstring (deprecated in pep257) +ignore = D100,D101,D102,D103,D104,D203 builtins = _ -exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build -max-complexity=24 +exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build diff --git a/valet/api/app.py b/valet/api/app.py index 2992b16..3e87c80 100644 --- a/valet/api/app.py +++ b/valet/api/app.py @@ -17,8 +17,10 @@ from pecan.deploy import deploy from pecan import make_app + from valet import api -from valet.api.common import identity, messaging +from valet.api.common import identity +from valet.api.common import messaging from valet.api.db import models from valet.common.conf import get_logger from valet.common.conf import init_conf diff --git a/valet/api/common/__init__.py b/valet/api/common/__init__.py index 3bdd265..ec2f94d 100644 --- a/valet/api/common/__init__.py +++ b/valet/api/common/__init__.py @@ -26,9 +26,10 @@ def terminate_thread(thread): if not thread.isAlive(): return - # print('valet watcher thread: notifier thread is alive... 
- kill it...') exc = ctypes.py_object(SystemExit) - res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc( + ctypes.c_long(thread.ident), exc) + if res == 0: raise ValueError("nonexistent thread id") elif res > 1: @@ -36,4 +37,3 @@ def terminate_thread(thread): # and you should call it again with exc=NULL to revert the effect ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None) raise SystemError("PyThreadState_SetAsyncExc failed") - # print('valet watcher thread exits') diff --git a/valet/api/common/hooks.py b/valet/api/common/hooks.py index 7f16de9..b906ee4 100644 --- a/valet/api/common/hooks.py +++ b/valet/api/common/hooks.py @@ -88,11 +88,13 @@ class MessageNotificationHook(PecanHook): # notifier_fn blocks in case rabbit mq is down # it prevents Valet API to return its response # send the notification in a different thread - notifier_thread = threading.Thread(target=notifier_fn, args=(ctxt, event_type, payload)) + notifier_thread = threading.Thread( + target=notifier_fn, args=(ctxt, event_type, payload)) notifier_thread.start() # launch a timer to verify no hung threads are left behind # (when timeout expires kill the notifier thread if it still alive) - watcher = threading.Timer(conf.messaging.timeout, terminate_thread, args=[notifier_thread]) + watcher = threading.Timer( + conf.messaging.timeout, terminate_thread, args=[notifier_thread]) watcher.start() api.LOG.info('notification sent.') diff --git a/valet/api/common/ostro_helper.py b/valet/api/common/ostro_helper.py index afa6796..2661836 100755 --- a/valet/api/common/ostro_helper.py +++ b/valet/api/common/ostro_helper.py @@ -129,7 +129,7 @@ class Ostro(object): def _send(self, stack_id, request): """Send request.""" # Creating the placement request effectively enqueues it. - PlacementRequest(stack_id=stack_id, request=request) # pylint: disable=W0612 + PlacementRequest(stack_id=stack_id, request=request) result_query = Query(PlacementResult) for __ in range(self.tries, 0, -1): # pylint: disable=W0612 @@ -193,7 +193,7 @@ class Ostro(object): GROUP_TYPE, EXCLUSIVITY) - group = Group.query.filter_by(name=group_name).first() # pylint: disable=E1101 + group = Group.query.filter_by(name=group_name).first() if not group: self.error_uri = '/errors/not_found' diff --git a/valet/api/db/models/music/__init__.py b/valet/api/db/models/music/__init__.py index 3f59a43..7b3d7af 100644 --- a/valet/api/db/models/music/__init__.py +++ b/valet/api/db/models/music/__init__.py @@ -15,7 +15,8 @@ """Music ORM - Common Methods""" -from abc import ABCMeta, abstractmethod +from abc import ABCMeta +from abc import abstractmethod import inspect from pecan import conf import six @@ -102,7 +103,7 @@ class Results(list): @six.add_metaclass(ABCMeta) class Base(object): - """ A custom declarative base that provides some Elixir-inspired shortcuts. """ + """Custom declarative base that provides some Elixir-inspired shortcuts.""" __tablename__ = None diff --git a/valet/api/db/models/music/groups.py b/valet/api/db/models/music/groups.py index 6bf62fb..6982079 100644 --- a/valet/api/db/models/music/groups.py +++ b/valet/api/db/models/music/groups.py @@ -12,12 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Group Model.""" - -from . 
import Base import simplejson +from valet.api.db.models.music import Base + class Group(Base): """Group model.""" diff --git a/valet/api/db/models/music/ostro.py b/valet/api/db/models/music/ostro.py index fe51f8f..13109e7 100644 --- a/valet/api/db/models/music/ostro.py +++ b/valet/api/db/models/music/ostro.py @@ -15,7 +15,7 @@ """Ostro Models.""" -from . import Base +from valet.api.db.models.music import Base class PlacementRequest(Base): diff --git a/valet/api/db/models/music/placements.py b/valet/api/db/models/music/placements.py index 3fe8ab1..5ac78fd 100644 --- a/valet/api/db/models/music/placements.py +++ b/valet/api/db/models/music/placements.py @@ -15,7 +15,8 @@ """Placement Model.""" -from . import Base, Query +from valet.api.db.models.music import Base +from valet.api.db.models.music import Query class Placement(Base): diff --git a/valet/api/v1/commands/populate.py b/valet/api/v1/commands/populate.py index b0fe9b2..e4c4dc2 100644 --- a/valet/api/v1/commands/populate.py +++ b/valet/api/v1/commands/populate.py @@ -12,10 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Populate command.""" - from pecan.commands.base import BaseCommand + from valet import api from valet.api.common.i18n import _ from valet.api.db import models @@ -25,7 +23,8 @@ from valet.api.db.models import Placement from valet.api.db.models import PlacementRequest from valet.api.db.models import PlacementResult from valet.api.db.models import Plan -from valet.common.conf import init_conf, get_logger +from valet.common.conf import get_logger +from valet.common.conf import init_conf class PopulateCommand(BaseCommand): diff --git a/valet/api/v1/controllers/__init__.py b/valet/api/v1/controllers/__init__.py index 0159ac5..b4ab0cc 100644 --- a/valet/api/v1/controllers/__init__.py +++ b/valet/api/v1/controllers/__init__.py @@ -18,24 +18,23 @@ from notario.decorators import instance_of from notario import ensure from os import path - -from pecan import redirect, request +from pecan import redirect +from pecan import request import string + from valet import api from valet.api.common.i18n import _ from valet.api.db.models.music.placements import Placement -# -# Notario Helpers -# - def valid_group_name(value): """Validator for group name type.""" - if not value or not set(value) <= set(string.letters + string.digits + "-._~"): + if (not value or + not set(value) <= set(string.letters + string.digits + "-._~")): api.LOG.error("group name is not valid") - api.LOG.error("group name must contain only uppercase and lowercase letters, decimal digits, \ - hyphens, periods, underscores, and tildes [RFC 3986, Section 2.3]") + api.LOG.error("group name must contain only uppercase and lowercase " + "letters, decimal digits, hyphens, periods, " + "underscores, "" and tildes [RFC 3986, Section 2.3]") @instance_of((list, dict)) @@ -59,7 +58,7 @@ def set_placements(plan, resources, placements): name = resources[uuid]['name'] properties = placements[uuid]['properties'] location = properties['host'] - Placement(name, uuid, plan=plan, location=location) # pylint: disable=W0612 + Placement(name, uuid, plan=plan, location=location) return plan @@ -73,11 +72,14 @@ def reserve_placement(placement, resource_id=None, reserve=True, update=True): if placement: api.LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'), {'rsrv': _("Reserving") if reserve else _("Unreserving"), - 'orch_id': 
placement.orchestration_id, 'loc': placement.location}) + 'orch_id': placement.orchestration_id, + 'loc': placement.location}) placement.reserved = reserve if resource_id: - api.LOG.info(_('Associating resource id %(res_id)s with orchestration id %(orch_id)s.'), - {'res_id': resource_id, 'orch_id': placement.orchestration_id}) + msg = _('Associating resource id %(res_id)s with orchestration ' + 'id %(orch_id)s.') + api.LOG.info(msg, {'res_id': resource_id, + 'orch_id': placement.orchestration_id}) placement.resource_id = resource_id if update: placement.update() @@ -92,9 +94,11 @@ def update_placements(placements, reserve_id=None, unlock_all=False): properties = placements[uuid]['properties'] location = properties['host'] if placement.location != location: - api.LOG.info(_('Changing placement of %(orch_id)s from %(old_loc)s to %(new_loc)s.'), - {'orch_id': placement.orchestration_id, 'old_loc': placement.location, - 'new_loc': location}) + msg = _('Changing placement of %(orch_id)s from %(old_loc)s ' + 'to %(new_loc)s.') + api.LOG.info(msg, {'orch_id': placement.orchestration_id, + 'old_loc': placement.location, + 'new_loc': location}) placement.location = location if unlock_all: reserve_placement(placement, reserve=False, update=False) diff --git a/valet/api/v1/controllers/errors.py b/valet/api/v1/controllers/errors.py index 60a0fd2..77c4fb2 100644 --- a/valet/api/v1/controllers/errors.py +++ b/valet/api/v1/controllers/errors.py @@ -15,10 +15,13 @@ """Errors.""" -from pecan import expose, request, response +from pecan import expose +from pecan import request +from pecan import response +from webob.exc import status_map + from valet.api.common.i18n import _ from valet.api import LOG -from webob.exc import status_map def error_wrapper(func): @@ -49,7 +52,6 @@ def error_wrapper(func): return func_wrapper -# pylint: disable=W0613 class ErrorsController(object): """Error Controller /errors/{error_name}.""" diff --git a/valet/api/v1/controllers/groups.py b/valet/api/v1/controllers/groups.py index 0fe878f..a4ac013 100644 --- a/valet/api/v1/controllers/groups.py +++ b/valet/api/v1/controllers/groups.py @@ -17,15 +17,20 @@ from notario import decorators from notario.validators import types -from pecan import conf, expose, request, response +from pecan import conf +from pecan import expose +from pecan import request +from pecan import response from pecan_notario import validate +from valet import api from valet.api.common.compute import nova_client from valet.api.common.i18n import _ from valet.api.common.ostro_helper import Ostro from valet.api.db.models.music.groups import Group -from valet.api.v1.controllers import error, valid_group_name -from valet import api +from valet.api.v1.controllers import error +from valet.api.v1.controllers import valid_group_name + GROUPS_SCHEMA = ( (decorators.optional('description'), types.string), @@ -260,7 +265,8 @@ class GroupsItemController(object): """Delete a group.""" group = request.context['group'] if isinstance(group.members, list) and len(group.members) > 0: - error('/errors/conflict', _('Unable to delete a Group with members.')) + error('/errors/conflict', + _('Unable to delete a Group with members.')) group.delete() response.status = 204 diff --git a/valet/api/v1/controllers/placements.py b/valet/api/v1/controllers/placements.py index cdb4900..07e796c 100644 --- a/valet/api/v1/controllers/placements.py +++ b/valet/api/v1/controllers/placements.py @@ -12,16 +12,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +from pecan import expose +from pecan import request +from pecan import response -"""Placements.""" - -from pecan import expose, request, response +from valet import api from valet.api.common.i18n import _ from valet.api.common.ostro_helper import Ostro from valet.api.db.models.music.placements import Placement from valet.api.db.models.music.plans import Plan -from valet.api.v1.controllers import error, reserve_placement, update_placements -from valet import api +from valet.api.v1.controllers import error +from valet.api.v1.controllers import reserve_placement +from valet.api.v1.controllers import update_placements + # pylint: disable=R0201 @@ -75,8 +78,10 @@ class PlacementsItemController(object): Once reserved, the location effectively becomes immutable. """ res_id = kwargs.get('resource_id') - api.LOG.info(_('Placement reservation request for resource id %(res_id)s, orchestration id %(orch_id)s.'), - {'res_id': res_id, 'orch_id': self.placement.orchestration_id}) + msg = _('Placement reservation request for resource id %(res_id)s, ' + 'orchestration id %(orch_id)s.') + api.LOG.info(msg, {'res_id': res_id, + 'orch_id': self.placement.orchestration_id}) locations = kwargs.get('locations', []) locations_str = ', '.join(locations) api.LOG.info(_('Candidate locations: %s'), locations_str) @@ -89,8 +94,11 @@ class PlacementsItemController(object): else: # Ostro's placement is NOT in the list of candidates. # Time for Plan B. - api.LOG.info(_('Placement of resource id %(res_id)s, orchestration id %(orch_id)s in %(loc)s not allowed. Replanning.'), - {'res_id': res_id, 'orch_id': self.placement.orchestration_id, 'loc': self.placement.location}) + msg = _('Placement of resource id %(res_id)s, orchestration id ' + '%(orch_id)s in %(loc)s not allowed. Replanning.') + api.LOG.info(msg, {'res_id': res_id, + 'orch_id': self.placement.orchestration_id, + 'loc': self.placement.location}) # Unreserve the placement. Remember the resource id too. 
kwargs = {'resource_id': res_id, 'reserve': False} @@ -107,7 +115,8 @@ class PlacementsItemController(object): exclusions = [x.orchestration_id for x in reserved] if exclusions: exclusions_str = ', '.join(exclusions) - api.LOG.info(_('Excluded orchestration IDs: %s'), exclusions_str) + api.LOG.info(_('Excluded orchestration IDs: %s'), + exclusions_str) else: api.LOG.info(_('No excluded orchestration IDs.')) diff --git a/valet/api/v1/controllers/plans.py b/valet/api/v1/controllers/plans.py index 3b4e354..8b67121 100644 --- a/valet/api/v1/controllers/plans.py +++ b/valet/api/v1/controllers/plans.py @@ -17,18 +17,20 @@ from notario import decorators from notario.validators import types -from pecan import expose, request, response +from pecan import expose +from pecan import request +from pecan import response from pecan_notario import validate from valet.api.common.i18n import _ from valet.api.common.ostro_helper import Ostro from valet.api.db.models.music.placements import Placement from valet.api.db.models.music.plans import Plan +from valet.api import LOG from valet.api.v1.controllers import error from valet.api.v1.controllers import set_placements from valet.api.v1.controllers import update_placements from valet.api.v1.controllers import valid_plan_update_action -from valet.api import LOG CREATE_SCHEMA = ( @@ -49,9 +51,6 @@ UPDATE_SCHEMA = ( ) -# pylint: disable=R0201 - - class PlansItemController(object): """Plan Item Controller /v1/plans/{plan_id}.""" @@ -115,8 +114,9 @@ class PlansItemController(object): placement = Placement.query.filter_by( orchestration_id=the_id).first() # pylint: disable=E1101 if not placement: - error('/errors/invalid', _('Unknown resource or ' - 'orchestration id: %s') % the_id) + error('/errors/invalid', + _('Unknown resource or ' + 'orchestration id: %s') % the_id) LOG.info(_('Migration request for resource id {0}, ' 'orchestration id {1}.').format( diff --git a/valet/api/v1/controllers/root.py b/valet/api/v1/controllers/root.py index 7211a04..ff9cdfb 100644 --- a/valet/api/v1/controllers/root.py +++ b/valet/api/v1/controllers/root.py @@ -12,25 +12,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -"""Root.""" - -from pecan import expose, request, response -from valet.api.common.i18n import _ -from valet.api.v1.controllers import error -from valet.api.v1.controllers.errors import ErrorsController, error_wrapper -from valet.api.v1.controllers.v1 import V1Controller - +from pecan import expose +from pecan import request +from pecan import response from webob.exc import status_map -# pylint: disable=R0201 +from valet.api.common.i18n import _ +from valet.api.v1.controllers import error +from valet.api.v1.controllers.errors import error_wrapper +from valet.api.v1.controllers.errors import ErrorsController +from valet.api.v1.controllers.v1 import V1Controller class RootController(object): """Root Controller.""" errors = ErrorsController() - v1 = V1Controller() # pylint: disable=C0103 + v1 = V1Controller() @classmethod def allow(cls): @@ -77,7 +75,7 @@ class RootController(object): """Error handler.""" try: status = int(status) - except ValueError: # pragma: no cover + except ValueError: status = 500 message = getattr(status_map.get(status), 'explanation', '') return dict(status=status, message=message) diff --git a/valet/api/v1/controllers/status.py b/valet/api/v1/controllers/status.py index 39bc1cf..c4425f9 100644 --- a/valet/api/v1/controllers/status.py +++ b/valet/api/v1/controllers/status.py @@ -12,16 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from pecan import expose +from pecan import request +from pecan import response -"""Status.""" - -from pecan import expose, request, response from valet.api.common.i18n import _ from valet.api.common.ostro_helper import Ostro from valet.api.v1.controllers import error -# pylint: disable=R0201 - class StatusController(object): """Status Controller /v1/status.""" diff --git a/valet/api/v1/controllers/v1.py b/valet/api/v1/controllers/v1.py index a2927f4..014a0e8 100644 --- a/valet/api/v1/controllers/v1.py +++ b/valet/api/v1/controllers/v1.py @@ -12,11 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""v1.""" - -from pecan import conf, expose, request, response +from pecan import conf +from pecan import expose +from pecan import request +from pecan import response from pecan.secure import SecureController + from valet import api from valet.api.common.i18n import _ from valet.api.v1.controllers import error diff --git a/valet/api/wsgi.py b/valet/api/wsgi.py index 7b264f7..5df357c 100644 --- a/valet/api/wsgi.py +++ b/valet/api/wsgi.py @@ -12,14 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -"""WSGI Wrapper.""" - -from common.i18n import _ import os from pecan.deploy import deploy -from valet.common.conf import init_conf, get_logger + +from common.i18n import _ + from valet import api +from valet.common.conf import get_logger +from valet.common.conf import init_conf def config_file(file_name=None): @@ -50,7 +50,8 @@ if __name__ == '__main__': # from valet.api.conf import register_conf, set_domain init_conf("wsgi.log") api.LOG = get_logger("wsgi") - HTTPD = make_server('', 8090, deploy(config_file('/var/www/valet/config.py'))) + HTTPD = make_server( + '', 8090, deploy(config_file('/var/www/valet/config.py'))) print(_("Serving HTTP on port 8090...")) # Respond to requests until process is killed diff --git a/valet/cli/groupcli.py b/valet/cli/groupcli.py index 2b7a0ed..2ec92d6 100644 --- a/valet/cli/groupcli.py +++ b/valet/cli/groupcli.py @@ -87,9 +87,8 @@ def add_to_parser(service_sub): parser_delete_group.add_argument('groupid', type=str, help='') # delete group member - parser_delete_group_member = subparsers.add_parser('delete-member', - help='Delete member from' - 'specified group.') + parser_delete_group_member = subparsers.add_parser( + 'delete-member', help='Delete member from specified group.') parser_delete_group_member.add_argument('groupid', type=str, help='') parser_delete_group_member.add_argument('memberid', type=str, @@ -144,7 +143,8 @@ def cmd_details(args): elif args.subcmd == 'delete-all-members': return requests.delete, '/%s/members' % args.groupid elif args.subcmd == 'delete-member': - return requests.delete, '/%s/members/%s' % (args.groupid, args.memberid) + return (requests.delete, '/%s/members/%s' % + (args.groupid, args.memberid)) elif args.subcmd == 'show': return requests.get, '/%s' % args.groupid elif args.subcmd == 'list': @@ -219,8 +219,8 @@ def run(args): args.body = populate_args_request_body(args) try: - print_verbose(args.verbose, args.url, args.headers, args.body, rest_cmd, - args.timeout) + print_verbose(args.verbose, args.url, args.headers, args.body, + rest_cmd, args.timeout) if args.body: resp = rest_cmd(args.url, timeout=args.timeout, data=args.body, headers=args.headers) diff --git a/valet/common/__init__.py b/valet/common/__init__.py index 3befeaf..4f7c8d7 100644 --- a/valet/common/__init__.py +++ b/valet/common/__init__.py @@ -1,10 +1,25 @@ +# +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from oslo_config import cfg def logger_conf(logger_name): return [ - cfg.StrOpt('output_format', default="%(asctime)s - %(levelname)s - %(message)s"), # dict + cfg.StrOpt('output_format', + default="%(asctime)s - %(levelname)s - %(message)s"), cfg.BoolOpt('store', default=True), cfg.StrOpt('logging_level', default='debug'), cfg.StrOpt('logging_dir', default='/var/log/valet/'), diff --git a/valet/common/conf.py b/valet/common/conf.py index d6c3472..4e3ca14 100644 --- a/valet/common/conf.py +++ b/valet/common/conf.py @@ -73,17 +73,24 @@ music_opts = [ cfg.IntOpt('music_server_retries', default=3), ] + def load_conf(args=None, project=DOMAIN, default_files=None): - CONF(default_config_files=default_files) if default_files else CONF(args or [], project=project) + if default_files: + CONF(default_config_files=default_files) + else: + CONF(args or [], project=project) -def init_conf(log_file="valet.log", args=None, grp2opt=None, cli_opts=None, default_config_files=None): +def init_conf(log_file="valet.log", args=None, grp2opt=None, + cli_opts=None, default_config_files=None): CONF.log_file = log_file logging.register_options(CONF) # init conf - general_groups = {server_group: server_opts, music_group: music_opts, - identity_group: identity_opts, messaging_group: messaging_opts} + general_groups = {server_group: server_opts, + music_group: music_opts, + identity_group: identity_opts, + messaging_group: messaging_opts} general_groups.update(grp2opt or {}) @@ -97,6 +104,7 @@ def init_conf(log_file="valet.log", args=None, grp2opt=None, cli_opts=None, defa def _set_logger(): logging.setup(CONF, DOMAIN) + def _register_conf(grp2opt, cli_opts): for grp in grp2opt: CONF.register_group(grp) diff --git a/valet/common/music.py b/valet/common/music.py index 5b04ee8..32fc658 100644 --- a/valet/common/music.py +++ b/valet/common/music.py @@ -31,7 +31,8 @@ class REST(object): _urls = None - def __init__(self, hosts, port, path='/', timeout='10', music_server_retries=3, logger=None): + def __init__(self, hosts, port, path='/', timeout='10', + music_server_retries=3, logger=None): """Initializer. Accepts target host list, port, and path.""" self.hosts = hosts # List of IP or FQDNs @@ -82,7 +83,10 @@ class REST(object): for attempt in range(self.music_server_retries): # Ignore the previous exception. try: - response = method_fn(full_url, data=data_json, headers=self.__headers(content_type), timeout=self.timeout) + response = method_fn( + full_url, data=data_json, + headers=self.__headers(content_type), + timeout=self.timeout) response.raise_for_status() return response @@ -91,13 +95,17 @@ class REST(object): response.status_code = 408 response.url = full_url if self.logger: - self.logger.debug("Music: %s Method: %s Full Url: %s", err.message, method.upper(), full_url) + self.logger.debug( + "Music: %s Method: %s Full Url: %s", + err.message, method.upper(), full_url) except requests.exceptions.RequestException as err: response = requests.Response() response.status_code = 400 response.url = full_url if self.logger: - self.logger.debug("Music: %s Method: %s Full Url: %s", err.message, method.upper(), full_url) + self.logger.debug( + "Music: %s Method: %s Full Url: %s", + err.message, method.upper(), full_url) # If we get here, an exception was raised for every url, # but we passed so we could try each endpoint. 
Raise status diff --git a/valet/engine/conf.py b/valet/engine/conf.py index dfe36e1..53be86a 100755 --- a/valet/engine/conf.py +++ b/valet/engine/conf.py @@ -12,12 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Conf.""" +import sys from oslo_config import cfg -import sys -from valet.common import logger_conf, conf as common + +from valet.common import conf as common +from valet.common import logger_conf + CONF = cfg.CONF @@ -30,25 +31,59 @@ ostro_cli_opts = [ engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf') engine_opts = [ - cfg.StrOpt('pid', default='/var/run/valet/valet-engine.pid'), - cfg.StrOpt('mode', default='live', help='run as actual or simulation for test'), - cfg.StrOpt('sim_cfg_loc', default='/etc/valet/engine/ostro_sim.cfg'), - cfg.StrOpt('ip', default='localhost'), - cfg.IntOpt('health_timeout', default=10, help='health check grace period (seconds, default=10)'), - cfg.IntOpt('priority', default=1, help='this instance priority (master=1)'), - cfg.StrOpt('datacenter_name', default='aic', help='The name of region'), - cfg.IntOpt('num_of_region_chars', default='3', help='number of chars that indicates the region code'), - cfg.StrOpt('rack_code_list', default='r', help='rack indicator.'), - cfg.ListOpt('node_code_list', default='a,c,u,f,o,p,s', help='Indicates the node type'), - cfg.IntOpt('compute_trigger_frequency', default=1800, help='Frequency for checking compute hosting status'), - cfg.IntOpt('topology_trigger_frequency', default=3600, help='Frequency for checking datacenter topology'), - cfg.IntOpt('update_batch_wait', default=600, help='Wait time before start resource synch from Nova'), - cfg.FloatOpt('default_cpu_allocation_ratio', default=16, help='Default CPU overbooking ratios'), - cfg.FloatOpt('default_ram_allocation_ratio', default=1.5, help='Default mem overbooking ratios'), - cfg.FloatOpt('default_disk_allocation_ratio', default=1, help='Default disk overbooking ratios'), - cfg.FloatOpt('static_cpu_standby_ratio', default=20, help='Percentages of standby cpu resources'), - cfg.FloatOpt('static_mem_standby_ratio', default=20, help='Percentages of standby mem resources'), - cfg.FloatOpt('static_local_disk_standby_ratio', default=20, help='Percentages of disk standby esources'), + cfg.StrOpt('pid', + default='/var/run/valet/valet-engine.pid'), + cfg.StrOpt('mode', + default='live', + help='run as actual or simulation for test'), + cfg.StrOpt('sim_cfg_loc', + default='/etc/valet/engine/ostro_sim.cfg'), + cfg.StrOpt('ip', + default='localhost'), + cfg.IntOpt('health_timeout', + default=10, + help='health check grace period (seconds, default=10)'), + cfg.IntOpt('priority', + default=1, + help='this instance priority (master=1)'), + cfg.StrOpt('datacenter_name', + default='aic', + help='The name of region'), + cfg.IntOpt('num_of_region_chars', + default='3', + help='number of chars that indicates the region code'), + cfg.StrOpt('rack_code_list', + default='r', + help='rack indicator.'), + cfg.ListOpt('node_code_list', + default='a,c,u,f,o,p,s', + help='Indicates the node type'), + cfg.IntOpt('compute_trigger_frequency', + default=1800, + help='Frequency for checking compute hosting status'), + cfg.IntOpt('topology_trigger_frequency', + default=3600, + help='Frequency for checking datacenter topology'), + cfg.IntOpt('update_batch_wait', + default=600, + help='Wait time before start resource synch from 
Nova'), + cfg.FloatOpt('default_cpu_allocation_ratio', + default=16, + help='Default CPU overbooking ratios'), + cfg.FloatOpt('default_ram_allocation_ratio', + default=1.5, help='Default mem overbooking ratios'), + cfg.FloatOpt('default_disk_allocation_ratio', + default=1, + help='Default disk overbooking ratios'), + cfg.FloatOpt('static_cpu_standby_ratio', + default=20, + help='Percentages of standby cpu resources'), + cfg.FloatOpt('static_mem_standby_ratio', + default=20, + help='Percentages of standby mem resources'), + cfg.FloatOpt('static_local_disk_standby_ratio', + default=20, + help='Percentages of disk standby esources'), ] + logger_conf("engine") listener_group = cfg.OptGroup(name='events_listener', @@ -62,7 +97,7 @@ listener_opts = [ def init_engine(default_config_files=None): - """ register the engine and the listener groups """ + """Register the engine and the listener groups """ common.init_conf("engine.log", args=sys.argv[1:], grp2opt={engine_group: engine_opts, listener_group: listener_opts}, diff --git a/valet/engine/listener/listener_manager.py b/valet/engine/listener/listener_manager.py index 2d23902..7139ab2 100755 --- a/valet/engine/listener/listener_manager.py +++ b/valet/engine/listener/listener_manager.py @@ -56,16 +56,17 @@ class ListenerManager(threading.Thread): 'hosts': self.config.music.hosts, 'port': self.config.music.port, 'replication_factor': self.config.music.replication_factor, - 'music_server_retries': self.config.music.music_server_retries, + 'music_server_retries': + self.config.music.music_server_retries, 'logger': self.listener_logger, } engine = Music(**kwargs) engine.create_keyspace(self.config.music.keyspace) self.MUSIC = {'engine': engine, 'keyspace': self.config.music.keyspace} - self.listener_logger.debug('Storing in music on %s, keyspace %s' - % (self.config.music.host, - self.config.music.keyspace)) + self.listener_logger.debug( + 'Storing in music on %s, keyspace %s' % + (self.config.music.host, self.config.music.keyspace)) self.listener_logger.debug('Connecting to %s, with %s' % (self.config.messaging.host, @@ -103,7 +104,8 @@ class ListenerManager(threading.Thread): channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=binding_key) self.listener_logger.info('Channel is bound,listening on%s ' - 'exchange %s', self.config.messaging.host, + 'exchange %s', + self.config.messaging.host, self.config.events_listener.exchange) # Start consuming messages @@ -134,8 +136,10 @@ class ListenerManager(threading.Thread): else: return - self.listener_logger.debug("\nMessage No: %s\n", method_frame.delivery_tag) - self.listener_logger.debug(json.dumps(message, sort_keys=True, indent=2)) + self.listener_logger.debug( + "\nMessage No: %s\n", method_frame.delivery_tag) + self.listener_logger.debug( + json.dumps(message, sort_keys=True, indent=2)) channel.basic_ack(delivery_tag=method_frame.delivery_tag) except Exception: self.listener_logger.error(traceback.format_exc()) diff --git a/valet/engine/optimizer/app_manager/app_handler.py b/valet/engine/optimizer/app_manager/app_handler.py index 05c0062..ab69e6a 100755 --- a/valet/engine/optimizer/app_manager/app_handler.py +++ b/valet/engine/optimizer/app_manager/app_handler.py @@ -64,13 +64,16 @@ class AppHandler(object): if action == "create": decision_key = stack_id + ":" + action + ":none" if decision_key in self.decision_history.keys(): - return (decision_key, self.decision_history[decision_key].result) + return (decision_key, + self.decision_history[decision_key].result) else: return 
(decision_key, None) elif action == "replan": - decision_key = stack_id + ":" + action + ":" + _app["orchestration_id"] + msg = "%s:%s:%s" + decision_key = msg % (stack_id, action, _app["orchestration_id"]) if decision_key in self.decision_history.keys(): - return (decision_key, self.decision_history[decision_key].result) + return (decision_key, + self.decision_history[decision_key].result) else: return (decision_key, None) else: @@ -92,7 +95,8 @@ class AppHandler(object): count = 0 num_of_removes = len(self.decision_history) - self.min_decision_history remove_item_list = [] - for decision in (sorted(self.decision_history.values(), key=operator.attrgetter('timestamp'))): + for decision in (sorted(self.decision_history.values(), + key=operator.attrgetter('timestamp'))): remove_item_list.append(decision.decision_key) count += 1 if count == num_of_removes: @@ -127,7 +131,8 @@ class AppHandler(object): app_topology, action) if re_app is None: self.apps[stack_id] = None - self.status = "cannot locate the original plan for stack = " + stack_id + msg = "cannot locate the original plan for stack = %s" + self.status = msg % stack_id return None if action == "replan": @@ -171,13 +176,17 @@ class AppHandler(object): if isinstance(v, VM): if self.apps[v.app_uuid].request_type == "replan": if v.uuid in _app_topology.planned_vm_map.keys(): - self.apps[v.app_uuid].add_vm(v, _placement_map[v], "replanned") + self.apps[v.app_uuid].add_vm( + v, _placement_map[v], "replanned") else: - self.apps[v.app_uuid].add_vm(v, _placement_map[v], "scheduled") + self.apps[v.app_uuid].add_vm( + v, _placement_map[v], "scheduled") if v.uuid == _app_topology.candidate_list_map.keys()[0]: - self.apps[v.app_uuid].add_vm(v, _placement_map[v], "replanned") + self.apps[v.app_uuid].add_vm( + v, _placement_map[v], "replanned") else: - self.apps[v.app_uuid].add_vm(v, _placement_map[v], "scheduled") + self.apps[v.app_uuid].add_vm( + v, _placement_map[v], "scheduled") # NOTE(GJ): do not handle Volume in this version else: if _placement_map[v] in self.resource.hosts.keys(): @@ -226,7 +235,8 @@ class AppHandler(object): return True - def _regenerate_app_topology(self, _stack_id, _app, _app_topology, _action): + def _regenerate_app_topology(self, _stack_id, _app, + _app_topology, _action): re_app = {} old_app = self.db.get_app_info(_stack_id) @@ -257,23 +267,22 @@ class AppHandler(object): properties["availability_zone"] = vm["availability_zones"] resources[vmk]["properties"] = properties - if len(vm["diversity_groups"]) > 0: - for divk, level_name in vm["diversity_groups"].iteritems(): - div_id = divk + ":" + level_name - if div_id not in diversity_groups.keys(): - diversity_groups[div_id] = [] - diversity_groups[div_id].append(vmk) + for divk, level_name in vm["diversity_groups"].iteritems(): + div_id = divk + ":" + level_name + if div_id not in diversity_groups.keys(): + diversity_groups[div_id] = [] + diversity_groups[div_id].append(vmk) - if len(vm["exclusivity_groups"]) > 0: - for exk, level_name in vm["exclusivity_groups"].iteritems(): - ex_id = exk + ":" + level_name - if ex_id not in exclusivity_groups.keys(): - exclusivity_groups[ex_id] = [] - exclusivity_groups[ex_id].append(vmk) + for exk, level_name in vm["exclusivity_groups"].iteritems(): + ex_id = exk + ":" + level_name + if ex_id not in exclusivity_groups.keys(): + exclusivity_groups[ex_id] = [] + exclusivity_groups[ex_id].append(vmk) if _action == "replan": if vmk == _app["orchestration_id"]: - _app_topology.candidate_list_map[vmk] = _app["locations"] + 
_app_topology.candidate_list_map[vmk] = \ + _app["locations"] elif vmk in _app["exclusions"]: _app_topology.planned_vm_map[vmk] = vm["host"] if vm["status"] == "replanned": @@ -320,11 +329,12 @@ class AppHandler(object): exclusivity_groups[ex_id] = [] exclusivity_groups[ex_id].append(gk) + group_type = "ATT::Valet::GroupAssignment" + for div_id, resource_list in diversity_groups.iteritems(): divk_level_name = div_id.split(":") resources[divk_level_name[0]] = {} - resources[divk_level_name[0]]["type"] = \ - "ATT::Valet::GroupAssignment" + resources[divk_level_name[0]]["type"] = group_type properties = {} properties["group_type"] = "diversity" properties["group_name"] = divk_level_name[2] @@ -335,7 +345,7 @@ class AppHandler(object): for ex_id, resource_list in exclusivity_groups.iteritems(): exk_level_name = ex_id.split(":") resources[exk_level_name[0]] = {} - resources[exk_level_name[0]]["type"] = "ATT::Valet::GroupAssignment" + resources[exk_level_name[0]]["type"] = group_type properties = {} properties["group_type"] = "exclusivity" properties["group_name"] = exk_level_name[2] diff --git a/valet/engine/optimizer/app_manager/app_topology.py b/valet/engine/optimizer/app_manager/app_topology.py index f3c3886..8e7c88d 100755 --- a/valet/engine/optimizer/app_manager/app_topology.py +++ b/valet/engine/optimizer/app_manager/app_topology.py @@ -12,10 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""App Topology.""" - -from valet.engine.optimizer.app_manager.app_topology_base import VM, VGroup +from valet.engine.optimizer.app_manager.app_topology_base import VGroup +from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.app_manager.app_topology_parser import Parser diff --git a/valet/engine/optimizer/app_manager/app_topology_parser.py b/valet/engine/optimizer/app_manager/app_topology_parser.py index 6e5a7c9..e7be90a 100755 --- a/valet/engine/optimizer/app_manager/app_topology_parser.py +++ b/valet/engine/optimizer/app_manager/app_topology_parser.py @@ -26,8 +26,10 @@ """ import six -from valet.engine.optimizer.app_manager.app_topology_base \ - import VGroup, VGroupLink, VM, VMLink, LEVELS + +from valet.engine.optimizer.app_manager.app_topology_base import LEVELS +from valet.engine.optimizer.app_manager.app_topology_base import VGroup +from valet.engine.optimizer.app_manager.app_topology_base import VM class Parser(object): @@ -35,7 +37,8 @@ class Parser(object): This class handles parsing out the data related to the desired topology from a template. 
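# Illustrative sketch, not part of the patch: the app_handler hunk above
# drops the "if len(vm['diversity_groups']) > 0:" guards, since iterating an
# empty dict is already a no-op. The same '<group-key>:<level>:<name>'
# accumulation pattern is shown below with made-up sample data;
# dict.setdefault() stands in for the explicit membership check used in the
# real code (which still targets Python 2 and uses iteritems()).

def collect_diversity_groups(vms):
    """Map '<group-key>:<level>:<name>' ids to the VM keys that use them."""
    diversity_groups = {}
    for vmk, vm in vms.items():
        for divk, level_name in vm.get("diversity_groups", {}).items():
            div_id = divk + ":" + level_name
            diversity_groups.setdefault(div_id, []).append(vmk)
    return diversity_groups


if __name__ == "__main__":
    sample = {
        "vm-1": {"diversity_groups": {"grp-a": "host:anti-aff"}},
        "vm-2": {"diversity_groups": {"grp-a": "host:anti-aff"}},
        "vm-3": {"diversity_groups": {}},  # empty: the loop simply skips it
    }
    print(collect_diversity_groups(sample))
    # -> {'grp-a:host:anti-aff': ['vm-1', 'vm-2']}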
- not supported OS::Nova::ServerGroup OS::Heat::AutoScalingGroup OS::Heat::Stack OS::Heat::ResourceGroup + not supported OS::Nova::ServerGroup OS::Heat::AutoScalingGroup + OS::Heat::Stack OS::Heat::ResourceGroup """ def __init__(self, _high_level_allowed, _logger): @@ -142,14 +145,16 @@ class Parser(object): vgroup.level = r["properties"]["level"] if vgroup.level != "host": if self.high_level_allowed is False: - self.status = "only host level of affinity group allowed " + \ - "due to the mis-match of host naming convention" + self.status = ("only host level of affinity group " + "allowed due to the mis-match of " + "host naming convention") return {}, {} else: self.status = "no grouping level" return {}, {} vgroups[vgroup.uuid] = vgroup - self.logger.info("group = " + vgroup.name + vgroup.name + ", type = " + vgroup.vgroup_type) + msg = "group = %s, type = %s" + self.logger.info(msg % (vgroup.name, vgroup.vgroup_type)) if self._merge_diversity_groups(_elements, vgroups, vms) is False: return {}, {} @@ -176,15 +181,19 @@ class Parser(object): for vk in r["properties"]["resources"]: if vk in _vms.keys(): vgroup.subvgroups[vk] = _vms[vk] - _vms[vk].diversity_groups[rk] = vgroup.level + ":" + vgroup.name + _vms[vk].diversity_groups[rk] = ( + vgroup.level + ":" + vgroup.name) elif vk in _vgroups.keys(): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(level): - self.status = "grouping scope: nested " \ - "group's level is higher" + self.status = ("grouping scope: nested " + "group's level is higher") return False - if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": - self.status = vg.vgroup_type + " not allowd to be nested in diversity group" + if (vg.vgroup_type == "DIV" or + vg.vgroup_type == "EX"): + msg = ("{0} not allowd to be nested in " + "diversity group") + self.status = msg.format(vg.vgroup_type) return False vgroup.subvgroups[vk] = vg vg.diversity_groups[rk] = vgroup.level + ":" + \ @@ -204,15 +213,19 @@ class Parser(object): for vk in r["properties"]["resources"]: if vk in _vms.keys(): vgroup.subvgroups[vk] = _vms[vk] - _vms[vk].exclusivity_groups[rk] = vgroup.level + ":" + vgroup.name + _vms[vk].exclusivity_groups[rk] = ( + vgroup.level + ":" + vgroup.name) elif vk in _vgroups.keys(): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(level): self.status = "grouping scope: nested " \ "group's level is higher" return False - if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": - self.status = vg.vgroup_type + ") not allowd to be nested in exclusivity group" + if (vg.vgroup_type == "DIV" or + vg.vgroup_type == "EX"): + msg = ("{0}) not allowd to be nested in " + "exclusivity group") + self.status = msg.format(vg.vgroup_type) return False vgroup.subvgroups[vk] = vg vg.exclusivity_groups[rk] = vgroup.level + ":" + \ @@ -241,32 +254,38 @@ class Parser(object): vgroup.subvgroups[vk] = _vms[vk] _vms[vk].survgroup = vgroup affinity_map[vk] = vgroup - self._add_implicit_diversity_groups(vgroup, _vms[vk].diversity_groups) - self._add_implicit_exclusivity_groups(vgroup, _vms[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + vgroup, _vms[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + vgroup, _vms[vk].exclusivity_groups) self._add_memberships(vgroup, _vms[vk]) del _vms[vk] elif vk in _vgroups.keys(): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(level): - self.status = "grouping scope: nested " \ - "group's level is higher" + self.status = ("grouping scope: nested " + "group's level is higher") return False - if vg.vgroup_type 
== "DIV" or vg.vgroup_type == "EX": - if self._merge_subgroups(vgroup, vg.subvgroups, _vms, _vgroups, - _elements, affinity_map) is False: + if (vg.vgroup_type == "DIV" or + vg.vgroup_type == "EX"): + if not self._merge_subgroups( + vgroup, vg.subvgroups, _vms, _vgroups, + _elements, affinity_map): return False del _vgroups[vk] else: - if self._exist_in_subgroups(vk, vgroup) is None: - if self._get_subgroups(vg, _elements, - _vgroups, _vms, - affinity_map) is False: + if not self._exist_in_subgroups(vk, vgroup): + if not self._get_subgroups( + vg, _elements, _vgroups, _vms, + affinity_map): return False vgroup.subvgroups[vk] = vg vg.survgroup = vgroup affinity_map[vk] = vgroup - self._add_implicit_diversity_groups(vgroup, vg.diversity_groups) - self._add_implicit_exclusivity_groups(vgroup, vg.exclusivity_groups) + self._add_implicit_diversity_groups( + vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups( + vgroup, vg.exclusivity_groups) self._add_memberships(vgroup, vg) del _vgroups[vk] else: @@ -276,43 +295,49 @@ class Parser(object): self.status = "invalid resource = " + vk return False if affinity_map[vk].uuid != vgroup.uuid: - if self._exist_in_subgroups(vk, vgroup) is None: + if not self._exist_in_subgroups(vk, vgroup): self._set_implicit_grouping( vk, vgroup, affinity_map, _vgroups) return True - def _merge_subgroups(self, _vgroup, _subgroups, _vms, _vgroups, _elements, _affinity_map): + def _merge_subgroups(self, _vgroup, _subgroups, _vms, _vgroups, + _elements, _affinity_map): for vk, _ in _subgroups.iteritems(): if vk in _vms.keys(): _vgroup.subvgroups[vk] = _vms[vk] _vms[vk].survgroup = _vgroup _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, _vms[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, _vms[vk].exclusivity_groups) self._add_memberships(_vgroup, _vms[vk]) del _vms[vk] elif vk in _vgroups.keys(): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): - self.status = "grouping scope: nested group's level is " \ - "higher" + self.status = ("grouping scope: nested group's level is " + "higher") return False if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": - if self._merge_subgroups(_vgroup, vg.subvgroups, - _vms, _vgroups, - _elements, _affinity_map) is False: + if not self._merge_subgroups(_vgroup, vg.subvgroups, + _vms, _vgroups, + _elements, _affinity_map): return False del _vgroups[vk] else: if self._exist_in_subgroups(vk, _vgroup) is None: - if self._get_subgroups(vg, _elements, _vgroups, _vms, _affinity_map) is False: + if not self._get_subgroups(vg, _elements, _vgroups, + _vms, _affinity_map): return False _vgroup.subvgroups[vk] = vg vg.survgroup = _vgroup _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, vg.exclusivity_groups) self._add_memberships(_vgroup, vg) del _vgroups[vk] else: @@ -323,40 +348,47 @@ class Parser(object): return False if _affinity_map[vk].uuid != _vgroup.uuid: if self._exist_in_subgroups(vk, _vgroup) is None: - self._set_implicit_grouping(vk, _vgroup, _affinity_map, _vgroups) + self._set_implicit_grouping( + vk, _vgroup, _affinity_map, _vgroups) return True - def 
_get_subgroups(self, _vgroup, _elements, _vgroups, _vms, _affinity_map): + def _get_subgroups(self, _vgroup, _elements, + _vgroups, _vms, _affinity_map): for vk in _elements[_vgroup.uuid]["properties"]["resources"]: if vk in _vms.keys(): _vgroup.subvgroups[vk] = _vms[vk] _vms[vk].survgroup = _vgroup _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, _vms[vk].diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, _vms[vk].exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, _vms[vk].diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, _vms[vk].exclusivity_groups) self._add_memberships(_vgroup, _vms[vk]) del _vms[vk] elif vk in _vgroups.keys(): vg = _vgroups[vk] if LEVELS.index(vg.level) > LEVELS.index(_vgroup.level): - self.status = "grouping scope: nested group's level is " \ - "higher" + self.status = ("grouping scope: nested group's level is " + "higher") return False if vg.vgroup_type == "DIV" or vg.vgroup_type == "EX": - if self._merge_subgroups(_vgroup, vg.subvgroups, - _vms, _vgroups, - _elements, _affinity_map) is False: + if not self._merge_subgroups(_vgroup, vg.subvgroups, + _vms, _vgroups, + _elements, _affinity_map): return False del _vgroups[vk] else: if self._exist_in_subgroups(vk, _vgroup) is None: - if self._get_subgroups(vg, _elements, _vgroups, _vms, _affinity_map) is False: + if not self._get_subgroups(vg, _elements, _vgroups, + _vms, _affinity_map): return False _vgroup.subvgroups[vk] = vg vg.survgroup = _vgroup _affinity_map[vk] = _vgroup - self._add_implicit_diversity_groups(_vgroup, vg.diversity_groups) - self._add_implicit_exclusivity_groups(_vgroup, vg.exclusivity_groups) + self._add_implicit_diversity_groups( + _vgroup, vg.diversity_groups) + self._add_implicit_exclusivity_groups( + _vgroup, vg.exclusivity_groups) self._add_memberships(_vgroup, vg) del _vgroups[vk] else: @@ -365,7 +397,8 @@ class Parser(object): return False if _affinity_map[vk].uuid != _vgroup.uuid: if self._exist_in_subgroups(vk, _vgroup) is None: - self._set_implicit_grouping(vk, _vgroup, _affinity_map, _vgroups) + self._set_implicit_grouping( + vk, _vgroup, _affinity_map, _vgroups) return True def _add_implicit_diversity_groups(self, _vgroup, _diversity_groups): @@ -398,7 +431,8 @@ class Parser(object): if t_vg.uuid in _affinity_map.keys(): # if the parent belongs to the other parent vgroup - self._set_implicit_grouping(t_vg.uuid, _s_vg, _affinity_map, _vgroups) + self._set_implicit_grouping( + t_vg.uuid, _s_vg, _affinity_map, _vgroups) else: if LEVELS.index(t_vg.level) > LEVELS.index(_s_vg.level): t_vg.level = _s_vg.level @@ -406,8 +440,10 @@ class Parser(object): _s_vg.subvgroups[t_vg.uuid] = t_vg t_vg.survgroup = _s_vg _affinity_map[t_vg.uuid] = _s_vg - self._add_implicit_diversity_groups(_s_vg, t_vg.diversity_groups) - self._add_implicit_exclusivity_groups(_s_vg, t_vg.exclusivity_groups) + self._add_implicit_diversity_groups( + _s_vg, t_vg.diversity_groups) + self._add_implicit_exclusivity_groups( + _s_vg, t_vg.exclusivity_groups) self._add_memberships(_s_vg, t_vg) del _vgroups[t_vg.uuid] diff --git a/valet/engine/optimizer/db_connect/music_handler.py b/valet/engine/optimizer/db_connect/music_handler.py index 2e0ef1d..e3f7115 100755 --- a/valet/engine/optimizer/db_connect/music_handler.py +++ b/valet/engine/optimizer/db_connect/music_handler.py @@ -38,14 +38,16 @@ class MusicHandler(object): self.config = _config self.logger = _logger - self.music = Music(hosts=self.config.hosts, port=self.config.port, - 
replication_factor=self.config.replication_factor, - music_server_retries=self.config.music_server_retries, - logger=self.logger) + self.music = Music( + hosts=self.config.hosts, port=self.config.port, + replication_factor=self.config.replication_factor, + music_server_retries=self.config.music_server_retries, + logger=self.logger) if self.config.hosts is not None: self.logger.info("DB: music host = %s", self.config.hosts) if self.config.replication_factor is not None: - self.logger.info("DB: music replication factor = " + str(self.config.replication_factor)) + self.logger.info("DB: music replication factor = %s ", + str(self.config.replication_factor)) # FIXME(GJ): this may not need def init_db(self): @@ -172,8 +174,9 @@ class MusicHandler(object): if exchange != "nova": if self.delete_event(event_id) is False: return None - self.logger.debug("MusicHandler.get_events: event exchange " - "(" + exchange + ") is not supported") + self.logger.debug( + "MusicHandler.get_events: event exchange " + "(" + exchange + ") is not supported") continue if method != 'object_action' and method != 'build_and_run_' \ @@ -187,8 +190,8 @@ class MusicHandler(object): if len(args_data) == 0: if self.delete_event(event_id) is False: return None - self.logger.debug("MusicHandler.get_events: event does not " - "have args") + self.logger.debug("MusicHandler.get_events: " + "event does not have args") continue try: @@ -199,6 +202,7 @@ class MusicHandler(object): ":" + event_id) continue + # TODO(lamt) this block of code can use refactoring if method == 'object_action': if 'objinst' in args.keys(): objinst = args['objinst'] @@ -207,28 +211,32 @@ class MusicHandler(object): if nova_object_name == 'Instance': if 'nova_object.changes' in objinst.keys() and \ 'nova_object.data' in objinst.keys(): - change_list = objinst['nova_object.changes'] + change_list = objinst[ + 'nova_object.changes'] change_data = objinst['nova_object.data'] if 'vm_state' in change_list and \ 'vm_state' in change_data.keys(): - if change_data['vm_state'] == \ - 'deleted' \ - or change_data[ - 'vm_state' - ] == 'active': + if (change_data['vm_state'] == + 'deleted' or + change_data['vm_state'] == + 'active'): e = Event(event_id) e.exchange = exchange e.method = method e.args = args event_list.append(e) else: - self.logger.warn("unknown vm_state = " + change_data["vm_state"]) + msg = "unknown vm_state = %s" + self.logger.warn( + msg % change_data["vm_state"]) if 'uuid' in change_data.keys(): - self.logger.warn(" uuid = " + change_data['uuid']) - if self.delete_event(event_id) is False: + msg = " uuid = %s" + self.logger.warn( + msg % change_data['uuid']) + if not self.delete_event(event_id): return None else: - if self.delete_event(event_id) is False: + if not self.delete_event(event_id): return None else: if self.delete_event(event_id) is False: @@ -304,7 +312,8 @@ class MusicHandler(object): "in build event") if len(error_event_list) > 0: - event_list[:] = [e for e in event_list if e not in error_event_list] + event_list[:] = [ + e for e in event_list if e not in error_event_list] if len(event_list) > 0: event_list.sort(key=operator.attrgetter('event_id')) @@ -459,7 +468,7 @@ class MusicHandler(object): return json_resource def update_resource_status(self, _k, _status): - """Update resource _k to the new _status (flavors, lgs, hosts, etc).""" + """Update resource to the new _status (flavors, lgs, hosts, etc).""" row = {} try: row = self.music.read_row(self.config.db_keyspace, @@ -485,7 +494,8 @@ class MusicHandler(object): if 'logical_groups' in 
_status.keys(): logical_groups = _status['logical_groups'] for lgk, lg in logical_groups.iteritems(): - if lgk in ensurekey(json_resource, 'logical_groups').keys(): + keys = ensurekey(json_resource, 'logical_groups').keys() + if lgk in keys: del json_resource['logical_groups'][lgk] json_resource['logical_groups'][lgk] = lg @@ -612,7 +622,8 @@ class MusicHandler(object): vm["host"] = _host self.logger.warn("db: conflicted placement " "decision from Ostro") - # TODO(GY): affinity, diversity, exclusivity validation check + # TODO(GY): affinity, diversity, exclusivity + # validation check updated = True else: vm["status"] = "scheduled" @@ -668,8 +679,8 @@ class MusicHandler(object): self.logger.error("MusicHandler.update_vm_info: vm is missing " "from stack") else: - self.logger.warn("MusicHandler.update_vm_info: not found stack for " - "update = " + _s_uuid) + self.logger.warn("MusicHandler.update_vm_info: not found " + "stack for update = " + _s_uuid) if updated is True: if self.add_app(_s_uuid, json_app) is False: diff --git a/valet/engine/optimizer/ostro/constraint_solver.py b/valet/engine/optimizer/ostro/constraint_solver.py index 6a36925..8742af4 100755 --- a/valet/engine/optimizer/ostro/constraint_solver.py +++ b/valet/engine/optimizer/ostro/constraint_solver.py @@ -13,10 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""ConstraintSolver.""" - -from valet.engine.optimizer.app_manager.app_topology_base \ - import VGroup, VM, LEVELS +from valet.engine.optimizer.app_manager.app_topology_base import LEVELS +from valet.engine.optimizer.app_manager.app_topology_base import VGroup +from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.ostro.openstack_filters \ import AggregateInstanceExtraSpecsFilter from valet.engine.optimizer.ostro.openstack_filters \ @@ -191,7 +190,8 @@ class ConstraintSolver(object): if r not in conflict_list: conflict_list.append(r) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def _constrain_diversity_with_others(self, _level, _diversity_id, _candidate_list): @@ -202,7 +202,8 @@ class ConstraintSolver(object): if r not in conflict_list: conflict_list.append(r) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def exist_group(self, _level, _id, _group_type, _candidate): """Check if group esists.""" @@ -224,11 +225,12 @@ class ConstraintSolver(object): conflict_list = [] for r in _candidate_list: - if self.conflict_diversity(_level, _n, _node_placements, r) is True: + if self.conflict_diversity(_level, _n, _node_placements, r): if r not in conflict_list: conflict_list.append(r) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def conflict_diversity(self, _level, _n, _node_placements, _candidate): """Return True if the candidate has a placement conflict.""" @@ -270,7 +272,8 @@ class ConstraintSolver(object): if r not in conflict_list: conflict_list.append(r) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def conflict_exclusivity(self, _level, _candidate): """Check for an exculsivity conflict.""" @@ -300,8 +303,8 @@ class 
ConstraintSolver(object): return exclusivities def _constrain_exclusivity(self, _level, _exclusivity_id, _candidate_list): - candidate_list = self._get_exclusive_candidates(_level, _exclusivity_id, - _candidate_list) + candidate_list = self._get_exclusive_candidates( + _level, _exclusivity_id, _candidate_list) if len(candidate_list) == 0: candidate_list = self._get_hibernated_candidates(_level, @@ -317,7 +320,7 @@ class ConstraintSolver(object): candidate_list = [] for r in _candidate_list: - if self.exist_group(_level, _exclusivity_id, "EX", r) is True: + if self.exist_group(_level, _exclusivity_id, "EX", r): if r not in candidate_list: candidate_list.append(r) @@ -334,8 +337,9 @@ class ConstraintSolver(object): return candidate_list def check_hibernated(self, _level, _candidate): - """Check if the candidate is hibernated.""" - """Return True if the candidate has no placed VMs at the specified + """Check if the candidate is hibernated. + + Return True if the candidate has no placed VMs at the specified level. """ match = False @@ -354,10 +358,15 @@ class ConstraintSolver(object): if r not in conflict_list: conflict_list.append(r) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def check_host_aggregates(self, _level, _candidate, _v): - """Check if the candidate passes the aggregate instance extra specs zone filter.""" + """Check if candidate passes aggregate instance extra specs. + + Return true if the candidate passes the aggregate instance extra specs + zone filter. + """ return self.openstack_AIES.host_passes(_level, _candidate, _v) def _constrain_availability_zone(self, _level, _n, _candidate_list): @@ -368,7 +377,8 @@ class ConstraintSolver(object): if r not in conflict_list: conflict_list.append(r) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def check_availability_zone(self, _level, _candidate, _v): """Check if the candidate passes the availability zone filter.""" @@ -381,7 +391,8 @@ class ConstraintSolver(object): if self.check_cpu_capacity(_level, _n.node, ch) is False: conflict_list.append(ch) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def check_cpu_capacity(self, _level, _v, _candidate): """Check if the candidate passes the core filter.""" @@ -394,7 +405,8 @@ class ConstraintSolver(object): if self.check_mem_capacity(_level, _n.node, ch) is False: conflict_list.append(ch) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def check_mem_capacity(self, _level, _v, _candidate): """Check if the candidate passes the RAM filter.""" @@ -407,7 +419,8 @@ class ConstraintSolver(object): if self.check_local_disk_capacity(_level, _n.node, ch) is False: conflict_list.append(ch) - _candidate_list[:] = [c for c in _candidate_list if c not in conflict_list] + _candidate_list[:] = [ + c for c in _candidate_list if c not in conflict_list] def check_local_disk_capacity(self, _level, _v, _candidate): """Check if the candidate passes the disk filter.""" diff --git a/valet/engine/optimizer/ostro/openstack_filters.py b/valet/engine/optimizer/ostro/openstack_filters.py index 4cf6022..88f1867 100755 --- a/valet/engine/optimizer/ostro/openstack_filters.py +++ 
b/valet/engine/optimizer/ostro/openstack_filters.py @@ -12,11 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""AggregateInstanceExtraSpecsFilter.""" - - -import openstack_utils import six from valet.engine.optimizer.app_manager.app_topology_base import VM diff --git a/valet/engine/optimizer/ostro/optimizer.py b/valet/engine/optimizer/ostro/optimizer.py index 7fba5ba..67ffd3c 100755 --- a/valet/engine/optimizer/ostro/optimizer.py +++ b/valet/engine/optimizer/ostro/optimizer.py @@ -12,13 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Optimizer.""" - -import time - -from valet.engine.optimizer.app_manager.app_topology_base \ - import VGroup, VM +from valet.engine.optimizer.app_manager.app_topology_base import VGroup +from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.ostro.search import Search @@ -47,8 +42,9 @@ class Optimizer(object): if len(_app_topology.exclusion_list_map) > 0: place_type = "migration" else: - if (len(_app_topology.old_vm_map) > 0 or len(_app_topology.planned_vm_map) > 0) and \ - len(_app_topology.candidate_list_map) > 0: + if ((len(_app_topology.old_vm_map) > 0 or + len(_app_topology.planned_vm_map) > 0) and + len(_app_topology.candidate_list_map) > 0): place_type = "replan" else: place_type = "create" @@ -73,18 +69,16 @@ class Optimizer(object): if success is True: placement_map = {} for v in self.search.node_placements.keys(): + node_placement = self.search.node_placements[v] if isinstance(v, VM): - placement_map[v] = self.search.node_placements[v].host_name + placement_map[v] = node_placement.host_name elif isinstance(v, VGroup): if v.level == "host": - placement_map[v] = \ - self.search.node_placements[v].host_name + placement_map[v] = node_placement.host_name elif v.level == "rack": - placement_map[v] = \ - self.search.node_placements[v].rack_name + placement_map[v] = node_placement.rack_name elif v.level == "cluster": - placement_map[v] = \ - self.search.node_placements[v].cluster_name + placement_map[v] = node_placement.cluster_name self.logger.debug(" " + v.name + " placed in " + placement_map[v]) @@ -125,7 +119,8 @@ class Optimizer(object): (v.uuid, v.name, uuid), v.vCPUs, v.mem, v.local_volume_size) - self._update_logical_grouping(v, self.search.avail_hosts[np.host_name], uuid) + self._update_logical_grouping( + v, self.search.avail_hosts[np.host_name], uuid) self.resource.update_host_time(np.host_name) @@ -160,8 +155,8 @@ class Optimizer(object): self._collect_logical_groups_of_vm(_v, vm_logical_groups) host = self.resource.hosts[_avail_host.host_name] - self.resource.add_vm_to_logical_groups(host, (_v.uuid, _v.name, _uuid), - vm_logical_groups) + self.resource.add_vm_to_logical_groups( + host, (_v.uuid, _v.name, _uuid), vm_logical_groups) def _collect_logical_groups_of_vm(self, _v, _vm_logical_groups): if isinstance(_v, VM): @@ -191,4 +186,5 @@ class Optimizer(object): _vm_logical_groups.append(name) if _v.survgroup is not None: - self._collect_logical_groups_of_vm(_v.survgroup, _vm_logical_groups) + self._collect_logical_groups_of_vm( + _v.survgroup, _vm_logical_groups) diff --git a/valet/engine/optimizer/ostro/ostro.py b/valet/engine/optimizer/ostro/ostro.py index 7730a3b..fa8f6cf 100755 --- 
a/valet/engine/optimizer/ostro/ostro.py +++ b/valet/engine/optimizer/ostro/ostro.py @@ -53,11 +53,13 @@ class Ostro(object): self.data_lock = threading.Lock() self.thread_list = [] - self.topology = TopologyManager(1, "Topology", self.resource, - self.data_lock, self.config, self.logger) + self.topology = TopologyManager( + 1, "Topology", self.resource, + self.data_lock, self.config, self.logger) - self.compute = ComputeManager(2, "Compute", self.resource, - self.data_lock, self.config, self.logger) + self.compute = ComputeManager( + 2, "Compute", self.resource, + self.data_lock, self.config, self.logger) self.listener = ListenerManager(3, "Listener", CONF) @@ -94,8 +96,9 @@ class Ostro(object): if self.handle_events(event_list) is False: break else: - if self.resource.resource_updated is True and \ - (time.time() - self.resource.curr_db_timestamp) >= self.batch_store_trigger: + time_diff = time.time() - self.resource.curr_db_timestamp + if (self.resource.resource_updated and + time_diff >= self.batch_store_trigger): self.data_lock.acquire() if self.resource.store_topology_updates() is False: self.data_lock.release() @@ -134,7 +137,8 @@ class Ostro(object): resource_status = self.db.get_resource_status( self.resource.datacenter.name) if resource_status is None: - self.logger.error("failed to read from table: " + self.config.db_resource_table) + self.logger.error("failed to read from table: %s" % + self.config.db_resource_table) return False if len(resource_status) > 0: @@ -155,7 +159,7 @@ class Ostro(object): self.resource.update_topology() except Exception: - self.logger.critical("Ostro.bootstrap failed: " + + self.logger.critical("Ostro.bootstrap failed: %s" % traceback.format_exc()) self.logger.info("done bootstrap") @@ -196,7 +200,7 @@ class Ostro(object): result = self._get_json_results("query", "ok", self.status, query_result) - if self.db.put_result(result) is False: + if not self.db.put_result(result): return False self.logger.info("done query") @@ -204,20 +208,24 @@ class Ostro(object): self.logger.info("start app placement") result = None - (decision_key, old_decision) = self.app_handler.check_history(req) + (decision_key, old_decision) = self.app_handler.check_history( + req) if old_decision is None: placement_map = self._place_app(req) if placement_map is None: - result = self._get_json_results("placement", "error", self.status, placement_map) + result = self._get_json_results( + "placement", "error", self.status, placement_map) else: - result = self._get_json_results("placement", "ok", "success", placement_map) + result = self._get_json_results( + "placement", "ok", "success", placement_map) if decision_key is not None: self.app_handler.put_history(decision_key, result) else: - self.logger.warn("decision(" + decision_key + ") already made") + self.logger.warn("decision(%s) already made" % + decision_key) result = old_decision - if self.db.put_result(result) is False: + if not self.db.put_result(result): return False self.logger.info("done app placement") @@ -233,7 +241,8 @@ class Ostro(object): params = _q["parameters"] if "group_name" in params.keys(): self.data_lock.acquire() - vm_list = self._get_vms_from_logical_group(params["group_name"]) + vm_list = self._get_vms_from_logical_group( + params["group_name"]) self.data_lock.release() query_result[_q["stack_id"]] = vm_list else: @@ -324,9 +333,11 @@ class Ostro(object): # Update resource and app information if len(placement_map) > 0: self.resource.update_topology(store=False) - self.app_handler.add_placement(placement_map, 
app_topology, self.resource.current_timestamp) + self.app_handler.add_placement( + placement_map, app_topology, self.resource.current_timestamp) - if len(app_topology.exclusion_list_map) > 0 and len(app_topology.planned_vm_map) > 0: + if (len(app_topology.exclusion_list_map) > 0 and + len(app_topology.planned_vm_map) > 0): for vk in app_topology.planned_vm_map.keys(): if vk in placement_map.keys(): del placement_map[vk] @@ -390,7 +401,8 @@ class Ostro(object): if e.method == "build_and_run_instance": # VM is created (from stack) - self.logger.info("Ostro.handle_events: got build_and_run event for " + e.uuid) + self.logger.info("Ostro.handle_events: got build_and_run " + "event for %s" % e.uuid) if self.db.put_uuid(e) is False: self.data_lock.release() return False @@ -406,57 +418,81 @@ class Ostro(object): if e.vm_state == "active": self.logger.info("Ostro.handle_events: got instance_" - "active event for " + e.uuid) - vm_info = self.app_handler.get_vm_info(orch_id[1], orch_id[0], e.host) + "active event for " + e.uuid) + vm_info = self.app_handler.get_vm_info( + orch_id[1], orch_id[0], e.host) if vm_info is None: self.logger.error("Ostro.handle_events: error " - "while getting app info from MUSIC") + "while getting app info " + "from MUSIC") self.data_lock.release() return False if len(vm_info) == 0: - # Stack not found because vm is created by the other stack - self.logger.warn("EVENT: no vm_info found in app placement record") - self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) + # Stack not found because vm is created by the + # other stack + self.logger.warn("EVENT: no vm_info found in app " + "placement record") + self._add_vm_to_host( + e.uuid, orch_id[0], e.host, e.vcpus, + e.mem, e.local_disk) else: - if "planned_host" in vm_info.keys() and vm_info["planned_host"] != e.host: + if ("planned_host" in vm_info.keys() and + vm_info["planned_host"] != e.host): # VM is activated in the different host - self.logger.warn("EVENT: vm activated in the different host") - self._add_vm_to_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) + self.logger.warn("EVENT: vm activated in the " + "different host") + self._add_vm_to_host( + e.uuid, orch_id[0], e.host, e.vcpus, + e.mem, e.local_disk) - self._remove_vm_from_host(e.uuid, orch_id[0], - vm_info["planned_host"], - float(vm_info["cpus"]), - float(vm_info["mem"]), - float(vm_info["local_volume"])) + self._remove_vm_from_host( + e.uuid, orch_id[0], + vm_info["planned_host"], + float(vm_info["cpus"]), + float(vm_info["mem"]), + float(vm_info["local_volume"])) - self._remove_vm_from_logical_groups(e.uuid, orch_id[0], vm_info["planned_host"]) + self._remove_vm_from_logical_groups( + e.uuid, orch_id[0], + vm_info["planned_host"]) else: # Found vm in the planned host, - # Possibly the vm deleted in the host while batch cleanup - if self._check_h_uuid(orch_id[0], e.host) is False: - self.logger.warn("EVENT: planned vm was deleted") - if self._check_uuid(e.uuid, e.host) is True: + # Possibly the vm deleted in the host while + # batch cleanup + if not self._check_h_uuid(orch_id[0], e.host): + self.logger.warn("EVENT: planned vm was " + "deleted") + if self._check_uuid(e.uuid, e.host): self._update_h_uuid_in_host(orch_id[0], e.uuid, e.host) self._update_h_uuid_in_logical_groups( orch_id[0], e.uuid, e.host) else: - self.logger.info("EVENT: vm activated as planned") - self._update_uuid_in_host(orch_id[0], e.uuid, e.host) - self._update_uuid_in_logical_groups(orch_id[0], e.uuid, e.host) + self.logger.info( + "EVENT: 
vm activated as planned") + self._update_uuid_in_host( + orch_id[0], e.uuid, e.host) + self._update_uuid_in_logical_groups( + orch_id[0], e.uuid, e.host) resource_updated = True elif e.vm_state == "deleted": - self.logger.info("EVENT: got instance_delete for " + e.uuid) + self.logger.info("EVENT: got instance_delete for %s" % + e.uuid) - self._remove_vm_from_host(e.uuid, orch_id[0], e.host, e.vcpus, e.mem, e.local_disk) - self._remove_vm_from_logical_groups(e.uuid, orch_id[0], e.host) + self._remove_vm_from_host( + e.uuid, orch_id[0], e.host, e.vcpus, + e.mem, e.local_disk) + self._remove_vm_from_logical_groups( + e.uuid, orch_id[0], e.host) - if self.app_handler.update_vm_info(orch_id[1], orch_id[0]) is False: - self.logger.error("EVENT: error while updating app in MUSIC") + if not self.app_handler.update_vm_info( + orch_id[1], orch_id[0]): + self.logger.error("EVENT: error while updating " + "app in MUSIC") self.data_lock.release() return False @@ -541,12 +577,13 @@ class Ostro(object): _local_disk) self.resource.update_host_time(_host_name) else: - self.logger.warn("vm (" + _uuid + ") is missing while removing") + self.logger.warn("vm (%s) is missing while removing" % _uuid) def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name): host = self.resource.hosts[_host_name] if _h_uuid is not None and _h_uuid != "none": - self.resource.remove_vm_by_h_uuid_from_logical_groups(host, _h_uuid) + self.resource.remove_vm_by_h_uuid_from_logical_groups( + host, _h_uuid) else: self.resource.remove_vm_by_uuid_from_logical_groups(host, _uuid) @@ -582,7 +619,7 @@ class Ostro(object): self.resource.update_host_time(_host_name) else: self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid " - "in host = " + host.name) + "in host = %s" % host.name) def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name): host = self.resource.hosts[_host_name] @@ -659,7 +696,8 @@ class Ostro(object): app_status['message'] = "ping" app_result['status'] = app_status - app_result['resources'] = {"ip": self.config.ip, "id": self.config.priority} + app_result['resources'] = { + "ip": self.config.ip, "id": self.config.priority} result[appk] = app_result diff --git a/valet/engine/optimizer/ostro/search.py b/valet/engine/optimizer/ostro/search.py index cfa272c..1a2fca7 100755 --- a/valet/engine/optimizer/ostro/search.py +++ b/valet/engine/optimizer/ostro/search.py @@ -12,20 +12,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
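# Illustrative sketch, not part of the patch: the Ostro.run_ostro hunk above
# pulls the elapsed time into a local (time_diff) before comparing it with
# batch_store_trigger. The class below only mirrors the shape of that check;
# the attribute names are simplified stand-ins for the real resource object.
import time


class BatchStoreThrottle(object):
    """Persist topology updates at most once per trigger interval."""

    def __init__(self, batch_store_trigger=10):
        self.batch_store_trigger = batch_store_trigger  # seconds (assumed)
        self.curr_db_timestamp = time.time()
        self.resource_updated = False

    def maybe_store(self, store_fn):
        """Call store_fn() only when updates are pending and old enough."""
        time_diff = time.time() - self.curr_db_timestamp
        if self.resource_updated and time_diff >= self.batch_store_trigger:
            if store_fn() is False:
                return False
            self.curr_db_timestamp = time.time()
            self.resource_updated = False
        return True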
- -"""Search.""" - import copy import operator -from valet.engine.optimizer.app_manager.app_topology_base import VGroup, VM, LEVELS +from valet.engine.optimizer.app_manager.app_topology_base import LEVELS +from valet.engine.optimizer.app_manager.app_topology_base import VGroup +from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.ostro.constraint_solver import ConstraintSolver -from valet.engine.optimizer.ostro.search_base import Node, Resource, LogicalGroupResource +from valet.engine.optimizer.ostro.search_base import LogicalGroupResource +from valet.engine.optimizer.ostro.search_base import Node +from valet.engine.optimizer.ostro.search_base import Resource from valet.engine.resource_manager.resource_base import Datacenter class Search(object): - ''' a bin-packing with maximal consolidation approach ''' + '''A bin-packing with maximal consolidation approach ''' def __init__(self, _logger): """Initialization.""" @@ -181,9 +182,8 @@ class Search(object): def _place_planned_nodes(self): init_level = LEVELS[len(LEVELS) - 1] - (planned_node_list, level) = self._open_planned_list(self.app_topology.vms, - self.app_topology.vgroups, - init_level) + (planned_node_list, level) = self._open_planned_list( + self.app_topology.vms, self.app_topology.vgroups, init_level) if len(planned_node_list) == 0: return True @@ -295,7 +295,8 @@ class Search(object): while len(_node_list) > 0: n = _node_list.pop(0) - best_resource = self._get_best_resource_for_planned(n, _level, avail_resources) + best_resource = self._get_best_resource_for_planned( + n, _level, avail_resources) if best_resource is not None: self._deduct_reservation(_level, best_resource, n) self._close_planned_placement(_level, best_resource, n.node) @@ -326,7 +327,8 @@ class Search(object): else: vms[_n.node.uuid] = _n.node - (planned_node_list, level) = self._open_planned_list(vms, vgroups, _level) + (planned_node_list, level) = self._open_planned_list( + vms, vgroups, _level) host_name = self._get_host_of_level(_n, _level) if host_name is None: @@ -533,11 +535,12 @@ class Search(object): lgr = r.host_memberships[lgk] lgr.num_of_placed_vms -= 1 if r.host_name in lgr.num_of_placed_vms_per_host.keys(): + num_placed_vm = lgr.num_of_placed_vms_per_host lgr.num_of_placed_vms_per_host[r.host_name] -= 1 if lgr.group_type == "EX" or \ lgr.group_type == "AFF" or \ lgr.group_type == "DIV": - if lgr.num_of_placed_vms_per_host[r.host_name] == 0: + if num_placed_vm[r.host_name] == 0: del lgr.num_of_placed_vms_per_host[r.host_name] del r.host_memberships[lgk] if lgr.group_type == "EX" or lgr.group_type == "AFF" or \ @@ -557,15 +560,11 @@ class Search(object): if lg.exist_vm_by_h_uuid(h_uuid) is True: lgr = r.rack_memberships[lgk] lgr.num_of_placed_vms -= 1 - if r.rack_name in \ - lgr.num_of_placed_vms_per_host.keys(): - lgr.num_of_placed_vms_per_host[r.rack_name] -= 1 - if lgr.num_of_placed_vms_per_host[ - r.rack_name - ] == 0: - del lgr.num_of_placed_vms_per_host[ - r.rack_name - ] + vms_placed = lgr.num_of_placed_vms_per_host + if r.rack_name in vms_placed.keys(): + vms_placed[r.rack_name] -= 1 + if vms_placed[r.rack_name] == 0: + del vms_placed[r.rack_name] for _, rr in self.avail_hosts.iteritems(): if rr.rack_name != "any" and \ rr.rack_name == \ @@ -667,7 +666,8 @@ class Search(object): elif _level == "host": avail_resources = _avail_hosts - _open_node_list.sort(key=operator.attrgetter("sort_base"), reverse=True) + _open_node_list.sort( + key=operator.attrgetter("sort_base"), reverse=True) while 
len(_open_node_list) > 0: n = _open_node_list.pop(0) @@ -678,9 +678,10 @@ class Search(object): break if n.node not in self.planned_placements.keys(): - ''' for VM under host level only ''' + # for VM under host level only self._deduct_reservation(_level, best_resource, n) - ''' close all types of nodes under any level, but VM with above host level ''' + # close all types of nodes under any level, but VM + # with above host level self._close_node_placement(_level, best_resource, n.node) return success @@ -771,16 +772,18 @@ class Search(object): avail_hosts[hk] = h # recursive call - if self._run_greedy(open_node_list, level, avail_hosts) is True: + if self._run_greedy(open_node_list, level, avail_hosts): best_resource = copy.deepcopy(cr) best_resource.level = _level break else: debug_candidate_name = cr.get_resource_name(_level) - self.logger.warn("rollback of candidate resource = " + debug_candidate_name) + msg = "rollback of candidate resource = {0}" + self.logger.warn(msg.format(debug_candidate_name)) if planned_host is None: - # recursively rollback deductions of all child VMs of _n + # recursively rollback deductions of all + # child VMs of _n self._rollback_reservation(_n.node) # recursively rollback closing self._rollback_node_placement(_n.node) @@ -855,8 +858,9 @@ class Search(object): lgr.group_type = "EX" self.avail_logical_groups[lgr.name] = lgr - self.logger.info("Search: add new exclusivity (" + - _exclusivity_id + ")") + self.logger.info( + "Search: add new exclusivity (%s)" % _exclusivity_id) + else: lgr = self.avail_logical_groups[_exclusivity_id] @@ -879,7 +883,8 @@ class Search(object): np.rack_memberships[_exclusivity_id] = lgr if chosen_host.cluster_name != "any" and \ np.cluster_name == chosen_host.cluster_name: - if _exclusivity_id not in np.cluster_memberships.keys(): + if (_exclusivity_id not in + np.cluster_memberships.keys()): np.cluster_memberships[_exclusivity_id] = lgr elif _level == "rack": for _, np in self.avail_hosts.iteritems(): @@ -889,13 +894,15 @@ class Search(object): np.rack_memberships[_exclusivity_id] = lgr if chosen_host.cluster_name != "any" and \ np.cluster_name == chosen_host.cluster_name: - if _exclusivity_id not in np.cluster_memberships.keys(): + if (_exclusivity_id not in + np.cluster_memberships.keys()): np.cluster_memberships[_exclusivity_id] = lgr elif _level == "cluster": for _, np in self.avail_hosts.iteritems(): if chosen_host.cluster_name != "any" and \ np.cluster_name == chosen_host.cluster_name: - if _exclusivity_id not in np.cluster_memberships.keys(): + if (_exclusivity_id not in + np.cluster_memberships.keys()): np.cluster_memberships[_exclusivity_id] = lgr def _add_affinity(self, _level, _best, _affinity_id): @@ -956,8 +963,8 @@ class Search(object): lgr.group_type = "DIV" self.avail_logical_groups[lgr.name] = lgr - self.logger.info("Search: add new diversity (" + - _diversity_id + ")") + self.logger.info( + "Search: add new diversity (%s)" % _diversity_id) else: lgr = self.avail_logical_groups[_diversity_id] @@ -1058,7 +1065,8 @@ class Search(object): if len(_v.diversity_groups) > 0: for _, diversity_id in _v.diversity_groups.iteritems(): if diversity_id.split(":")[1] != "any": - self._remove_diversities(chosen_host, diversity_id, level) + self._remove_diversities( + chosen_host, diversity_id, level) def _remove_exclusivity(self, _chosen_host, _exclusivity_id, _level): if _exclusivity_id.split(":")[0] == _level: @@ -1087,7 +1095,8 @@ class Search(object): if _chosen_host.cluster_name != "any" and \ np.cluster_name == \ 
_chosen_host.cluster_name: - if _exclusivity_id in np.cluster_memberships.keys(): + if (_exclusivity_id in + np.cluster_memberships.keys()): del np.cluster_memberships[_exclusivity_id] elif _level == "rack": @@ -1100,7 +1109,8 @@ class Search(object): if _chosen_host.cluster_name != "any" and \ np.cluster_name == \ _chosen_host.cluster_name: - if _exclusivity_id in np.cluster_memberships.keys(): + if (_exclusivity_id in + np.cluster_memberships.keys()): del np.cluster_memberships[_exclusivity_id] elif _level == "cluster": @@ -1109,7 +1119,8 @@ class Search(object): if _chosen_host.cluster_name != "any" and \ np.cluster_name == \ _chosen_host.cluster_name: - if _exclusivity_id in np.cluster_memberships.keys(): + if (_exclusivity_id in + np.cluster_memberships.keys()): del np.cluster_memberships[_exclusivity_id] def _remove_affinity(self, _chosen_host, _affinity_id, _level): diff --git a/valet/engine/optimizer/ostro/search_base.py b/valet/engine/optimizer/ostro/search_base.py index c1dbd9c..db0c587 100755 --- a/valet/engine/optimizer/ostro/search_base.py +++ b/valet/engine/optimizer/ostro/search_base.py @@ -12,11 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Resources utlized by search engine.""" - -from valet.engine.optimizer.app_manager.app_topology_base import VGroup from valet.engine.optimizer.app_manager.app_topology_base import LEVELS +from valet.engine.optimizer.app_manager.app_topology_base import VGroup + class Resource(object): """Resource.""" @@ -27,14 +25,20 @@ class Resource(object): self.level = None self.host_name = None - self.host_memberships = {} # all mapped logical groups to host - self.host_vCPUs = 0 # original total vCPUs before overcommit - self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit - self.host_mem = 0 # original total mem cap before overcommit - self.host_avail_mem = 0 # remaining mem cap after - self.host_local_disk = 0 # original total local disk cap before overcommit - self.host_avail_local_disk = 0 # remaining local disk cap after overcommit - self.host_num_of_placed_vms = 0 # the number of vms currently placed in this host + self.host_memberships = {} # all mapped logical groups to host + self.host_vCPUs = 0 # original total vCPUs before overcommit + self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit + self.host_mem = 0 # original total mem cap before overcommit + self.host_avail_mem = 0 # remaining mem cap after + + # original total local disk cap before overcommit + self.host_local_disk = 0 + + # remaining local disk cap after overcommit + self.host_avail_local_disk = 0 + + # the number of vms currently placed in this host + self.host_num_of_placed_vms = 0 self.rack_name = None # where this host is located self.rack_memberships = {} @@ -106,7 +110,7 @@ class Resource(object): return memberships def get_num_of_placed_vms(self, _level): - """Get the number of placed vms of this resource at the specified level.""" + """Get the number of placed vms of this resource at a given level.""" num_of_vms = 0 if _level == "cluster": @@ -119,7 +123,11 @@ class Resource(object): return num_of_vms def get_avail_resources(self, _level): - """Get the available vCPUs, memory, local disk of this resource at the specified level.""" + """Get available resources of this resource at a given level. + + Returns the available vCPUs, memory, local disk of this resource + the specified level. 
+ """ avail_vCPUs = 0 avail_mem = 0 avail_local_disk = 0 @@ -140,7 +148,11 @@ class Resource(object): return (avail_vCPUs, avail_mem, avail_local_disk) def get_local_disk(self, _level): - """Get the local disk and available local disk of this resource at the specified level.""" + """Get the local disk information. + + Returns the local disk and available local disk of this resource + at the specified level. + """ local_disk = 0 avail_local_disk = 0 @@ -157,7 +169,11 @@ class Resource(object): return (local_disk, avail_local_disk) def get_vCPUs(self, _level): - """Get the vCPUs and available vCPUs of this resource at the specified level.""" + """Get the vCPUs information. + + Returns the vCPUs and available vCPUs of this resource at the + specified level. + """ vCPUs = 0 avail_vCPUs = 0 @@ -174,7 +190,11 @@ class Resource(object): return (vCPUs, avail_vCPUs) def get_mem(self, _level): - """Get the memory and available memory of this resource at the specified level.""" + """Get memory information. + + Returns the memory and available memory of this resource at the + specified level. + """ mem = 0 avail_mem = 0 diff --git a/valet/engine/optimizer/ostro_server/daemon.py b/valet/engine/optimizer/ostro_server/daemon.py index a6b76d1..c1b3522 100644 --- a/valet/engine/optimizer/ostro_server/daemon.py +++ b/valet/engine/optimizer/ostro_server/daemon.py @@ -102,7 +102,7 @@ class Daemon(object): return pid def checkpid(self, pid): - """ Check For the existence of a unix pid. """ + """Check For the existence of a unix pid. """ alive = False try: if pid: diff --git a/valet/engine/optimizer/ostro_server/health_checker.py b/valet/engine/optimizer/ostro_server/health_checker.py index 7404d46..9d6fa3d 100644 --- a/valet/engine/optimizer/ostro_server/health_checker.py +++ b/valet/engine/optimizer/ostro_server/health_checker.py @@ -1,9 +1,26 @@ +# +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
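# Illustrative sketch, not part of the patch: the health_checker hunk below
# replaces a hand-escaped JSON string with a plain Python structure for the
# ping request. Building the payload via the json module avoids quoting
# mistakes; the stack_id here is generated locally, and where json.dumps is
# applied (here, or later by the Music REST layer) is an assumption.
import json
import uuid


def build_ping_payload(stack_id=None):
    """Return the values/consistencyInfo body for a health-check ping row."""
    stack_id = stack_id or str(uuid.uuid4())
    request = [{"action": "ping", "stack_id": stack_id}]
    return {
        "values": {
            "stack_id": stack_id,
            "request": json.dumps(request),
        },
        "consistencyInfo": {"type": "eventual"},
    }


if __name__ == "__main__":
    print(json.dumps(build_ping_payload("example-stack-id"), indent=2))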
+
 import json
 import os
-from oslo_config import cfg
 import sys
 import time
 import uuid
+
+from oslo_config import cfg
+
 from valet.common.conf import get_logger
 from valet.common.music import REST
 from valet.engine.conf import init_engine
@@ -16,8 +33,8 @@ class HealthCheck(object):
     rest = None
 
     def __init__(self, hosts=[]):
-
-        self.tries = CONF.engine.health_timeout * 2  # default health_timeout=10
+        # default health_timeout=10
+        self.tries = CONF.engine.health_timeout * 2
         self.uuid = str(uuid.uuid4())
 
         kwargs = {
@@ -39,12 +56,21 @@ class HealthCheck(object):
         return engine_id
 
     def _send(self):
+        request = [
+            {
+                "action": "ping",
+                "stack_id": self.uuid
+            }
+        ]
         data = {
-            "values": {"stack_id": self.uuid,
-                       "request": "[{\"action\": \"ping\", \"stack_id\": \"" + self.uuid + "\"}]"
-                       },
-            "consistencyInfo": {"type": "eventual"}
+            "values": {
+                "stack_id": self.uuid,
+                "request": json.dumps(request)
+            },
+            "consistencyInfo": {
+                "type": "eventual"
+            }
         }
 
         path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
@@ -58,13 +84,15 @@ class HealthCheck(object):
     def _read_response(self):
 
         engine_id = None
-        path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s' % {
+        pre = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s'
+        path = pre % {
             'keyspace': CONF.music.keyspace,
             'table': CONF.music.response_table,
             'uid': self.uuid,
         }
 
-        for i in range(self.tries):  # default 20 tries * 0.5 sec = 10 sec. timeout
+        # default 20 tries * 0.5 sec = 10 sec. timeout
+        for i in range(self.tries):
             time.sleep(0.5)
             try:
                 response = self.rest.request(method='get', path=path)
@@ -79,7 +107,7 @@ class HealthCheck(object):
                         engine_id = placement['resources']['id']
                 break
             except Exception as e:
-                logger.warn("HealthCheck exception in read response - " + str(e))
+                logger.warn("HealthCheck exception in read response " + str(e))
 
         return engine_id
 
@@ -89,8 +117,9 @@
             "consistencyInfo": {"type": "eventual"}
         }
 
+        base = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s'
         try:
-            path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s' % {
+            path = base % {
                 'keyspace': CONF.music.keyspace,
                 'table': CONF.music.request_table,
                 'uid': self.uuid
             }
@@ -100,7 +129,7 @@
             logger.warn("HealthCheck exception in delete request - " + str(e))
 
         try:
-            path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s' % {
+            path = base % {
                 'keyspace': CONF.music.keyspace,
                 'table': CONF.music.response_table,
                 'uid': self.uuid
             }
@@ -116,14 +145,19 @@ if __name__ == "__main__":
     code = 0
     init_engine(default_config_files=['/etc/valet/valet.conf'])
     logger = get_logger("ostro_daemon")
+
     if os.path.exists(CONF.engine.pid):
         respondent_id = HealthCheck().ping()
+
         if respondent_id == CONF.engine.priority:
             code = CONF.engine.priority
-            logger.info("HealthCheck - Alive, respondent instance id: {}".format(respondent_id))
+            logger.info("HealthCheck - Alive, "
+                        "respondent instance id: {}".format(respondent_id))
         else:
-            logger.warn("HealthCheck - pid file exists, engine {} did not respond in a timely manner (respondent id {})"
-                        .format(CONF.engine.priority, respondent_id))
+            logger.warn("HealthCheck - pid file exists, "
+                        "engine {} did not respond in a timely manner "
+                        "(respondent id {})".format(CONF.engine.priority,
+                                                    respondent_id))
     else:
         logger.info("HealthCheck - no pid file, engine is not running!")
     sys.exit(code)
diff --git a/valet/engine/resource_manager/compute.py b/valet/engine/resource_manager/compute.py
index da6ed6a..c139966 100755
---
a/valet/engine/resource_manager/compute.py +++ b/valet/engine/resource_manager/compute.py @@ -12,13 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Compute.""" +import traceback from novaclient import client as nova_client from oslo_config import cfg -from resource_base import Host, LogicalGroup, Flavor -import traceback + +from resource_base import Flavor +from resource_base import Host +from resource_base import LogicalGroup + # Nova API v2 VERSION = 2 @@ -205,7 +207,7 @@ class Compute(object): return "success" def _set_resources(self, _hosts): - ''' returns Hypervisor list ''' + '''Returns Hypervisor list ''' host_list = self.nova.hypervisors.list() @@ -252,7 +254,7 @@ class Compute(object): return error_status def _set_flavors(self, _flavors): - ''' get a list of all flavors ''' + '''Get a list of all flavors.''' flavor_list = self.nova.flavors.list() @@ -280,7 +282,8 @@ class Compute(object): if sw != '': swap_mb = float(sw) - flavor.disk_cap = root_gb + ephemeral_gb + swap_mb / float(1024) + flavor.disk_cap = ( + root_gb + ephemeral_gb + swap_mb / float(1024)) _flavors[flavor.name] = flavor except (ValueError, KeyError, TypeError): diff --git a/valet/engine/resource_manager/compute_manager.py b/valet/engine/resource_manager/compute_manager.py index 29d1a21..65dd9db 100755 --- a/valet/engine/resource_manager/compute_manager.py +++ b/valet/engine/resource_manager/compute_manager.py @@ -62,11 +62,13 @@ class ComputeManager(threading.Thread): time.sleep(60) curr_ts = time.time() if curr_ts > period_end: - # Give some time (batch_wait) to update resource status via message bus - # Otherwise, late update will be cleaned up - if (curr_ts - self.resource.current_timestamp) > self.update_batch_wait: + # Give some time (batch_wait) to update resource status via + # message bus. Otherwise, late update will be cleaned up. 
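# A minimal sketch of the batch-wait pattern used in the ComputeManager.run()
# hunk here, for reference only; `trigger_freq`, `batch_wait`,
# `last_update_ts`, and `do_batch` are illustrative stand-ins, not names
# taken from this patch.
import time


def run_batches(trigger_freq, batch_wait, last_update_ts, do_batch):
    """Run `do_batch` once per trigger period, but hold off while the most
    recent resource update (arriving via the message bus) is younger than
    `batch_wait`, so a late update is not wiped out by an early rebuild."""
    period_end = time.time() + trigger_freq
    while True:
        time.sleep(1)
        now = time.time()
        if now > period_end:
            # Only rebuild when no update landed too recently.
            if (now - last_update_ts()) > batch_wait:
                do_batch()
                period_end = now + trigger_freq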
+ time_diff = curr_ts - self.resource.current_timestamp + if time_diff > self.update_batch_wait: self._run() - period_end = curr_ts + self.config.compute_trigger_freq + period_end = (curr_ts + + self.config.compute_trigger_freq) # NOTE(GJ): do not timer based batch self.logger.info("exit compute_manager " + self.thread_name) @@ -119,7 +121,8 @@ class ComputeManager(threading.Thread): for lk in _logical_groups.keys(): if lk not in self.resource.logical_groups.keys(): - self.resource.logical_groups[lk] = deepcopy(_logical_groups[lk]) + self.resource.logical_groups[lk] = deepcopy( + _logical_groups[lk]) self.resource.logical_groups[lk].last_update = time.time() self.logger.warn("ComputeManager: new logical group (" + @@ -339,7 +342,8 @@ class ComputeManager(threading.Thread): alen = len(_rhost.vm_list) if alen != blen: topology_updated = True - self.logger.warn("host (" + _rhost.name + ") " + str(blen - alen) + " none vms removed") + msg = "host ({0}) {1} none vms removed" + self.logger.warn(msg.format(_rhost.name, str(blen - alen))) self.resource.clean_none_vms_from_logical_groups(_rhost) @@ -352,17 +356,20 @@ class ComputeManager(threading.Thread): for rvm_id in _rhost.vm_list: if _host.exist_vm_by_uuid(rvm_id[2]) is False: - self.resource.remove_vm_by_uuid_from_logical_groups(_rhost, rvm_id[2]) + self.resource.remove_vm_by_uuid_from_logical_groups( + _rhost, rvm_id[2]) topology_updated = True self.logger.warn("ComputeManager: host (" + _rhost.name + ") updated (vm removed)") blen = len(_rhost.vm_list) - _rhost.vm_list = [v for v in _rhost.vm_list if _host.exist_vm_by_uuid(v[2]) is True] + _rhost.vm_list = [ + v for v in _rhost.vm_list if _host.exist_vm_by_uuid(v[2]) is True] alen = len(_rhost.vm_list) if alen != blen: topology_updated = True - self.logger.warn("host (" + _rhost.name + ") " + str(blen - alen) + " vms removed") + msg = "host ({0}) {1} vms removed" + self.logger.warn(msg.format(_rhost.name, str(blen - alen))) return topology_updated diff --git a/valet/engine/resource_manager/resource.py b/valet/engine/resource_manager/resource.py index 65eccc8..8339044 100755 --- a/valet/engine/resource_manager/resource.py +++ b/valet/engine/resource_manager/resource.py @@ -12,16 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-
-"""Resource - Handles data, metadata, status of resources."""
-
 import time
 import traceback
 
 from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
-from valet.engine.resource_manager.resource_base \
-    import Datacenter, HostGroup, Host, LogicalGroup
-from valet.engine.resource_manager.resource_base import Flavor, Switch, Link
+from valet.engine.resource_manager.resource_base import Datacenter
+from valet.engine.resource_manager.resource_base import Flavor
+from valet.engine.resource_manager.resource_base import Host
+from valet.engine.resource_manager.resource_base import HostGroup
+from valet.engine.resource_manager.resource_base import LogicalGroup
 
 
 class Resource(object):
@@ -141,7 +140,8 @@ class Resource(object):
                     host_group.local_disk_cap = hg.get("local_disk")
                     host_group.original_local_disk_cap = \
                         hg.get("original_local_disk")
-                    host_group.avail_local_disk_cap = hg.get("avail_local_disk")
+                    host_group.avail_local_disk_cap = hg.get(
+                        "avail_local_disk")
                     host_group.vm_list = hg.get("vm_list")
 
                     for lgk in hg.get("membership_list"):
@@ -166,7 +166,8 @@ class Resource(object):
                 self.datacenter.local_disk_cap = dc.get("local_disk")
                 self.datacenter.original_local_disk_cap = \
                     dc.get("original_local_disk")
-                self.datacenter.avail_local_disk_cap = dc.get("avail_local_disk")
+                self.datacenter.avail_local_disk_cap = dc.get(
+                    "avail_local_disk")
                 self.datacenter.vm_list = dc.get("vm_list")
 
                 for lgk in dc.get("membership_list"):
@@ -196,7 +197,8 @@ class Resource(object):
                         if ck in self.hosts.keys():
                             host_group.child_resources[ck] = self.hosts[ck]
                         elif ck in self.host_groups.keys():
-                            host_group.child_resources[ck] = self.host_groups[ck]
+                            host_group.child_resources[ck] = (
+                                self.host_groups[ck])
 
             hs = _resource_status.get("hosts")
             if hs:
@@ -212,7 +214,8 @@ class Resource(object):
 
             self._update_compute_avail()
         except Exception:
-            self.logger.error("while bootstrap_from_db:" + traceback.format_exc())
+            self.logger.error("while bootstrap_from_db: " +
+                              traceback.format_exc())
 
         return True
 
@@ -231,8 +234,8 @@ class Resource(object):
         updated = False
         for level in LEVELS:
             for _, host_group in self.host_groups.iteritems():
-                if host_group.host_type == level and \
-                   host_group.check_availability() is True:
+                if (host_group.host_type == level and
+                        host_group.check_availability()):
                     if host_group.last_update > self.current_timestamp:
                         self._update_host_group_topology(host_group)
                         updated = True
@@ -353,7 +356,8 @@ class Resource(object):
         if datacenter_update is not None:
             json_logging['datacenter'] = datacenter_update
 
-        if self.db.update_resource_status(self.datacenter.name, json_logging) is False:
+        if not self.db.update_resource_status(
+                self.datacenter.name, json_logging):
             return None
 
         self.curr_db_timestamp = time.time()
@@ -370,12 +374,14 @@ class Resource(object):
             for k in lg.metadata.keys():
                 self.logger.debug("    metadata key = " + k)
             self.logger.debug("    vms")
+            debug_msg = "        orch_id = %s uuid = %s"
             for v in lg.vm_list:
-                self.logger.debug("        orch_id = " + v[0] + " uuid = " + v[2])
+                self.logger.debug(debug_msg % (v[0], v[2]))
             self.logger.debug("    hosts")
             for h, v in lg.vms_per_host.iteritems():
-                self.logger.debug("        host = " + h)
-                self.logger.debug("        vms = " + str(len(lg.vms_per_host[h])))
+                self.logger.debug("        host = %s" % h)
+                self.logger.debug("        vms = %s" %
+                                  str(len(lg.vms_per_host[h])))
                 host = None
                 if h in self.hosts.keys():
                     host = self.hosts[h]
@@ -399,17 +405,21 @@ class Resource(object):
             cpu_tot = str(h.vCPUs)
             cpu_avail = str(h.avail_vCPUs)
             cpu_used = str(h.vCPUs_used)
-
self.logger.debug(" cpu = " + cpu_org + ", " + cpu_tot + ", " + cpu_avail + ", " + cpu_used) + msg = " {0} = {1}, {2}, {3}, {4}" + self.logger.debug( + msg.format('cpu', cpu_org, cpu_tot, cpu_avail, cpu_used)) mem_org = str(h.original_mem_cap) mem_tot = str(h.mem_cap) mem_avail = str(h.avail_mem_cap) mem_used = str(h.free_mem_mb) - self.logger.debug(" mem = " + mem_org + ", " + mem_tot + ", " + mem_avail + ", " + mem_used) + self.logger.debug( + msg.format('mem', mem_org, mem_tot, mem_avail, mem_used)) dsk_org = str(h.original_local_disk_cap) dsk_tot = str(h.local_disk_cap) dsk_avail = str(h.avail_local_disk_cap) dsk_used = str(h.free_disk_gb) - self.logger.debug(" disk = " + dsk_org + ", " + dsk_tot + ", " + dsk_avail + ", " + dsk_used) + self.logger.debug( + msg.format('disk', dsk_org, dsk_tot, dsk_avail, dsk_used)) self.logger.debug(" memberships") for mk in h.memberships.keys(): self.logger.debug(" " + mk) @@ -498,8 +508,9 @@ class Resource(object): if host.status != _st: host.status = _st - self.logger.warn("Resource.update_host_resources: host(" + _hn + - ") status changed") + self.logger.warn( + "Resource.update_host_resources: host(%s) status changed" % + _hn) updated = True # FIXME(GJ): should check cpu, memm and disk here? @@ -549,8 +560,7 @@ class Resource(object): if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: lg.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if lgk.split(":")[0] == _host.host_type: if lg.add_vm_by_h_uuid(_vm_id, _host.name) is True: lg.last_update = time.time() @@ -558,7 +568,8 @@ class Resource(object): if isinstance(_host, Host) and _host.host_group is not None: self.add_vm_to_logical_groups(_host.host_group, _vm_id, _logical_groups_of_vm) - elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + elif (isinstance(_host, HostGroup) and + _host.parent_resource is not None): self.add_vm_to_logical_groups(_host.parent_resource, _vm_id, _logical_groups_of_vm) @@ -566,12 +577,14 @@ class Resource(object): """Remove vm by orchestration id from lgs. 
Update host and lgs.""" for lgk in _host.memberships.keys(): if lgk not in self.logical_groups.keys(): - self.logger.warn("logical group (" + lgk + ") missing while removing " + _h_uuid) + self.logger.warn("logical group (%s) missing while " + "removing %s" % (lgk, _h_uuid)) continue lg = self.logical_groups[lgk] if isinstance(_host, Host): - # Remove host from lg's membership if the host has no vms of lg + # Remove host from lg's membership if the host + # has no vms of lg if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True: lg.last_update = time.time() @@ -580,47 +593,48 @@ class Resource(object): _host.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if lgk.split(":")[0] == _host.host_type: - if lg.remove_vm_by_h_uuid(_h_uuid, _host.name) is True: + if lg.remove_vm_by_h_uuid(_h_uuid, _host.name): lg.last_update = time.time() - if _host.remove_membership(lg) is True: + if _host.remove_membership(lg): _host.last_update = time.time() - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if len(lg.vm_list) == 0: del self.logical_groups[lgk] if isinstance(_host, Host) and _host.host_group is not None: self.remove_vm_by_h_uuid_from_logical_groups(_host.host_group, _h_uuid) - elif isinstance(_host, HostGroup) and _host.parent_resource is not None: - self.remove_vm_by_h_uuid_from_logical_groups(_host.parent_resource, - _h_uuid) + elif (isinstance(_host, HostGroup) and + _host.parent_resource is not None): + self.remove_vm_by_h_uuid_from_logical_groups( + _host.parent_resource, _h_uuid) def remove_vm_by_uuid_from_logical_groups(self, _host, _uuid): """Remove vm by uuid from lgs and update proper host and lgs.""" for lgk in _host.memberships.keys(): if lgk not in self.logical_groups.keys(): - self.logger.warn("logical group (" + lgk + ") missing while removing " + _uuid) + self.logger.warn("logical group (%s) missing while " + "removing %s" % (lgk, _uuid)) continue lg = self.logical_groups[lgk] if isinstance(_host, Host): - # Remove host from lg's membership if the host has no vms of lg + # Remove host from lg's membership if the host has + # no vms of lg if lg.remove_vm_by_uuid(_uuid, _host.name) is True: lg.last_update = time.time() - # Remove lg from host's membership if lg does not have the host + # Remove lg from host's membership if lg does not + # have the host if _host.remove_membership(lg) is True: _host.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if lgk.split(":")[0] == _host.host_type: if lg.remove_vm_by_uuid(_uuid, _host.name) is True: lg.last_update = time.time() @@ -628,14 +642,14 @@ class Resource(object): if _host.remove_membership(lg) is True: _host.last_update = time.time() - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if len(lg.vm_list) == 0: del self.logical_groups[lgk] if isinstance(_host, Host) and _host.host_group is not None: self.remove_vm_by_uuid_from_logical_groups(_host.host_group, _uuid) - elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + elif (isinstance(_host, HostGroup) and + _host.parent_resource is not None): self.remove_vm_by_uuid_from_logical_groups(_host.parent_resource, _uuid) @@ -654,8 +668,7 @@ 
class Resource(object): _host.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if lgk.split(":")[0] == _host.host_type: if lg.clean_none_vms(_host.name) is True: lg.last_update = time.time() @@ -663,14 +676,14 @@ class Resource(object): if _host.remove_membership(lg) is True: _host.last_update = time.time() - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if len(lg.vm_list) == 0: del self.logical_groups[lgk] if isinstance(_host, Host) and _host.host_group is not None: self.clean_none_vms_from_logical_groups(_host.host_group) - elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + elif (isinstance(_host, HostGroup) and + _host.parent_resource is not None): self.clean_none_vms_from_logical_groups(_host.parent_resource) def update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host): @@ -682,15 +695,16 @@ class Resource(object): if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: lg.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if lgk.split(":")[0] == _host.host_type: if lg.update_uuid(_h_uuid, _uuid, _host.name) is True: lg.last_update = time.time() if isinstance(_host, Host) and _host.host_group is not None: - self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) - elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + self.update_uuid_in_logical_groups( + _h_uuid, _uuid, _host.host_group) + elif (isinstance(_host, HostGroup) and + _host.parent_resource is not None): self.update_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) @@ -700,19 +714,19 @@ class Resource(object): lg = self.logical_groups[lgk] if isinstance(_host, Host): - if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: + if lg.update_h_uuid(_h_uuid, _uuid, _host.name): lg.last_update = time.time() elif isinstance(_host, HostGroup): - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if self._check_group_type(lg.group_type): if lgk.split(":")[0] == _host.host_type: - if lg.update_h_uuid(_h_uuid, _uuid, _host.name) is True: + if lg.update_h_uuid(_h_uuid, _uuid, _host.name): lg.last_update = time.time() if isinstance(_host, Host) and _host.host_group is not None: self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.host_group) - elif isinstance(_host, HostGroup) and _host.parent_resource is not None: + elif (isinstance(_host, HostGroup) and + _host.parent_resource is not None): self.update_h_uuid_in_logical_groups(_h_uuid, _uuid, _host.parent_resource) @@ -750,7 +764,8 @@ class Resource(object): ram_allocation_ratio = self.config.default_ram_allocation_ratio if self.config.static_mem_standby_ratio > 0: - static_ram_standby_ratio = float(self.config.static_mem_standby_ratio) / float(100) + static_ram_standby_ratio = ( + float(self.config.static_mem_standby_ratio) / float(100)) host.compute_avail_mem(ram_allocation_ratio, static_ram_standby_ratio) @@ -762,9 +777,11 @@ class Resource(object): cpu_allocation_ratio = self.config.default_cpu_allocation_ratio if self.config.static_cpu_standby_ratio > 0: - static_cpu_standby_ratio = float(self.config.static_cpu_standby_ratio) / float(100) + static_cpu_standby_ratio = ( + float(self.config.static_cpu_standby_ratio) / 
float(100)) - host.compute_avail_vCPUs(cpu_allocation_ratio, static_cpu_standby_ratio) + host.compute_avail_vCPUs( + cpu_allocation_ratio, static_cpu_standby_ratio) disk_allocation_ratio = 1.0 if len(disk_allocation_ratio_list) > 0: @@ -775,9 +792,12 @@ class Resource(object): self.config.default_disk_allocation_ratio if self.config.static_local_disk_standby_ratio > 0: - static_disk_standby_ratio = float(self.config.static_local_disk_standby_ratio) / float(100) + static_disk_standby_ratio = ( + float(self.config.static_local_disk_standby_ratio) / float(100) + ) - host.compute_avail_disk(disk_allocation_ratio, static_disk_standby_ratio) + host.compute_avail_disk( + disk_allocation_ratio, static_disk_standby_ratio) def get_flavor(self, _id): """Return flavor according to name passed in.""" @@ -796,3 +816,6 @@ class Resource(object): flavor = None return flavor + + def _check_group_type(self, type): + return type in ['EX', 'AFF', 'DIV'] diff --git a/valet/engine/resource_manager/resource_base.py b/valet/engine/resource_manager/resource_base.py index 61a3235..887f133 100755 --- a/valet/engine/resource_manager/resource_base.py +++ b/valet/engine/resource_manager/resource_base.py @@ -152,11 +152,11 @@ class HostGroup(object): """Init Host Group memberships.""" for lgk in self.memberships.keys(): lg = self.memberships[lgk] - if lg.group_type == "EX" or lg.group_type == "AFF" or \ - lg.group_type == "DIV": + if (lg.group_type == "EX" or lg.group_type == "AFF" or + lg.group_type == "DIV"): level = lg.name.split(":")[0] - if LEVELS.index(level) < LEVELS.index(self.host_type) or \ - self.name not in lg.vms_per_host.keys(): + if (LEVELS.index(level) < LEVELS.index(self.host_type) or + self.name not in lg.vms_per_host.keys()): del self.memberships[lgk] else: del self.memberships[lgk] @@ -165,8 +165,8 @@ class HostGroup(object): """Return True if membership to group _lg removed.""" cleaned = False - if _lg.group_type == "EX" or _lg.group_type == "AFF" or \ - _lg.group_type == "DIV": + if (_lg.group_type == "EX" or _lg.group_type == "AFF" or + _lg.group_type == "DIV"): if self.name not in _lg.vms_per_host.keys(): del self.memberships[_lg.name] cleaned = True @@ -267,8 +267,8 @@ class Host(object): """Return True if host removed from logical group _lg passed in.""" cleaned = False - if _lg.group_type == "EX" or _lg.group_type == "AFF" or \ - _lg.group_type == "DIV": + if (_lg.group_type == "EX" or _lg.group_type == "AFF" or + _lg.group_type == "DIV"): if self.name not in _lg.vms_per_host.keys(): del self.memberships[_lg.name] cleaned = True @@ -277,8 +277,8 @@ class Host(object): def check_availability(self): """Return True if host is up, enabled and tagged as nova infra.""" - if self.status == "enabled" and self.state == "up" and \ - ("nova" in self.tag) and ("infra" in self.tag): + if (self.status == "enabled" and self.state == "up" and + ("nova" in self.tag) and ("infra" in self.tag)): return True else: return False @@ -546,8 +546,7 @@ class LogicalGroup(object): if self.exist_vm_by_h_uuid(_vm_id[0]) is False: self.vm_list.append(_vm_id) - if self.group_type == "EX" or self.group_type == "AFF" or \ - self.group_type == "DIV": + if self._check_group_type(self.group_type): if _host_id not in self.vms_per_host.keys(): self.vms_per_host[_host_id] = [] self.vms_per_host[_host_id].append(_vm_id) @@ -573,10 +572,9 @@ class LogicalGroup(object): success = True break - if self.group_type == "EX" or self.group_type == "AFF" or \ - self.group_type == "DIV": - if (_host_id in self.vms_per_host.keys()) and \ - 
len(self.vms_per_host[_host_id]) == 0: + if self._check_group_type(self.group_type): + if ((_host_id in self.vms_per_host.keys()) and + len(self.vms_per_host[_host_id]) == 0): del self.vms_per_host[_host_id] return success @@ -598,10 +596,9 @@ class LogicalGroup(object): success = True break - if self.group_type == "EX" or self.group_type == "AFF" or \ - self.group_type == "DIV": - if (_host_id in self.vms_per_host.keys()) and \ - len(self.vms_per_host[_host_id]) == 0: + if self._check_group_type(self.group_type): + if ((_host_id in self.vms_per_host.keys()) and + len(self.vms_per_host[_host_id]) == 0): del self.vms_per_host[_host_id] return success @@ -618,15 +615,15 @@ class LogicalGroup(object): if _host_id in self.vms_per_host.keys(): blen = len(self.vms_per_host[_host_id]) - self.vms_per_host[_host_id] = [v for v in self.vms_per_host[_host_id] if v[2] != "none"] + self.vms_per_host[_host_id] = [ + v for v in self.vms_per_host[_host_id] if v[2] != "none"] alen = len(self.vm_list) if alen != blen: success = True - if self.group_type == "EX" or self.group_type == "AFF" or \ - self.group_type == "DIV": - if (_host_id in self.vms_per_host.keys()) and \ - len(self.vms_per_host[_host_id]) == 0: + if self._check_group_type(self.group_type): + if ((_host_id in self.vms_per_host.keys()) and + len(self.vms_per_host[_host_id]) == 0): del self.vms_per_host[_host_id] return success @@ -640,6 +637,9 @@ class LogicalGroup(object): 'vms_per_host': self.vms_per_host, 'last_update': self.last_update} + def _check_group_type(self, type): + return type in ['EX', 'AFF', 'DIV'] + class Flavor(object): """Flavor class.""" diff --git a/valet/engine/resource_manager/topology.py b/valet/engine/resource_manager/topology.py index fb03d5b..0c563ce 100755 --- a/valet/engine/resource_manager/topology.py +++ b/valet/engine/resource_manager/topology.py @@ -16,16 +16,16 @@ """Topology class - performs actual setting up of Topology object.""" import copy - from sre_parse import isdigit + from valet.engine.resource_manager.resource_base import HostGroup class Topology(object): - """ - Topology class. + """Topology class. + currently, using cannonical naming convention to find the topology - """ + """ def __init__(self, _config, _logger): """Init config and logger.""" @@ -117,12 +117,13 @@ class Topology(object): num_of_fields += 1 if index == (end_of_region_index + 1): - status = "invalid rack name = " + _host_name[:index] + c + msg = "invalid rack name = {0}{1}" + status = msg.format(_host_name[:index], c) validated_name = False break - if end_of_rack_index == 0 and \ - index > (end_of_region_index + 1): + if (end_of_rack_index == 0 and + index > (end_of_region_index + 1)): end_of_rack_index = index num_of_fields += 1 diff --git a/valet/engine/resource_manager/topology_manager.py b/valet/engine/resource_manager/topology_manager.py index 267fa92..c784497 100755 --- a/valet/engine/resource_manager/topology_manager.py +++ b/valet/engine/resource_manager/topology_manager.py @@ -12,26 +12,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Topology Manager. - -Actions involved in setting up and managing topology. This includes setting -topology, checking updates, creating new switches( also hosts and links), as -well as updating them. 
-""" - import threading import time -from valet.engine.resource_manager.resource_base \ - import Datacenter, HostGroup, Host +from valet.engine.resource_manager.resource_base import Datacenter +from valet.engine.resource_manager.resource_base import Host +from valet.engine.resource_manager.resource_base import HostGroup from valet.engine.resource_manager.topology import Topology class TopologyManager(threading.Thread): """Topology Manager Class.""" - def __init__(self, _t_id, _t_name, _resource, _data_lock, _config, _logger): + def __init__(self, _t_id, _t_name, _resource, + _data_lock, _config, _logger): """Init Topology Manager.""" threading.Thread.__init__(self) @@ -59,11 +53,13 @@ class TopologyManager(threading.Thread): time.sleep(70) curr_ts = time.time() if curr_ts > period_end: - # Give some time (batch_wait) to update resource status via message bus - # Otherwise, late update will be cleaned up - if (curr_ts - self.resource.current_timestamp) > self.update_batch_wait: + # Give some time (batch_wait) to update resource status via + # message bus. Otherwise, late update will be cleaned up + time_diff = curr_ts - self.resource.current_timestamp + if time_diff > self.update_batch_wait: self._run() - period_end = curr_ts + self.config.topology_trigger_freq + period_end = (curr_ts + + self.config.topology_trigger_freq) # NOTE(GJ): do not timer based batch self.logger.info("exit topology_manager " + self.thread_name) @@ -198,8 +194,8 @@ class TopologyManager(threading.Thread): self.logger.warn("TopologyManager: host (" + _rhost.name + ") updated (tag)") - if _rhost.host_group is None or \ - _host.host_group.name != _rhost.host_group.name: + if (_rhost.host_group is None or + _host.host_group.name != _rhost.host_group.name): if _host.host_group.name in self.resource.host_groups.keys(): _rhost.host_group = \ diff --git a/valet/tests/api/controllers/test_plans.py b/valet/tests/api/controllers/test_plans.py index 783dc5a..2622a14 100644 --- a/valet/tests/api/controllers/test_plans.py +++ b/valet/tests/api/controllers/test_plans.py @@ -12,12 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Test Plans.""" - from uuid import uuid4 -from valet.api.db.models import Plan, Placement +from valet.api.db.models import Placement +from valet.api.db.models import Plan from valet.tests.api.controllers import is_valid_uuid4 # TODO(JD): Add Keystone mock object. diff --git a/valet/tests/functional/valet_validator/common/__init__.py b/valet/tests/functional/valet_validator/common/__init__.py index 73523c8..cff72e5 100644 --- a/valet/tests/functional/valet_validator/common/__init__.py +++ b/valet/tests/functional/valet_validator/common/__init__.py @@ -12,12 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -"""Init.""" +import time from oslo_log import log as logging -import time -from valet.tests.functional.valet_validator.common.init import CONF, COLORS + +from valet.tests.functional.valet_validator.common.init import COLORS +from valet.tests.functional.valet_validator.common.init import CONF LOG = logging.getLogger(__name__) diff --git a/valet/tests/functional/valet_validator/compute/analyzer.py b/valet/tests/functional/valet_validator/compute/analyzer.py index a298ba7..f4cd33c 100644 --- a/valet/tests/functional/valet_validator/compute/analyzer.py +++ b/valet/tests/functional/valet_validator/compute/analyzer.py @@ -12,14 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Analyzer.""" +import traceback from novaclient import client -import traceback -from valet.tests.functional.valet_validator.common import Result, GeneralLogger + + from valet.tests.functional.valet_validator.common.auth import Auth +from valet.tests.functional.valet_validator.common import GeneralLogger from valet.tests.functional.valet_validator.common.init import CONF +from valet.tests.functional.valet_validator.common import Result class Analyzer(object): @@ -208,4 +209,5 @@ class Analyzer(object): def get_vms_by_hypervisor(self, host): """Return vms based on hypervisor(host).""" return [vm for vm in self.nova.servers.list( - search_opts={"all_tenants": True}) if self.get_hostname(vm) == host] + search_opts={"all_tenants": True}) + if self.get_hostname(vm) == host] diff --git a/valet/tests/functional/valet_validator/orchestration/loader.py b/valet/tests/functional/valet_validator/orchestration/loader.py index 0c9892a..7b6feec 100644 --- a/valet/tests/functional/valet_validator/orchestration/loader.py +++ b/valet/tests/functional/valet_validator/orchestration/loader.py @@ -13,16 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Loader.""" - -from heatclient.client import Client import sys import time import traceback -from valet.tests.functional.valet_validator.common import Result, GeneralLogger + +from heatclient.client import Client + from valet.tests.functional.valet_validator.common.auth import Auth +from valet.tests.functional.valet_validator.common import GeneralLogger from valet.tests.functional.valet_validator.common.init import CONF -from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup +from valet.tests.functional.valet_validator.common import Result +from valet.tests.functional.valet_validator.group_api.valet_group \ + import ValetGroup class Loader(object): diff --git a/valet/tests/functional/valet_validator/tests/functional_base.py b/valet/tests/functional/valet_validator/tests/functional_base.py index 981971f..390daa8 100644 --- a/valet/tests/functional/valet_validator/tests/functional_base.py +++ b/valet/tests/functional/valet_validator/tests/functional_base.py @@ -16,11 +16,15 @@ """Functional Base.""" import os -from oslo_log import log as logging import time + +from oslo_log import log as logging + from valet.tests.base import Base -from valet.tests.functional.valet_validator.common.init import COLORS, CONF -from valet.tests.functional.valet_validator.common.resources import TemplateResources +from valet.tests.functional.valet_validator.common.init import COLORS +from valet.tests.functional.valet_validator.common.init import CONF +from valet.tests.functional.valet_validator.common.resources \ + import TemplateResources from valet.tests.functional.valet_validator.compute.analyzer import Analyzer from valet.tests.functional.valet_validator.orchestration.loader import Loader diff --git a/valet/tests/functional/valet_validator/tests/test_affinity.py b/valet/tests/functional/valet_validator/tests/test_affinity.py index 9e930be..183269e 100644 --- a/valet/tests/functional/valet_validator/tests/test_affinity.py +++ b/valet/tests/functional/valet_validator/tests/test_affinity.py @@ -12,20 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -"""Test Affinity.""" - from oslo_config import cfg from oslo_log import log as logging + from valet.tests.functional.valet_validator.common.init import CONF -from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase +from valet.tests.functional.valet_validator.tests.functional_base \ + import FunctionalTestCase -opt_test_aff = \ - [ - cfg.StrOpt('STACK_NAME', default="basic_affinity_stack"), - cfg.StrOpt('TEMPLATE_NAME', default="affinity_basic_2_instances"), - ] +opt_test_aff = [ + cfg.StrOpt('STACK_NAME', default="basic_affinity_stack"), + cfg.StrOpt('TEMPLATE_NAME', default="affinity_basic_2_instances"), +] CONF.register_opts(opt_test_aff, group="test_affinity") LOG = logging.getLogger(__name__) diff --git a/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py b/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py index ce51187..975fc35 100644 --- a/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py +++ b/valet/tests/functional/valet_validator/tests/test_affinity_3_Instances.py @@ -18,13 +18,14 @@ from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF -from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase +from valet.tests.functional.valet_validator.tests.functional_base \ + import FunctionalTestCase opt_test_aff = [ cfg.StrOpt('STACK_NAME', default="affinity_3_stack"), cfg.StrOpt('TEMPLATE_NAME', default="affinity_ 3_Instances"), - ] +] CONF.register_opts(opt_test_aff, group="test_affinity_3") LOG = logging.getLogger(__name__) diff --git a/valet/tests/functional/valet_validator/tests/test_diversity.py b/valet/tests/functional/valet_validator/tests/test_diversity.py index db21b42..9817b27 100644 --- a/valet/tests/functional/valet_validator/tests/test_diversity.py +++ b/valet/tests/functional/valet_validator/tests/test_diversity.py @@ -18,14 +18,14 @@ from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF -from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase +from valet.tests.functional.valet_validator.tests.functional_base \ + import FunctionalTestCase -opt_test_div = \ - [ - cfg.StrOpt('STACK_NAME', default="basic_diversity_stack"), - cfg.StrOpt('TEMPLATE_NAME', default="diversity_basic_2_instances"), - ] +opt_test_div = [ + cfg.StrOpt('STACK_NAME', default="basic_diversity_stack"), + cfg.StrOpt('TEMPLATE_NAME', default="diversity_basic_2_instances"), +] CONF.register_opts(opt_test_div, group="test_diversity") LOG = logging.getLogger(__name__) diff --git a/valet/tests/functional/valet_validator/tests/test_exclusivity.py b/valet/tests/functional/valet_validator/tests/test_exclusivity.py index fba109a..c920680 100644 --- a/valet/tests/functional/valet_validator/tests/test_exclusivity.py +++ b/valet/tests/functional/valet_validator/tests/test_exclusivity.py @@ -18,14 +18,14 @@ from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF -from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase +from valet.tests.functional.valet_validator.tests.functional_base \ + import FunctionalTestCase -opt_test_ex = \ - [ - cfg.StrOpt('STACK_NAME', default="basic_exclusivity_stack"), - cfg.StrOpt('TEMPLATE_NAME', default="exclusivity_basic_2_instances"), - ] +opt_test_ex 
= [ + cfg.StrOpt('STACK_NAME', default="basic_exclusivity_stack"), + cfg.StrOpt('TEMPLATE_NAME', default="exclusivity_basic_2_instances"), +] CONF.register_opts(opt_test_ex, group="test_exclusivity") LOG = logging.getLogger(__name__) diff --git a/valet/tests/functional/valet_validator/tests/test_groups.py b/valet/tests/functional/valet_validator/tests/test_groups.py index bb3a8f9..54e8bba 100644 --- a/valet/tests/functional/valet_validator/tests/test_groups.py +++ b/valet/tests/functional/valet_validator/tests/test_groups.py @@ -17,8 +17,10 @@ from valet.tests.functional.valet_validator.common.auth import Auth from valet.tests.functional.valet_validator.common import GeneralLogger -from valet.tests.functional.valet_validator.group_api.valet_group import ValetGroup -from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase +from valet.tests.functional.valet_validator.group_api.valet_group \ + import ValetGroup +from valet.tests.functional.valet_validator.tests.functional_base \ + import FunctionalTestCase class TestGroups(FunctionalTestCase): @@ -65,8 +67,8 @@ class TestGroups(FunctionalTestCase): GeneralLogger.log_group(str(self.groups.get_list_groups())) GeneralLogger.log_group("Create test member (NOT tenant ID)") - member_respone = self.groups.update_group_members(grp_id, - members="test_member") + member_respone = self.groups.update_group_members( + grp_id, members="test_member") self.assertEqual(409, member_respone.status_code, "update_group_members failed with code %s" % member_respone.status_code) diff --git a/valet/tests/functional/valet_validator/tests/test_nested.py b/valet/tests/functional/valet_validator/tests/test_nested.py index 6b64ba1..3fda044 100644 --- a/valet/tests/functional/valet_validator/tests/test_nested.py +++ b/valet/tests/functional/valet_validator/tests/test_nested.py @@ -18,14 +18,14 @@ from oslo_config import cfg from oslo_log import log as logging from valet.tests.functional.valet_validator.common.init import CONF -from valet.tests.functional.valet_validator.tests.functional_base import FunctionalTestCase +from valet.tests.functional.valet_validator.tests.functional_base \ + import FunctionalTestCase -opt_test_aff = \ - [ - cfg.StrOpt('STACK_NAME', default="nest_stack"), - cfg.StrOpt('TEMPLATE_NAME', default="diversity_between_2_affinity"), - ] +opt_test_aff = [ + cfg.StrOpt('STACK_NAME', default="nest_stack"), + cfg.StrOpt('TEMPLATE_NAME', default="diversity_between_2_affinity"), +] CONF.register_opts(opt_test_aff, group="test_nested") LOG = logging.getLogger(__name__) diff --git a/valet/tests/tempest/scenario/analyzer.py b/valet/tests/tempest/scenario/analyzer.py index aaee675..9497b20 100644 --- a/valet/tests/tempest/scenario/analyzer.py +++ b/valet/tests/tempest/scenario/analyzer.py @@ -102,7 +102,8 @@ class Analyzer(object): try: for i in range(len(servers_list["servers"])): - server = self.nova_client.show_server(servers_list["servers"][i]["id"]) + server = self.nova_client.show_server( + servers_list["servers"][i]["id"]) host_name = server["server"]["OS-EXT-SRV-ATTR:host"] instance_name = servers_list["servers"][i]["name"] @@ -110,7 +111,8 @@ class Analyzer(object): self.instances_on_host[host_name].append(instance_name) except Exception: - self.log.log_error("Exception trying to show_server: %s" % traceback.format_exc()) + self.log.log_error( + "Exception trying to show_server: %s" % traceback.format_exc()) if self.tries > 0: time.sleep(CONF.valet.PAUSE) self.tries -= 1 @@ -127,7 +129,8 @@ class Analyzer(object): 
"""Return host of instance with matching name.""" hosts = [] - self.log.log_debug("host - instance dictionary is: %s" % self.host_instance_dict) + self.log.log_debug( + "host - instance dictionary is: %s" % self.host_instance_dict) for res in res_name: name = self.get_instance_name(res) @@ -150,7 +153,8 @@ class Analyzer(object): except Exception as ex: self.log.log_error("Exception while verifying instances are on " - "the same host/racks: %s" % ex, traceback.format_exc()) + "different hosts/racks: " + "%s" % ex, traceback.format_exc()) return False return True @@ -169,18 +173,22 @@ class Analyzer(object): except Exception as ex: self.log.log_error("Exception while verifying instances are on " - "different hosts/racks: %s" % ex, traceback.format_exc()) + "different hosts/racks: " + "%s" % ex, traceback.format_exc()) return False return True def are_we_alone(self, ins_for_group, level): """Return True if no other instances in group on server.""" - self.log.log_info("verifying instances are on the same group hosts/racks") + self.log.log_info("verifying instances are on the " + "same group hosts/racks") exclusivity_group_hosts = self.get_exclusivity_group_hosts() - self.log.log_debug("exclusivity group hosts are: %s " % exclusivity_group_hosts) - self.log.log_debug("instances on host are: %s " % self.instances_on_host) + self.log.log_debug( + "exclusivity group hosts are: %s " % exclusivity_group_hosts) + self.log.log_debug( + "instances on host are: %s " % self.instances_on_host) # instances - all the instances on the exclusivity group hosts for host in exclusivity_group_hosts: @@ -189,7 +197,8 @@ class Analyzer(object): self.log.log_debug("exclusivity group instances are: %s " % instances) if level == "rack": - instances = self.get_rack_instances(set(self.host_instance_dict.values())) + instances = self.get_rack_instances( + set(self.host_instance_dict.values())) # host_instance_dict should be all the instances on the rack if len(instances) < 1: @@ -215,13 +224,14 @@ class Analyzer(object): return ins_group def get_exclusivity_group_hosts(self): - ''' Get all the hosts that the exclusivity group instances are located on ''' + '''Get all hosts that exclusivity group instances are located on ''' servers_list = self.nova_client.list_servers() exclusivity_hosts = [] for serv in servers_list["servers"]: if "exclusivity" in serv["name"]: server = self.nova_client.show_server(serv["id"]) - exclusivity_hosts.append(server["server"]["OS-EXT-SRV-ATTR:host"]) + exclusivity_hosts.append( + server["server"]["OS-EXT-SRV-ATTR:host"]) return set(exclusivity_hosts) def get_group_instances(self, resources, group_ins): @@ -238,7 +248,9 @@ class Analyzer(object): return ins_for_group except Exception as ex: - self.log.log_error("Exception at method get_group_instances: %s" % ex, traceback.format_exc()) + self.log.log_error( + "Exception at method get_group_instances: %s" % ex, + traceback.format_exc()) return None def get_rack_instances(self, hosts): diff --git a/valet/tests/tempest/scenario/general_logger.py b/valet/tests/tempest/scenario/general_logger.py index 89dc2d1..fb7c4c9 100644 --- a/valet/tests/tempest/scenario/general_logger.py +++ b/valet/tests/tempest/scenario/general_logger.py @@ -40,7 +40,8 @@ class GeneralLogger(object): def log_info(self, msg): """Info log.""" LOG.info("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, - COLORS["L_GREEN"], msg, COLORS["WHITE"])) + COLORS["L_GREEN"], msg, + COLORS["WHITE"])) def log_error(self, msg, trc_back=None): """Log error and trace_back for error 
if there is one.""" @@ -54,7 +55,8 @@ class GeneralLogger(object): def log_debug(self, msg): """Log debug.""" LOG.debug("%s %s - %s %s %s" % (COLORS["L_PURPLE"], self.test_name, - COLORS["L_BLUE"], msg, COLORS["WHITE"])) + COLORS["L_BLUE"], + msg, COLORS["WHITE"])) def log_group(self, msg): """Log info.""" diff --git a/valet/tests/tempest/scenario/scenario_base.py b/valet/tests/tempest/scenario/scenario_base.py index 782c9ef..6be2c77 100644 --- a/valet/tests/tempest/scenario/scenario_base.py +++ b/valet/tests/tempest/scenario/scenario_base.py @@ -101,7 +101,8 @@ class ScenarioTestCase(test.BaseTestCase): for key in groups: if groups[key].group_type == "exclusivity": self.log.log_info(" creating valet group ") - grp_name = data_utils.rand_name(name=groups[key].group_name) + grp_name = data_utils.rand_name( + name=groups[key].group_name) template_resources.template_data = \ template_resources.template_data.replace( groups[key].group_name, grp_name) @@ -119,7 +120,8 @@ class ScenarioTestCase(test.BaseTestCase): return res except Exception: - self.log.log_error("Failed to prepare stack for creation", traceback.format_exc()) + self.log.log_error("Failed to prepare stack for creation", + traceback.format_exc()) return False return True @@ -148,34 +150,43 @@ class ScenarioTestCase(test.BaseTestCase): if os.path.exists(env_url): with open(env_url, "r") as f: filedata = f.read() - filedata = filedata.replace('image_place_holder', CONF.compute.image_ref) - filedata = filedata.replace('flavor_place_holder', CONF.compute.flavor_ref) - filedata = filedata.replace('network_place_holder', CONF.compute.fixed_network_name) + filedata = filedata.replace( + 'image_place_holder', + CONF.compute.image_ref) + filedata = filedata.replace( + 'flavor_place_holder', + CONF.compute.flavor_ref) + filedata = filedata.replace( + 'network_place_holder', + CONF.compute.fixed_network_name) return filedata else: return None except Exception: - self.log.log_error("Failed to load environment file", traceback.format_exc()) + self.log.log_error("Failed to load environment file", + traceback.format_exc()) def _delete_group(self, group_id): try: self.valet_client.delete_all_members(group_id) self.valet_client.delete_group(group_id) except Exception: - self.log.log_error("Failed to delete group", traceback.format_exc()) + self.log.log_error("Failed to delete group", + traceback.format_exc()) raise def delete_stack(self): """Use heat client to delete stack.""" try: - self.heat_client.delete_stack(self.stack_identifier) - self.heat_client.wait_for_stack_status( + self.heat_client.delete_stack(self.stack_identifier) + self.heat_client.wait_for_stack_status( self.stack_identifier, "DELETE_COMPLETE", failure_pattern='^.*DELETE_FAILED$') except Exception: - self.log.log_error("Failed to delete stack", traceback.format_exc()) + self.log.log_error("Failed to delete stack", + traceback.format_exc()) raise def show_stack(self, stack_id): @@ -199,14 +210,16 @@ class ScenarioTestCase(test.BaseTestCase): except exceptions.StackBuildErrorException as ex: if "Ostro error" in str(ex) and self.tries > 0: - self.log.log_error("Ostro error - try number %d" % - (CONF.valet.TRIES_TO_CREATE - self.tries + 2)) + msg = "Ostro error - try number %d" + self.log.log_error( + msg % (CONF.valet.TRIES_TO_CREATE - self.tries + 2)) self.tries -= 1 self.delete_stack() time.sleep(CONF.valet.PAUSE) self.wait_for_stack(stack_name, env_data, template_resources) else: - self.log.log_error("Failed to create stack", traceback.format_exc()) + self.log.log_error("Failed 
to create stack", + traceback.format_exc()) return False return True diff --git a/valet/tests/unit/api/common/test_hooks.py b/valet/tests/unit/api/common/test_hooks.py index 6bb35bc..3834342 100644 --- a/valet/tests/unit/api/common/test_hooks.py +++ b/valet/tests/unit/api/common/test_hooks.py @@ -46,8 +46,9 @@ class TestHooks(ApiBase): mock_threading.Thread.assert_called_once_with( target=mock_conf.messaging.notifier.info, args=( {}, - 'api', {'response': {'body': State.response.body, - 'status_code': State.response.status_code}, + 'api', {'response': + {'body': State.response.body, + 'status_code': State.response.status_code}, 'context': State.request.context, 'request': {'path': 'test_path', 'method': 'test_method', @@ -71,8 +72,9 @@ class TestHooks(ApiBase): mock_threading.Thread.assert_called_once_with( target=mock_conf.messaging.notifier.error, args=( {}, - 'api', {'response': {'body': State.response.body, - 'status_code': State.response.status_code}, + 'api', {'response': + {'body': State.response.body, + 'status_code': State.response.status_code}, 'context': State.request.context, 'request': {'path': 'test_path', 'method': 'test_method', diff --git a/valet/tests/unit/api/db/test_ostro.py b/valet/tests/unit/api/db/test_ostro.py index 43b321a..21012eb 100644 --- a/valet/tests/unit/api/db/test_ostro.py +++ b/valet/tests/unit/api/db/test_ostro.py @@ -12,10 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Test Ostro(Engine).""" - -from valet.api.db.models.music.ostro import PlacementRequest, PlacementResult, Event +from valet.api.db.models.music.ostro import Event +from valet.api.db.models.music.ostro import PlacementRequest +from valet.api.db.models.music.ostro import PlacementResult from valet.tests.unit.api.v1.api_base import ApiBase @@ -46,11 +45,14 @@ class TestOstro(ApiBase): def test__repr__(self): """Test test id in placement request/result and event.""" - self.validate_test("test_stack_id" in self.placement_request.__repr__()) + self.validate_test( + "test_stack_id" in self.placement_request.__repr__()) - self.validate_test("test_stack_id" in self.placement_result.__repr__()) + self.validate_test( + "test_stack_id" in self.placement_result.__repr__()) - self.validate_test("test_event_id" in self.event.__repr__()) + self.validate_test( + "test_event_id" in self.event.__repr__()) def test__json__(self): """Test json return value for placement request, result and event.""" @@ -79,7 +81,8 @@ class TestOstro(ApiBase): def test_pk_value(self): """Test placement request, result and events' pk values.""" - self.validate_test(self.placement_request.pk_value() == "test_stack_id") + self.validate_test( + self.placement_request.pk_value() == "test_stack_id") self.validate_test(self.placement_result.pk_value() == "test_stack_id") diff --git a/valet/tests/unit/api/v1/test_groups.py b/valet/tests/unit/api/v1/test_groups.py index dfa0dce..3642692 100644 --- a/valet/tests/unit/api/v1/test_groups.py +++ b/valet/tests/unit/api/v1/test_groups.py @@ -12,15 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -"""Test Groups.""" - import mock import pecan + from valet.api.db.models.music.groups import Group -from valet.api.db.models.music import Query, Results +from valet.api.db.models.music import Query +from valet.api.db.models.music import Results import valet.api.v1.controllers.groups as groups -from valet.api.v1.controllers.groups import GroupsController, MembersController, GroupsItemController, MembersItemController +from valet.api.v1.controllers.groups import GroupsController +from valet.api.v1.controllers.groups import GroupsItemController +from valet.api.v1.controllers.groups import MembersController +from valet.api.v1.controllers.groups import MembersItemController from valet.tests.unit.api.v1.api_base import ApiBase @@ -92,7 +94,8 @@ class TestGroups(ApiBase): self.validate_test( self.groups_item_controller.allow() == "GET,PUT,DELETE") - self.validate_test(self.members_item_controller.allow() == "GET,DELETE") + self.validate_test( + self.members_item_controller.allow() == "GET,DELETE") @mock.patch.object(groups, 'error', ApiBase.mock_error) @mock.patch.object(groups, 'request') @@ -164,7 +167,8 @@ class TestGroups(ApiBase): @mock.patch.object(groups, 'tenant_servers_in_group') @mock.patch.object(groups, 'request') - def test_index_delete_member_item_controller(self, mock_request, mock_func): + def test_index_delete_member_item_controller(self, mock_request, + mock_func): """Members_item_controller index_delete, check status and members.""" grp = Group("test_name", "test_description", "test_type", None) grp.members = ["demo members"] @@ -214,7 +218,8 @@ class TestGroups(ApiBase): "test_description", "test_type", None)} - r = self.groups_item_controller.index_put(description="new description") + r = self.groups_item_controller.index_put( + description="new description") self.validate_test(groups.response.status == 201) self.validate_test(r.description == "new description") @@ -269,16 +274,18 @@ class TestGroups(ApiBase): self.members_item_controller.index_get() self.validate_test(groups.response.status == 204) - self.validate_test("test_name" in item_controller_response["group"].name) + self.validate_test( + "test_name" in item_controller_response["group"].name) self.validate_test(len(response) == 1) self.validate_test(len(response["groups"]) == len(all_groups)) self.validate_test(all_groups == response["groups"]) def test_index_post(self): """Test group_controller index_post, check status and name.""" - group = self.groups_controller.index_post(name="testgroup", - description="test description", - type="testtype") + group = self.groups_controller.index_post( + name="testgroup", + description="test description", + type="testtype") self.validate_test(groups.response.status == 201) self.validate_test(group.name == "testgroup") diff --git a/valet/tests/unit/api/v1/test_placements.py b/valet/tests/unit/api/v1/test_placements.py index 313e74a..0eea7dc 100644 --- a/valet/tests/unit/api/v1/test_placements.py +++ b/valet/tests/unit/api/v1/test_placements.py @@ -12,16 +12,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -"""Test Placements.""" - import mock -import valet.api.v1.controllers.placements as placements -from valet.api.v1.controllers.placements import PlacementsController, PlacementsItemController -from valet.api.db.models.music import Query -from valet.api.db.models.music import Results + from valet.api.db.models.music.placements import Placement from valet.api.db.models.music.plans import Plan +from valet.api.db.models.music import Query +from valet.api.db.models.music import Results +import valet.api.v1.controllers.placements as placements +from valet.api.v1.controllers.placements import PlacementsController +from valet.api.v1.controllers.placements import PlacementsItemController from valet.tests.unit.api.v1.api_base import ApiBase @@ -70,7 +69,8 @@ class TestPlacements(ApiBase): """Test placements index method with POST and PUT (not allowed).""" mock_request.method = "POST" self.placements_controller.index() - self.validate_test("The POST method is not allowed" in ApiBase.response) + self.validate_test( + "The POST method is not allowed" in ApiBase.response) mock_request.method = "PUT" self.placements_item_controller.index() diff --git a/valet/tests/unit/api/v1/test_plans.py b/valet/tests/unit/api/v1/test_plans.py index c2b15c7..a721eb0 100644 --- a/valet/tests/unit/api/v1/test_plans.py +++ b/valet/tests/unit/api/v1/test_plans.py @@ -12,16 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -"""Test Plans.""" - import mock -import valet.api.v1.controllers.plans as plans -from valet.api.v1.controllers.plans import PlansController, PlansItemController +from valet.api.db.models.music.plans import Plan from valet.api.db.models.music import Query from valet.api.db.models.music import Results -from valet.api.db.models.music.plans import Plan +import valet.api.v1.controllers.plans as plans +from valet.api.v1.controllers.plans import PlansController +from valet.api.v1.controllers.plans import PlansItemController from valet.tests.unit.api.v1.api_base import ApiBase @@ -67,11 +65,13 @@ class TestPlans(ApiBase): """Test plans and plans_item_controller index method failure.""" mock_request.method = "PUT" self.plans_controller.index() - self.validate_test("The PUT method is not allowed" in ApiBase.response) + self.validate_test( + "The PUT method is not allowed" in ApiBase.response) mock_request.method = "POST" self.plans_item_controller.index() - self.validate_test("The POST method is not allowed" in ApiBase.response) + self.validate_test( + "The POST method is not allowed" in ApiBase.response) def test_index_options(self): """Test index_options method for plans and plans_item_controller.""" diff --git a/valet/tests/unit/engine/test_ping.py b/valet/tests/unit/engine/test_ping.py index 877a981..c02a739 100644 --- a/valet/tests/unit/engine/test_ping.py +++ b/valet/tests/unit/engine/test_ping.py @@ -1,10 +1,26 @@ +# +# Copyright 2014-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. import mock + import valet.engine.optimizer.ostro_server.health_checker as ping from valet.engine.optimizer.ostro_server.health_checker import HealthCheck from valet.tests.base import Base -json = r'{"row 0":{"placement": "{\"status\": {\"message\": \"ping\", \"type\": \"ok\"},' \ - r'\"resources\": {\"ip\": \"localhost\", \"id\": %d}}","stack_id":"%s"}}' +json = (r'{"row 0":{"placement": "{\"status\": {\"message\": ' + r'\"ping\", \"type\": \"ok\"},\"resources\": ' + r'{\"ip\": \"localhost\", \"id\": %d}}","stack_id":"%s"}}') class TestHealthCheck(Base): @@ -50,13 +66,15 @@ class TestHealthCheck(Base): def test_read_response(self): mid = 1 self.pingger.rest.request.return_value.status_code = 200 - self.pingger.rest.request.return_value.text = json % (mid, self.pingger.uuid) + self.pingger.rest.request.return_value.text = json % ( + mid, self.pingger.uuid) self.validate_test(self.pingger._read_response()) def test_read_response_from_other_engine(self): my_id = 1 self.pingger.rest.request.return_value.status_code = 200 - self.pingger.rest.request.return_value.text = json % (my_id, self.pingger.uuid) + self.pingger.rest.request.return_value.text = json % ( + my_id, self.pingger.uuid) self.validate_test(not self.pingger._read_response() == 2) def test_read_response_unhappy_wrong_res_code(self):