Fix pep8 failures due to new rules
Change-Id: If5e171a1b1e925ad4d5ed14f1f25e9342b7f34a8
commit 6a20317433 (parent e62315115a)
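Most of the hunks below apply the same two mechanical fixes: the line break is moved so that a binary operator (and, or, +, -) starts the continuation line instead of ending the previous one, and regular-expression strings containing backslash escapes become raw strings. A minimal sketch of both patterns, assuming the new rules are the pycodestyle W503/W504 operator-placement and W605 invalid-escape-sequence checks; the function names are illustrative only, not code from this repository:

import re


# Old style, now flagged: the binary operator trails the first line and the
# regex escape lives in a plain string literal.
def is_member_role_old(resource):
    return (resource["cls"] == "keystone" and
            resource["id"].get("name") == "_member_")


# New style enforced by the updated rules: the operator leads the
# continuation line, and the regex uses a raw string so "\." stays literal.
def is_member_role_new(resource):
    return (resource["cls"] == "keystone"
            and resource["id"].get("name") == "_member_")


def strip_git_suffix(name):
    return re.sub(r"\.git$", "", name)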
@@ -98,7 +98,7 @@ class SeekAndDestroy(object):
                 try:
                     if resource.is_deleted():
                         return
-                except Exception as e:
+                except Exception:
                     LOG.exception(
                         "Seems like %s.%s.is_deleted(self) method is broken "
                         "It shouldn't raise any exceptions."
@@ -169,8 +169,8 @@ class SeekAndDestroy(object):
             user=self._get_cached_client(user),
             tenant_uuid=user and user["tenant_id"])
 
-        if (isinstance(manager.name(), base.NoName) or
-                rutils.name_matches_object(
+        if (isinstance(manager.name(), base.NoName)
+                or rutils.name_matches_object(
                     manager.name(), *self.resource_classes,
                     task_id=self.task_id, exact=False)):
             self._delete_single_resource(manager)
@@ -28,8 +28,8 @@ class CheckOpenStackAPIVersionsValidator(validation.Validator):
         for client in plugin_cfg:
             client_cls = osclients.OSClient.get(client)
             try:
-                if ("service_type" in plugin_cfg[client] or
-                        "service_name" in plugin_cfg[client]):
+                if ("service_type" in plugin_cfg[client]
+                        or "service_name" in plugin_cfg[client]):
                     client_cls.is_service_type_configurable()
 
                 if "version" in plugin_cfg[client]:
@@ -126,10 +126,10 @@ class UserGenerator(context.Context):
         else:
             self.existing_users = []
             self.credential = context["admin"]["credential"]
-            project_domain = (self.credential["project_domain_name"] or
-                              cfg.CONF.openstack.project_domain)
-            user_domain = (self.credential["user_domain_name"] or
-                           cfg.CONF.openstack.user_domain)
+            project_domain = (self.credential["project_domain_name"]
+                              or cfg.CONF.openstack.project_domain)
+            user_domain = (self.credential["user_domain_name"]
+                           or cfg.CONF.openstack.user_domain)
             self.DEFAULT_FOR_NEW_USERS["project_domain"] = project_domain
             self.DEFAULT_FOR_NEW_USERS["user_domain"] = user_domain
         with self.config.unlocked():
@@ -88,8 +88,8 @@ class ShareNetworks(context.Context):
     }
 
     def _setup_for_existing_users(self):
-        if (self.config["use_share_networks"] and
-                not self.config["share_networks"]):
+        if (self.config["use_share_networks"]
+                and not self.config["share_networks"]):
             msg = ("Usage of share networks was enabled but for deployment "
                    "with existing users share networks also should be "
                    "specified via arg 'share_networks'")
@@ -189,8 +189,8 @@ class ShareNetworks(context.Context):
             self._setup_for_autocreated_users()
 
     def cleanup(self):
-        if (not self.context["config"].get("existing_users") or
-                self.config["use_share_networks"]):
+        if (not self.context["config"].get("existing_users")
+                or self.config["use_share_networks"]):
             resource_manager.cleanup(
                 names=["manila.share_networks"],
                 users=self.context.get("users", []),
@@ -121,8 +121,8 @@ class AllowSSH(context.Context):
     """Sets up security groups for all users to access VM via SSH."""
 
     def setup(self):
-        admin_or_user = (self.context.get("admin") or
-                         self.context.get("users")[0])
+        admin_or_user = (self.context.get("admin")
+                         or self.context.get("users")[0])
 
         net_wrapper = network.wrap(
             osclients.Clients(admin_or_user["credential"]),
@@ -55,8 +55,8 @@ class AuthenticationFailed(exceptions.AuthenticationFailed):
             # self-sufficient
             self.msg_fmt = self.msg_fmt_2
             message = error.message
-            if (message.startswith("Unable to establish connection to") or
-                    isinstance(error, ks_exc.DiscoveryFailure)):
+            if (message.startswith("Unable to establish connection to")
+                    or isinstance(error, ks_exc.DiscoveryFailure)):
                 if "Max retries exceeded with url" in message:
                     if "HTTPConnectionPool" in message:
                         splitter = ": HTTPConnectionPool"
@@ -141,8 +141,8 @@ class OSClient(plugin.Plugin):
         # version is a string object.
         # For those clients which doesn't accept string value(for example
         # zaqarclient), this method should be overridden.
-        version = (version or
-                   self.credential.api_info.get(self.get_name(), {}).get(
+        version = (version
+                   or self.credential.api_info.get(self.get_name(), {}).get(
                        "version") or self._meta_get("default_version"))
         if version is not None:
             version = str(version)
@@ -175,8 +175,8 @@ class OSClient(plugin.Plugin):
         Choose service type between transmitted(preferable value if present),
         service type from api_info(configured from a context) and default.
         """
-        return (service_type or
-                self.credential.api_info.get(self.get_name(), {}).get(
+        return (service_type
+                or self.credential.api_info.get(self.get_name(), {}).get(
                     "service_type") or self._meta_get("default_service_type"))
 
     @classmethod
@@ -312,8 +312,8 @@ class Keystone(OSClient):
             # available version with the smallest number. To be able to
             # discover versions we need session
             temp_session = session.Session(
-                verify=(self.credential.https_cacert or
-                        not self.credential.https_insecure),
+                verify=(self.credential.https_cacert
+                        or not self.credential.https_insecure),
                 cert=self.credential.https_cert,
                 timeout=CONF.openstack_client_http_timeout)
             version = str(discover.Discover(
@@ -329,8 +329,8 @@ class Keystone(OSClient):
             identity_plugin = identity.Password(**password_args)
             sess = session.Session(
                 auth=identity_plugin,
-                verify=(self.credential.https_cacert or
-                        not self.credential.https_insecure),
+                verify=(self.credential.https_cacert
+                        or not self.credential.https_insecure),
                 cert=self.credential.https_cert,
                 timeout=CONF.openstack_client_http_timeout)
             self.cache[key] = (sess, identity_plugin)
@@ -243,8 +243,8 @@ class OpenStack(platform.Platform):
 
     def info(self):
         """Return information about cloud as dict."""
-        active_user = (self.platform_data["admin"] or
-                       self.platform_data["users"][0])
+        active_user = (self.platform_data["admin"]
+                       or self.platform_data["users"][0])
         services = []
         for stype, name in osclients.Clients(active_user).services().items():
             if name == "__unknown__":
@@ -366,9 +366,9 @@ class OpenStack(platform.Platform):
         project_domain_name = sys_environ.get("OS_PROJECT_DOMAIN_NAME")
         identity_api_version = sys_environ.get(
             "OS_IDENTITY_API_VERSION", sys_environ.get("IDENTITY_API_VERSION"))
-        if (identity_api_version == "3" or
-                (identity_api_version is None and
-                 (user_domain_name or project_domain_name))):
+        if (identity_api_version == "3"
+                or (identity_api_version is None
+                    and (user_domain_name or project_domain_name))):
             # it is Keystone v3 and it has another config scheme
             spec["admin"]["project_name"] = spec["admin"].pop("tenant_name")
             spec["admin"]["user_domain_name"] = user_domain_name or "Default"
@@ -356,8 +356,8 @@ class ManilaScenario(scenario.OpenStackScenario):
         :raises exceptions.InvalidArgumentsException: if invalid arguments
             were provided.
         """
-        if not (key_min_length <= key_max_length and
-                value_min_length <= value_max_length):
+        if not (key_min_length <= key_max_length
+                and value_min_length <= value_max_length):
             raise exceptions.InvalidArgumentsException(
                 "Min length for keys and values of metadata can not be bigger "
                 "than maximum length.")
@@ -19,12 +19,13 @@ from rally_openstack import scenario
 from rally_openstack.scenarios.neutron import utils
 
 
+"""Scenarios for Neutron Networking-Bgpvpn."""
+
+
 def _create_random_route_target():
     return "{}:{}".format(random.randint(0, 65535),
                           random.randint(0, 4294967295))
 
-"""Scenarios for Neutron Networking-Bgpvpn."""
-
 
 @validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                 missed=True)
@@ -499,20 +499,20 @@ class CreateAndDeletePorts(utils.NeutronScenario):
 
     def run(self, network_create_args=None,
             port_create_args=None, ports_per_network=1):
         """Create and delete a port.
 
         Measure the "neutron port-create" and "neutron port-delete"
         commands performance.
 
         :param network_create_args: dict, POST /v2.0/networks request
             options. Deprecated.
         :param port_create_args: dict, POST /v2.0/ports request options
         :param ports_per_network: int, number of ports for one network
         """
         network = self._get_or_create_network(network_create_args)
         for i in range(ports_per_network):
             port = self._create_port(network, port_create_args)
             self._delete_port(port)
 
 
 @validation.add("number", param_name="ports_per_network", minval=1,
@@ -546,9 +546,9 @@ class CreateAndBindPorts(utils.NeutronScenario):
         # successfully. Look at agent types used in the gate.
         host_to_bind = None
         for agent in self.context["networking_agents"]:
-            if (agent["admin_state_up"] and
-                    agent["alive"] and
-                    agent["agent_type"] in
+            if (agent["admin_state_up"]
+                    and agent["alive"]
+                    and agent["agent_type"] in
                     cfg.CONF.openstack.neutron_bind_l2_agent_types):
                 host_to_bind = agent["host"]
         if host_to_bind is None:
@@ -28,7 +28,7 @@ class SenlinScenario(scenario.OpenStackScenario):
     def _list_clusters(self, **queries):
         """Return user cluster list.
 
-        :param kwargs \*\*queries: Optional query parameters to be sent to
+        :param kwargs **queries: Optional query parameters to be sent to
             restrict the clusters to be returned. Available parameters include:
 
         * name: The name of a cluster.
@@ -81,8 +81,8 @@ class ValidCommandValidator(validators.FileExistsValidator):
         interpreter = (interpreter[-1]
                        if isinstance(interpreter, (tuple, list))
                        else interpreter)
-        if (command.get("local_path") and
-                command.get("remote_path") != interpreter):
+        if (command.get("local_path")
+                and command.get("remote_path") != interpreter):
             self.fail(
                 "When uploading an interpreter its path should be as well"
                 " specified as the `remote_path' string: %r" % command)
@@ -359,6 +359,7 @@ class RuncommandHeat(vm_utils.VMScenario):
                      "rows": rows}}
         )
 
+
 BASH_DD_LOAD_TEST = """
 #!/bin/sh
 # Load server and output JSON results ready to be processed
@@ -382,8 +383,8 @@ get_used_cpu_percent() {
 
 get_used_ram_percent() {
     local total=$(free | grep Mem: | awk '{print $2}')
-    local used=$(free | grep -- -/+\ buffers | awk '{print $3}')
-    echo ${used} 100 \* ${total} / p | dc
+    local used=$(free | grep -- -/+\\ buffers | awk '{print $3}')
+    echo ${used} 100 \\* ${total} / p | dc
 }
 
 get_used_disk_percent() {
@@ -60,8 +60,8 @@ class GrafanaService(service.Service):
                                       self._spec["grafana"]["password"]))
             result = resp.json()
             LOG.debug("Grafana response code: %s" % resp.status_code)
-            no_result = (result.get("data") is None or
-                         len(result["data"]["result"]) < 1)
+            no_result = (result.get("data") is None
+                         or len(result["data"]["result"]) < 1)
             if no_result and i + 1 >= retries_total:
                 LOG.debug("No instance metrics found in Grafana")
                 return False
@@ -80,8 +80,8 @@ class TempestConfigfileManager(object):
         from keystoneauth1 import session
 
         temp_session = session.Session(
-            verify=(self.credential.https_cacert or
-                    not self.credential.https_insecure),
+            verify=(self.credential.https_cacert
+                    or not self.credential.https_insecure),
             timeout=CONF.openstack_client_http_timeout)
         data = discover.Discover(temp_session, auth_url).version_data()
         return dict([(v["version"][0], v["url"]) for v in data])
@@ -150,10 +150,10 @@ class TempestConfigfileManager(object):
     def _configure_network(self, section_name="network"):
         if "neutron" in self.available_services:
             neutronclient = self.clients.neutron()
-            public_nets = [net for net
-                           in neutronclient.list_networks()["networks"]
-                           if net["status"] == "ACTIVE" and
-                           net["router:external"] is True]
+            public_nets = [
+                net for net in neutronclient.list_networks()["networks"]
+                if net["status"] == "ACTIVE" and net["router:external"] is True
+            ]
             if public_nets:
                 net_id = public_nets[0]["id"]
                 net_name = public_nets[0]["name"]
@@ -252,8 +252,8 @@ class TempestContext(context.VerifierContext):
                   "RAM = %(ram)dMB, VCPUs = 1, disk >= %(disk)dGiB." %
                   {"ram": flv_ram, "disk": flv_disk})
         for flavor in novaclient.flavors.list():
-            if (flavor.ram == flv_ram and
-                    flavor.vcpus == 1 and flavor.disk >= flv_disk):
+            if (flavor.ram == flv_ram
+                    and flavor.vcpus == 1 and flavor.disk >= flv_disk):
                 LOG.debug("The following flavor discovered: '{0}'. "
                           "Using flavor '{0}' (ID = {1}) for the tests."
                           .format(flavor.name, flavor.id))
@@ -27,9 +27,9 @@ from rally_openstack.verification.tempest import config
 from rally_openstack.verification.tempest import consts
 
 
-AVAILABLE_SETS = (list(consts.TempestTestSets) +
-                  list(consts.TempestApiTestSets) +
-                  list(consts.TempestScenarioTestSets))
+AVAILABLE_SETS = (list(consts.TempestTestSets)
+                  + list(consts.TempestApiTestSets)
+                  + list(consts.TempestScenarioTestSets))
 
 
 @manager.configure(name="tempest", platform="openstack",
@@ -131,7 +131,7 @@ class TempestManager(testr.TestrLauncher):
                 "'%s' verifiers don't support extra installation settings "
                 "for extensions." % self.get_name())
         version = version or "master"
-        egg = re.sub("\.git$", "", os.path.basename(source.strip("/")))
+        egg = re.sub(r"\.git$", "", os.path.basename(source.strip("/")))
         full_source = "git+{0}@{1}#egg={2}".format(source, version, egg)
         # NOTE(ylobankov): Use 'develop mode' installation to provide an
         #                  ability to advanced users to change tests or
@@ -594,23 +594,21 @@ def main():
     # filter out expected additions
     expected = []
     for resource in added:
-        if (
-                (resource["cls"] == "keystone" and
-                 resource["resource_name"] == "role" and
-                 resource["id"].get("name") == "_member_") or
-
-                (resource["cls"] == "neutron" and
-                 resource["resource_name"] == "security_group" and
-                 resource["id"].get("name") == "default") or
-
-                (resource["cls"] == "cinder" and
-                 resource["resource_name"] == "volume" and
-                 resource["id"].get("name") in volume_names) or
-
-                resource["cls"] == "murano" or
-
+        if (False  # <- makes indent of other cases similar
+                or (resource["cls"] == "keystone"
+                    and resource["resource_name"] == "role"
+                    and resource["id"].get("name") == "_member_")
+                or (resource["cls"] == "neutron"
+                    and resource["resource_name"] == "security_group"
+                    and resource["id"].get("name") == "default")
+                or (resource["cls"] == "cinder"
+                    and resource["resource_name"] == "volume"
+                    and resource["id"].get("name") in volume_names)
+
+                or resource["cls"] == "murano"
+
                 # Glance has issues with uWSGI integration...
-                resource["cls"] == "glance"):
+                or resource["cls"] == "glance"):
             expected.append(resource)
 
     for resource in expected:
@@ -115,5 +115,6 @@ def main(args):
     if exit_code == 1:
         error("")
 
+
 if __name__ == "__main__":
     sys.exit(main(sys.argv))
@@ -539,5 +539,6 @@ def main():
             return 0
     return 1
 
+
 if __name__ == "__main__":
     sys.exit(main())
@@ -121,8 +121,8 @@ class PYPIPackage(object):
         return self._license
 
     def __eq__(self, other):
-        return (isinstance(other, PYPIPackage) and
-                self.package_name == other.package_name)
+        return (isinstance(other, PYPIPackage)
+                and self.package_name == other.package_name)
 
 
 class Requirement(PYPIPackage):
@@ -181,9 +181,11 @@ class Requirement(PYPIPackage):
 
         min_equal_to_max = False
         if self.version["min"] and self.version["max"]:
-            if (self.version["min"].startswith(">=") and
-                    self.version["max"].startswith("<=") and
-                    self.version["min"][2:] == self.version["max"][2:]):
+            if (
+                    self.version["min"].startswith(">=")
+                    and self.version["max"].startswith("<=")
+                    and self.version["min"][2:] == self.version["max"][2:]
+            ):
                 # min and max versions are equal there is no need to write
                 # both of them
                 min_equal_to_max = True
@@ -221,8 +223,8 @@ class Requirement(PYPIPackage):
         return string
 
     def __eq__(self, other):
-        return (isinstance(other, self.__class__) and
-                self.package_name == other.package_name)
+        return (isinstance(other, self.__class__)
+                and self.package_name == other.package_name)
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -278,15 +280,15 @@ def parse_data(raw_data, include_comments=True, dependency_cls=Requirement):
                 requirements[-1].finish_him()
                 requirements.append(Comment(finished=True))
             else:
-                if (isinstance(requirements[-1], Comment) and
-                        not requirements[-1].is_finished):
+                if (isinstance(requirements[-1], Comment)
+                        and not requirements[-1].is_finished):
                     requirements[-1].finish_him()
 
         # parse_line
         dep = dependency_cls.parse_line(line)
         if dep:
-            if (isinstance(requirements[-1], Comment) and
-                    DO_NOT_TOUCH_TAG in str(requirements[-1])):
+            if (isinstance(requirements[-1], Comment)
+                    and DO_NOT_TOUCH_TAG in str(requirements[-1])):
                 dep.do_not_touch = True
             requirements.append(dep)
 
@@ -244,8 +244,8 @@ def assert_equal_none(logical_line, physical_line, filename):
 
     N322
     """
-    res = (re_assert_equal_start_with_none.search(logical_line) or
-           re_assert_equal_end_with_none.search(logical_line))
+    res = (re_assert_equal_start_with_none.search(logical_line)
+           or re_assert_equal_end_with_none.search(logical_line))
    if res:
         yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) "
                   "sentences not allowed, you should use assertIsNone(A) "
@@ -262,8 +262,9 @@ def assert_true_or_false_with_in(logical_line, physical_line, filename):
 
     N323
     """
-    res = (re_assert_true_false_with_in_or_not_in.search(logical_line) or
-           re_assert_true_false_with_in_or_not_in_spaces.search(logical_line))
+    res = (re_assert_true_false_with_in_or_not_in.search(logical_line)
+           or re_assert_true_false_with_in_or_not_in_spaces.search(
+               logical_line))
     if res:
         yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not "
                   "allowed, you should use assertIn(A, B) or assertNotIn(A, B)"
@@ -280,8 +281,8 @@ def assert_equal_in(logical_line, physical_line, filename):
 
     N324
     """
-    res = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or
-           re_assert_equal_in_start_with_true_or_false.search(logical_line))
+    res = (re_assert_equal_in_end_with_true_or_false.search(logical_line)
+           or re_assert_equal_in_start_with_true_or_false.search(logical_line))
     if res:
         yield (0, "N324: Use assertIn/NotIn(A, B) rather than "
                   "assertEqual(A in/not in B, True/False) when checking "
@@ -294,8 +295,8 @@ def assert_not_equal_none(logical_line, physical_line, filename):
 
     N325
     """
-    res = (re_assert_not_equal_start_with_none.search(logical_line) or
-           re_assert_not_equal_end_with_none.search(logical_line))
+    res = (re_assert_not_equal_start_with_none.search(logical_line)
+           or re_assert_not_equal_end_with_none.search(logical_line))
     if res:
         yield (0, "N325 assertNotEqual(A, None) or assertNotEqual(None, A) "
                   "sentences not allowed, you should use assertIsNotNone(A) "
@@ -311,8 +312,8 @@ def assert_equal_true_or_false(logical_line, physical_line, filename):
 
     N326
     """
-    res = (re_assert_equal_end_with_true_or_false.search(logical_line) or
-           re_assert_equal_start_with_true_or_false.search(logical_line))
+    res = (re_assert_equal_end_with_true_or_false.search(logical_line)
+           or re_assert_equal_start_with_true_or_false.search(logical_line))
     if res:
         yield (0, "N326 assertEqual(A, True/False) or "
                   "assertEqual(True/False, A) sentences not allowed,"
@@ -371,8 +372,8 @@ def check_quotes(logical_line, physical_line, filename):
 
     check_tripple = (
         lambda line, i, char: (
-            i + 2 < len(line) and
-            (char == line[i] == line[i + 1] == line[i + 2])
+            i + 2 < len(line)
+            and (char == line[i] == line[i + 1] == line[i + 2])
         )
     )
 
@@ -434,9 +435,9 @@ def check_dict_formatting_in_string(logical_line, tokens):
     # NOTE(stpierre): Can't use @skip_ignored_lines here because it's
     # a stupid decorator that only works on functions that take
     # (logical_line, filename) as arguments.
-    if (not logical_line or
-            logical_line.startswith("#") or
-            logical_line.endswith("# noqa")):
+    if (not logical_line
+            or logical_line.startswith("#")
+            or logical_line.endswith("# noqa")):
         return
 
     current_string = ""
@@ -553,8 +553,8 @@ class NeutronPortTestCase(test.TestCase):
         for port in ports:
             if port["tenant_id"] == tenant_uuid:
                 expected_ports.append(copy.deepcopy(port))
-                if ("device_id" in port and
-                        port["device_id"].startswith("router")):
+                if ("device_id" in port
+                        and port["device_id"].startswith("router")):
                     expected_ports[-1]["parent_name"] = [
                         r for r in routers
                         if r["id"] == port["device_id"]][0]["name"]
@@ -128,8 +128,8 @@ class ImageGeneratorTestCase(test.ScenarioTestCase):
         wrapper_calls.extend(
             [mock.call().create_image(
                 container_format, image_url, disk_format,
-                name=mock.ANY, **expected_image_args)] *
-            tenants * images_per_tenant)
+                name=mock.ANY, **expected_image_args)]
+            * tenants * images_per_tenant)
 
         mock_clients.assert_has_calls([mock.call(mock.ANY)] * tenants)
 
@@ -438,9 +438,9 @@ class UserGeneratorForNewUsersTestCase(test.ScenarioTestCase):
         identity_service = mock_identity.Identity.return_value
         identity_service.create_user.side_effect = Exception()
         with users.UserGenerator(self.context) as ctx:
             self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)
             mock_log_warning.assert_called_with(
                 "Failed to consume a task from the queue: ")
 
             # Ensure that tenants get deleted anyway
             self.assertEqual(0, len(ctx.context["tenants"]))
@@ -271,10 +271,9 @@ class ShareNetworksTestCase(test.TestCase):
             mock_manila_scenario__add_security_service_to_share_network)
         mock_add_security_service_to_share_network.assert_has_calls([
             mock.call(mock.ANY, mock.ANY)
-            for i in range(
-                self.TENANTS_AMOUNT *
-                networks_per_tenant *
-                len(self.SECURITY_SERVICES))])
+            for _ in range(
+                self.TENANTS_AMOUNT * networks_per_tenant
+                * len(self.SECURITY_SERVICES))])
         if neutron:
             sn_args = {
                 "neutron_net_id": mock.ANY,
@@ -93,8 +93,8 @@ class MonascaMetricGeneratorTestCase(test.TestCase):
         second_call = mock.call(monasca_utils.CONF.openstack.
                                 monasca_metric_create_prepoll_delay,
                                 atomic_delay=1)
-        self.assertEqual([first_call] * metrics_per_tenant * tenants_count +
-                         [second_call],
-                         mock_interruptable_sleep.call_args_list,
-                         "Method interruptable_sleep should be called "
-                         "tenant counts times metrics plus one")
+        self.assertEqual(
+            [first_call] * metrics_per_tenant * tenants_count + [second_call],
+            mock_interruptable_sleep.call_args_list,
+            "Method interruptable_sleep should be called tenant counts times "
+            "metrics plus one")
@@ -39,7 +39,7 @@ class TestFormat(testtools.TestCase):
                 if "http://" in line or "https://" in line or ":ref:" in line:
                     continue
                 # Allow lines which do not contain any whitespace
-                if re.match("\s*[^\s]+$", line):
+                if re.match(r"\s*[^\s]+$", line):
                     continue
                 if not text_inside_simple_tables:
                     self.assertTrue(
@@ -59,7 +59,7 @@ class TestFormat(testtools.TestCase):
 
     def _check_trailing_spaces(self, doc_file, raw):
         for i, line in enumerate(raw.split("\n")):
-            trailing_spaces = re.findall("\s+$", line)
+            trailing_spaces = re.findall(r"\s+$", line)
             self.assertEqual(
                 len(trailing_spaces), 0,
                 "Found trailing spaces on line %s of %s" % (i + 1, doc_file))
@@ -154,8 +154,8 @@ class TaskSampleTestCase(test.TestCase):
         bad_filenames = []
         for dirname, dirnames, filenames in os.walk(self.samples_path):
             for filename in filenames:
-                if "_" in filename and (filename.endswith(".yaml") or
-                                        filename.endswith(".json")):
+                if "_" in filename and (filename.endswith(".yaml")
+                                        or filename.endswith(".json")):
                     full_path = os.path.join(dirname, filename)
                     bad_filenames.append(full_path)
 
@@ -1190,8 +1190,8 @@ class FakeNeutronClient(object):
     def add_interface_router(self, router_id, data):
         subnet_id = data["subnet_id"]
 
-        if (router_id not in self.__routers or
-                subnet_id not in self.__subnets):
+        if (router_id not in self.__routers
+                or subnet_id not in self.__subnets):
             raise neutron_exceptions.NeutronClientException
 
         subnet = self.__subnets[subnet_id]
@@ -53,8 +53,9 @@ class RallyJobsTestCase(test.TestCase):
         discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))
 
         files = {f for f in os.listdir(self.rally_jobs_path)
-                 if (os.path.isfile(os.path.join(self.rally_jobs_path, f)) and
-                     f.endswith(".yaml") and not f.endswith("_args.yaml"))}
+                 if (os.path.isfile(os.path.join(self.rally_jobs_path, f))
+                     and f.endswith(".yaml")
+                     and not f.endswith("_args.yaml"))}
 
         # TODO(andreykurilin): figure out why it fails
         files -= {"rally-mos.yaml", "sahara-clusters.yaml"}
@@ -44,8 +44,8 @@ class CeilometerScenarioTestCase(test.ScenarioTestCase):
                     "resource_id": "fake_uuid",
                     "timestamp": test_timestamp.isoformat()}
         self.assertEqual(expected, result[0][0])
-        samples_int = (parser.parse(result[0][0]["timestamp"]) -
-                       parser.parse(result[0][1]["timestamp"])).seconds
+        samples_int = (parser.parse(result[0][0]["timestamp"])
+                       - parser.parse(result[0][1]["timestamp"])).seconds
         self.assertEqual(60, samples_int)
 
     @mock.patch("%s.uuid.uuid4" % CEILOMETER_UTILS)
@@ -63,8 +63,8 @@ class CeilometerScenarioTestCase(test.ScenarioTestCase):
                     "resource_id": "fake_uuid",
                     "timestamp": test_timestamp.isoformat()}
         self.assertEqual(expected, result[0][0])
-        samples_int = (parser.parse(result[0][-1]["timestamp"]) -
-                       parser.parse(result[1][0]["timestamp"])).seconds
+        samples_int = (parser.parse(result[0][-1]["timestamp"])
+                       - parser.parse(result[1][0]["timestamp"])).seconds
         # NOTE(idegtiarov): here we check that interval between last sample in
         # first batch and first sample in second batch is equal 60 sec.
         self.assertEqual(60, samples_int)
@@ -69,8 +69,6 @@ class ManilaSharesTestCase(test.ScenarioTestCase):
             "params": {
                 "share_proto": "cifs",
                 "size": 4,
-                "share_network": "foo",
-                "share_type": "bar",
                 "snapshot_id": "snapshot_foo",
                 "description": "foo_description",
                 "metadata": {"foo_metadata": "foo"},
@@ -122,8 +120,6 @@ class ManilaSharesTestCase(test.ScenarioTestCase):
             "params": {
                 "share_proto": "cifs",
                 "size": 4,
-                "share_network": "foo",
-                "share_type": "bar",
                 "snapshot_id": "snapshot_foo",
                 "description": "foo_description",
                 "metadata": {"foo_metadata": "foo"},
@@ -182,8 +178,6 @@ class ManilaSharesTestCase(test.ScenarioTestCase):
                 "access_level": "ro",
                 "share_proto": "cifs",
                 "size": 4,
-                "share_network": "foo",
-                "share_type": "bar",
                 "snapshot_id": "snapshot_foo",
                 "description": "foo_description",
                 "metadata": {"foo_metadata": "foo"},
@@ -84,8 +84,8 @@ class DDTDecoratorCheckerTestCase(test.TestCase):
 
         for dirname, dirnames, filenames in os.walk(self.tests_path):
             for filename in filenames:
-                if not (filename.startswith("test_") and
-                        filename.endswith(".py")):
+                if not (filename.startswith("test_")
+                        and filename.endswith(".py")):
                     continue
 
                 filename = os.path.relpath(os.path.join(dirname, filename))
@@ -192,8 +192,8 @@ class FuncMockArgsDecoratorsChecker(ast.NodeVisitor):
             if funcname == "mock.patch":
                 decname = self._get_value(decorator.args[0])
             elif funcname == "mock.patch.object":
-                decname = (self._get_name(decorator.args[0]) + "." +
-                           self._get_value(decorator.args[1]))
+                decname = (self._get_name(decorator.args[0]) + "."
+                           + self._get_value(decorator.args[1]))
             else:
                 continue
 
@@ -306,8 +306,8 @@ class MockUsageCheckerTestCase(test.TestCase):
 
         for dirname, dirnames, filenames in os.walk(self.tests_path):
             for filename in filenames:
-                if (not filename.startswith("test_") or
-                        not filename.endswith(".py")):
+                if (not filename.startswith("test_")
+                        or not filename.endswith(".py")):
                     continue
 
                 filename = os.path.relpath(os.path.join(dirname, filename))
@@ -153,7 +153,7 @@ class FlavorTestCase(test.TestCase):
             resource_spec=resource_spec, config={})
 
     def test_preprocess_by_regex(self):
-        resource_spec = {"regex": "m(1|2)\.nano"}
+        resource_spec = {"regex": r"m(1|2)\.nano"}
         flavor_id = self.type_cls.pre_process(
             resource_spec=resource_spec, config={})
         self.assertEqual("42", flavor_id)