Fix pep8 failures due to new rules
Change-Id: If5e171a1b1e925ad4d5ed14f1f25e9342b7f34a8
parent e62315115a
commit 6a20317433
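The hunks below are mechanical style fixes: binary operators ("or", "and", "+", "-", "*") move from the end of a wrapped line to the start of the continuation line, regex patterns become raw strings, a few over-indented blocks are re-indented, and two blank lines are enforced before module-level code. Below is a minimal before/after sketch of the two dominant fixes, assuming the new checks are pycodestyle's W504 (line break after binary operator) and W605 (invalid escape sequence); the commit message does not name the exact rules, and the helper names in the sketch are illustrative only, not part of the change.

# Illustrative sketch only; not part of this commit.
import re


def pick_domain(credential, default="Default"):
    # Old style, now flagged: the operator trails the first line.
    #     domain = (credential.get("project_domain_name") or
    #               default)
    # New style: break the line before the operator.
    domain = (credential.get("project_domain_name")
              or default)
    return domain


def strip_git_suffix(source):
    # Old style, now flagged: "\." is an invalid escape in a normal string.
    #     return re.sub("\.git$", "", source)
    # New style: use a raw string for the regex.
    return re.sub(r"\.git$", "", source)


if __name__ == "__main__":
    assert pick_domain({"project_domain_name": None}) == "Default"
    assert strip_git_suffix("rally-openstack.git") == "rally-openstack"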
@@ -98,7 +98,7 @@ class SeekAndDestroy(object):
                 try:
                     if resource.is_deleted():
                         return
-                except Exception as e:
+                except Exception:
                     LOG.exception(
                         "Seems like %s.%s.is_deleted(self) method is broken "
                         "It shouldn't raise any exceptions."
@@ -169,8 +169,8 @@ class SeekAndDestroy(object):
             user=self._get_cached_client(user),
             tenant_uuid=user and user["tenant_id"])
 
-        if (isinstance(manager.name(), base.NoName) or
-                rutils.name_matches_object(
+        if (isinstance(manager.name(), base.NoName)
+                or rutils.name_matches_object(
                     manager.name(), *self.resource_classes,
                     task_id=self.task_id, exact=False)):
             self._delete_single_resource(manager)
@@ -28,8 +28,8 @@ class CheckOpenStackAPIVersionsValidator(validation.Validator):
         for client in plugin_cfg:
             client_cls = osclients.OSClient.get(client)
             try:
-                if ("service_type" in plugin_cfg[client] or
-                        "service_name" in plugin_cfg[client]):
+                if ("service_type" in plugin_cfg[client]
+                        or "service_name" in plugin_cfg[client]):
                     client_cls.is_service_type_configurable()
 
                 if "version" in plugin_cfg[client]:
@@ -126,10 +126,10 @@ class UserGenerator(context.Context):
         else:
             self.existing_users = []
             self.credential = context["admin"]["credential"]
-            project_domain = (self.credential["project_domain_name"] or
-                              cfg.CONF.openstack.project_domain)
-            user_domain = (self.credential["user_domain_name"] or
-                           cfg.CONF.openstack.user_domain)
+            project_domain = (self.credential["project_domain_name"]
+                              or cfg.CONF.openstack.project_domain)
+            user_domain = (self.credential["user_domain_name"]
+                           or cfg.CONF.openstack.user_domain)
             self.DEFAULT_FOR_NEW_USERS["project_domain"] = project_domain
             self.DEFAULT_FOR_NEW_USERS["user_domain"] = user_domain
         with self.config.unlocked():
@@ -88,8 +88,8 @@ class ShareNetworks(context.Context):
     }
 
     def _setup_for_existing_users(self):
-        if (self.config["use_share_networks"] and
-                not self.config["share_networks"]):
+        if (self.config["use_share_networks"]
+                and not self.config["share_networks"]):
             msg = ("Usage of share networks was enabled but for deployment "
                    "with existing users share networks also should be "
                    "specified via arg 'share_networks'")
@@ -189,8 +189,8 @@ class ShareNetworks(context.Context):
             self._setup_for_autocreated_users()
 
     def cleanup(self):
-        if (not self.context["config"].get("existing_users") or
-                self.config["use_share_networks"]):
+        if (not self.context["config"].get("existing_users")
+                or self.config["use_share_networks"]):
             resource_manager.cleanup(
                 names=["manila.share_networks"],
                 users=self.context.get("users", []),
@@ -121,8 +121,8 @@ class AllowSSH(context.Context):
     """Sets up security groups for all users to access VM via SSH."""
 
     def setup(self):
-        admin_or_user = (self.context.get("admin") or
-                         self.context.get("users")[0])
+        admin_or_user = (self.context.get("admin")
+                         or self.context.get("users")[0])
 
         net_wrapper = network.wrap(
             osclients.Clients(admin_or_user["credential"]),
@@ -55,8 +55,8 @@ class AuthenticationFailed(exceptions.AuthenticationFailed):
             # self-sufficient
             self.msg_fmt = self.msg_fmt_2
             message = error.message
-            if (message.startswith("Unable to establish connection to") or
-                    isinstance(error, ks_exc.DiscoveryFailure)):
+            if (message.startswith("Unable to establish connection to")
+                    or isinstance(error, ks_exc.DiscoveryFailure)):
                 if "Max retries exceeded with url" in message:
                     if "HTTPConnectionPool" in message:
                         splitter = ": HTTPConnectionPool"
@@ -141,8 +141,8 @@ class OSClient(plugin.Plugin):
         # version is a string object.
         # For those clients which doesn't accept string value(for example
         # zaqarclient), this method should be overridden.
-        version = (version or
-                   self.credential.api_info.get(self.get_name(), {}).get(
+        version = (version
+                   or self.credential.api_info.get(self.get_name(), {}).get(
                        "version") or self._meta_get("default_version"))
         if version is not None:
             version = str(version)
@@ -175,8 +175,8 @@ class OSClient(plugin.Plugin):
         Choose service type between transmitted(preferable value if present),
         service type from api_info(configured from a context) and default.
         """
-        return (service_type or
-                self.credential.api_info.get(self.get_name(), {}).get(
+        return (service_type
+                or self.credential.api_info.get(self.get_name(), {}).get(
                     "service_type") or self._meta_get("default_service_type"))
 
     @classmethod
@@ -312,8 +312,8 @@ class Keystone(OSClient):
             # available version with the smallest number. To be able to
             # discover versions we need session
             temp_session = session.Session(
-                verify=(self.credential.https_cacert or
-                        not self.credential.https_insecure),
+                verify=(self.credential.https_cacert
+                        or not self.credential.https_insecure),
                 cert=self.credential.https_cert,
                 timeout=CONF.openstack_client_http_timeout)
             version = str(discover.Discover(
@@ -329,8 +329,8 @@ class Keystone(OSClient):
             identity_plugin = identity.Password(**password_args)
             sess = session.Session(
                 auth=identity_plugin,
-                verify=(self.credential.https_cacert or
-                        not self.credential.https_insecure),
+                verify=(self.credential.https_cacert
+                        or not self.credential.https_insecure),
                 cert=self.credential.https_cert,
                 timeout=CONF.openstack_client_http_timeout)
             self.cache[key] = (sess, identity_plugin)
@@ -243,8 +243,8 @@ class OpenStack(platform.Platform):
 
     def info(self):
         """Return information about cloud as dict."""
-        active_user = (self.platform_data["admin"] or
-                       self.platform_data["users"][0])
+        active_user = (self.platform_data["admin"]
+                       or self.platform_data["users"][0])
         services = []
         for stype, name in osclients.Clients(active_user).services().items():
             if name == "__unknown__":
@@ -366,9 +366,9 @@ class OpenStack(platform.Platform):
         project_domain_name = sys_environ.get("OS_PROJECT_DOMAIN_NAME")
         identity_api_version = sys_environ.get(
             "OS_IDENTITY_API_VERSION", sys_environ.get("IDENTITY_API_VERSION"))
-        if (identity_api_version == "3" or
-                (identity_api_version is None and
-                 (user_domain_name or project_domain_name))):
+        if (identity_api_version == "3"
+                or (identity_api_version is None
+                    and (user_domain_name or project_domain_name))):
             # it is Keystone v3 and it has another config scheme
             spec["admin"]["project_name"] = spec["admin"].pop("tenant_name")
             spec["admin"]["user_domain_name"] = user_domain_name or "Default"
@@ -356,8 +356,8 @@ class ManilaScenario(scenario.OpenStackScenario):
         :raises exceptions.InvalidArgumentsException: if invalid arguments
             were provided.
         """
-        if not (key_min_length <= key_max_length and
-                value_min_length <= value_max_length):
+        if not (key_min_length <= key_max_length
+                and value_min_length <= value_max_length):
             raise exceptions.InvalidArgumentsException(
                 "Min length for keys and values of metadata can not be bigger "
                 "than maximum length.")
@@ -19,12 +19,13 @@ from rally_openstack import scenario
 from rally_openstack.scenarios.neutron import utils
 
 
+"""Scenarios for Neutron Networking-Bgpvpn."""
+
+
 def _create_random_route_target():
     return "{}:{}".format(random.randint(0, 65535),
                           random.randint(0, 4294967295))
 
-"""Scenarios for Neutron Networking-Bgpvpn."""
-
 
 @validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                 missed=True)
@@ -499,20 +499,20 @@ class CreateAndDeletePorts(utils.NeutronScenario):
 
     def run(self, network_create_args=None,
             port_create_args=None, ports_per_network=1):
-            """Create and delete a port.
+        """Create and delete a port.
 
-            Measure the "neutron port-create" and "neutron port-delete"
-            commands performance.
+        Measure the "neutron port-create" and "neutron port-delete"
+        commands performance.
 
-            :param network_create_args: dict, POST /v2.0/networks request
-                                        options. Deprecated.
-            :param port_create_args: dict, POST /v2.0/ports request options
-            :param ports_per_network: int, number of ports for one network
-            """
-            network = self._get_or_create_network(network_create_args)
-            for i in range(ports_per_network):
-                port = self._create_port(network, port_create_args)
-                self._delete_port(port)
+        :param network_create_args: dict, POST /v2.0/networks request
+                                    options. Deprecated.
+        :param port_create_args: dict, POST /v2.0/ports request options
+        :param ports_per_network: int, number of ports for one network
+        """
+        network = self._get_or_create_network(network_create_args)
+        for i in range(ports_per_network):
+            port = self._create_port(network, port_create_args)
+            self._delete_port(port)
 
 
 @validation.add("number", param_name="ports_per_network", minval=1,
@@ -546,9 +546,9 @@ class CreateAndBindPorts(utils.NeutronScenario):
         # successfully. Look at agent types used in the gate.
         host_to_bind = None
         for agent in self.context["networking_agents"]:
-            if (agent["admin_state_up"] and
-                    agent["alive"] and
-                    agent["agent_type"] in
+            if (agent["admin_state_up"]
+                    and agent["alive"]
+                    and agent["agent_type"] in
                     cfg.CONF.openstack.neutron_bind_l2_agent_types):
                 host_to_bind = agent["host"]
         if host_to_bind is None:
@@ -28,7 +28,7 @@ class SenlinScenario(scenario.OpenStackScenario):
     def _list_clusters(self, **queries):
         """Return user cluster list.
 
-        :param kwargs \*\*queries: Optional query parameters to be sent to
+        :param kwargs **queries: Optional query parameters to be sent to
             restrict the clusters to be returned. Available parameters include:
 
         * name: The name of a cluster.
@@ -81,8 +81,8 @@ class ValidCommandValidator(validators.FileExistsValidator):
             interpreter = (interpreter[-1]
                            if isinstance(interpreter, (tuple, list))
                            else interpreter)
-            if (command.get("local_path") and
-                    command.get("remote_path") != interpreter):
+            if (command.get("local_path")
+                    and command.get("remote_path") != interpreter):
                 self.fail(
                     "When uploading an interpreter its path should be as well"
                     " specified as the `remote_path' string: %r" % command)
@@ -359,6 +359,7 @@ class RuncommandHeat(vm_utils.VMScenario):
                           "rows": rows}}
         )
 
+
 BASH_DD_LOAD_TEST = """
 #!/bin/sh
 # Load server and output JSON results ready to be processed
@@ -382,8 +383,8 @@ get_used_cpu_percent() {
 
 get_used_ram_percent() {
     local total=$(free | grep Mem: | awk '{print $2}')
-    local used=$(free | grep -- -/+\ buffers | awk '{print $3}')
-    echo ${used} 100 \* ${total} / p | dc
+    local used=$(free | grep -- -/+\\ buffers | awk '{print $3}')
+    echo ${used} 100 \\* ${total} / p | dc
 }
 
 get_used_disk_percent() {
@@ -60,8 +60,8 @@ class GrafanaService(service.Service):
                       self._spec["grafana"]["password"]))
             result = resp.json()
             LOG.debug("Grafana response code: %s" % resp.status_code)
-            no_result = (result.get("data") is None or
-                         len(result["data"]["result"]) < 1)
+            no_result = (result.get("data") is None
+                         or len(result["data"]["result"]) < 1)
             if no_result and i + 1 >= retries_total:
                 LOG.debug("No instance metrics found in Grafana")
                 return False
@@ -80,8 +80,8 @@ class TempestConfigfileManager(object):
         from keystoneauth1 import session
 
         temp_session = session.Session(
-            verify=(self.credential.https_cacert or
-                    not self.credential.https_insecure),
+            verify=(self.credential.https_cacert
+                    or not self.credential.https_insecure),
             timeout=CONF.openstack_client_http_timeout)
         data = discover.Discover(temp_session, auth_url).version_data()
         return dict([(v["version"][0], v["url"]) for v in data])
@@ -150,10 +150,10 @@ class TempestConfigfileManager(object):
     def _configure_network(self, section_name="network"):
         if "neutron" in self.available_services:
             neutronclient = self.clients.neutron()
-            public_nets = [net for net
-                           in neutronclient.list_networks()["networks"]
-                           if net["status"] == "ACTIVE" and
-                           net["router:external"] is True]
+            public_nets = [
+                net for net in neutronclient.list_networks()["networks"]
+                if net["status"] == "ACTIVE" and net["router:external"] is True
+            ]
             if public_nets:
                 net_id = public_nets[0]["id"]
                 net_name = public_nets[0]["name"]
@@ -252,8 +252,8 @@ class TempestContext(context.VerifierContext):
                   "RAM = %(ram)dMB, VCPUs = 1, disk >= %(disk)dGiB." %
                   {"ram": flv_ram, "disk": flv_disk})
         for flavor in novaclient.flavors.list():
-            if (flavor.ram == flv_ram and
-                    flavor.vcpus == 1 and flavor.disk >= flv_disk):
+            if (flavor.ram == flv_ram
+                    and flavor.vcpus == 1 and flavor.disk >= flv_disk):
                 LOG.debug("The following flavor discovered: '{0}'. "
                           "Using flavor '{0}' (ID = {1}) for the tests."
                           .format(flavor.name, flavor.id))
@@ -27,9 +27,9 @@ from rally_openstack.verification.tempest import config
 from rally_openstack.verification.tempest import consts
 
 
-AVAILABLE_SETS = (list(consts.TempestTestSets) +
-                  list(consts.TempestApiTestSets) +
-                  list(consts.TempestScenarioTestSets))
+AVAILABLE_SETS = (list(consts.TempestTestSets)
+                  + list(consts.TempestApiTestSets)
+                  + list(consts.TempestScenarioTestSets))
 
 
 @manager.configure(name="tempest", platform="openstack",
@@ -131,7 +131,7 @@ class TempestManager(testr.TestrLauncher):
                 "'%s' verifiers don't support extra installation settings "
                 "for extensions." % self.get_name())
         version = version or "master"
-        egg = re.sub("\.git$", "", os.path.basename(source.strip("/")))
+        egg = re.sub(r"\.git$", "", os.path.basename(source.strip("/")))
         full_source = "git+{0}@{1}#egg={2}".format(source, version, egg)
         # NOTE(ylobankov): Use 'develop mode' installation to provide an
         # ability to advanced users to change tests or
@@ -594,23 +594,21 @@ def main():
     # filter out expected additions
     expected = []
     for resource in added:
-        if (
-                (resource["cls"] == "keystone" and
-                 resource["resource_name"] == "role" and
-                 resource["id"].get("name") == "_member_") or
+        if (False  # <- makes indent of other cases similar
+                or (resource["cls"] == "keystone"
+                    and resource["resource_name"] == "role"
+                    and resource["id"].get("name") == "_member_")
+                or (resource["cls"] == "neutron"
+                    and resource["resource_name"] == "security_group"
+                    and resource["id"].get("name") == "default")
+                or (resource["cls"] == "cinder"
+                    and resource["resource_name"] == "volume"
+                    and resource["id"].get("name") in volume_names)
 
-                (resource["cls"] == "neutron" and
-                 resource["resource_name"] == "security_group" and
-                 resource["id"].get("name") == "default") or
-
-                (resource["cls"] == "cinder" and
-                 resource["resource_name"] == "volume" and
-                 resource["id"].get("name") in volume_names) or
-
-                resource["cls"] == "murano" or
+                or resource["cls"] == "murano"
 
                 # Glance has issues with uWSGI integration...
-                resource["cls"] == "glance"):
+                or resource["cls"] == "glance"):
             expected.append(resource)
 
     for resource in expected:
@@ -115,5 +115,6 @@ def main(args):
     if exit_code == 1:
         error("")
 
+
 if __name__ == "__main__":
     sys.exit(main(sys.argv))
@@ -539,5 +539,6 @@ def main():
         return 0
     return 1
 
+
 if __name__ == "__main__":
     sys.exit(main())
@@ -121,8 +121,8 @@ class PYPIPackage(object):
         return self._license
 
     def __eq__(self, other):
-        return (isinstance(other, PYPIPackage) and
-                self.package_name == other.package_name)
+        return (isinstance(other, PYPIPackage)
+                and self.package_name == other.package_name)
 
 
 class Requirement(PYPIPackage):
@@ -181,9 +181,11 @@ class Requirement(PYPIPackage):
 
         min_equal_to_max = False
         if self.version["min"] and self.version["max"]:
-            if (self.version["min"].startswith(">=") and
-                    self.version["max"].startswith("<=") and
-                    self.version["min"][2:] == self.version["max"][2:]):
+            if (
+                    self.version["min"].startswith(">=")
+                    and self.version["max"].startswith("<=")
+                    and self.version["min"][2:] == self.version["max"][2:]
+            ):
                 # min and max versions are equal there is no need to write
                 # both of them
                 min_equal_to_max = True
@@ -221,8 +223,8 @@ class Requirement(PYPIPackage):
         return string
 
     def __eq__(self, other):
-        return (isinstance(other, self.__class__) and
-                self.package_name == other.package_name)
+        return (isinstance(other, self.__class__)
+                and self.package_name == other.package_name)
 
     def __ne__(self, other):
         return not self.__eq__(other)
@@ -278,15 +280,15 @@ def parse_data(raw_data, include_comments=True, dependency_cls=Requirement):
                 requirements[-1].finish_him()
                 requirements.append(Comment(finished=True))
         else:
-            if (isinstance(requirements[-1], Comment) and
-                    not requirements[-1].is_finished):
+            if (isinstance(requirements[-1], Comment)
+                    and not requirements[-1].is_finished):
                 requirements[-1].finish_him()
 
             # parse_line
             dep = dependency_cls.parse_line(line)
             if dep:
-                if (isinstance(requirements[-1], Comment) and
-                        DO_NOT_TOUCH_TAG in str(requirements[-1])):
+                if (isinstance(requirements[-1], Comment)
+                        and DO_NOT_TOUCH_TAG in str(requirements[-1])):
                     dep.do_not_touch = True
                 requirements.append(dep)
 
@@ -244,8 +244,8 @@ def assert_equal_none(logical_line, physical_line, filename):
 
     N322
     """
-    res = (re_assert_equal_start_with_none.search(logical_line) or
-           re_assert_equal_end_with_none.search(logical_line))
+    res = (re_assert_equal_start_with_none.search(logical_line)
+           or re_assert_equal_end_with_none.search(logical_line))
    if res:
        yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) "
                  "sentences not allowed, you should use assertIsNone(A) "
@@ -262,8 +262,9 @@ def assert_true_or_false_with_in(logical_line, physical_line, filename):
 
     N323
     """
-    res = (re_assert_true_false_with_in_or_not_in.search(logical_line) or
-           re_assert_true_false_with_in_or_not_in_spaces.search(logical_line))
+    res = (re_assert_true_false_with_in_or_not_in.search(logical_line)
+           or re_assert_true_false_with_in_or_not_in_spaces.search(
+               logical_line))
    if res:
        yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not "
                  "allowed, you should use assertIn(A, B) or assertNotIn(A, B)"
@@ -280,8 +281,8 @@ def assert_equal_in(logical_line, physical_line, filename):
 
     N324
    """
-    res = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or
-           re_assert_equal_in_start_with_true_or_false.search(logical_line))
+    res = (re_assert_equal_in_end_with_true_or_false.search(logical_line)
+           or re_assert_equal_in_start_with_true_or_false.search(logical_line))
    if res:
        yield (0, "N324: Use assertIn/NotIn(A, B) rather than "
                  "assertEqual(A in/not in B, True/False) when checking "
@@ -294,8 +295,8 @@ def assert_not_equal_none(logical_line, physical_line, filename):
 
     N325
    """
-    res = (re_assert_not_equal_start_with_none.search(logical_line) or
-           re_assert_not_equal_end_with_none.search(logical_line))
+    res = (re_assert_not_equal_start_with_none.search(logical_line)
+           or re_assert_not_equal_end_with_none.search(logical_line))
    if res:
        yield (0, "N325 assertNotEqual(A, None) or assertNotEqual(None, A) "
                  "sentences not allowed, you should use assertIsNotNone(A) "
@@ -311,8 +312,8 @@ def assert_equal_true_or_false(logical_line, physical_line, filename):
 
     N326
    """
-    res = (re_assert_equal_end_with_true_or_false.search(logical_line) or
-           re_assert_equal_start_with_true_or_false.search(logical_line))
+    res = (re_assert_equal_end_with_true_or_false.search(logical_line)
+           or re_assert_equal_start_with_true_or_false.search(logical_line))
    if res:
        yield (0, "N326 assertEqual(A, True/False) or "
                  "assertEqual(True/False, A) sentences not allowed,"
@@ -371,8 +372,8 @@ def check_quotes(logical_line, physical_line, filename):
 
     check_tripple = (
         lambda line, i, char: (
-            i + 2 < len(line) and
-            (char == line[i] == line[i + 1] == line[i + 2])
+            i + 2 < len(line)
+            and (char == line[i] == line[i + 1] == line[i + 2])
         )
     )
 
@@ -434,9 +435,9 @@ def check_dict_formatting_in_string(logical_line, tokens):
     # NOTE(stpierre): Can't use @skip_ignored_lines here because it's
     # a stupid decorator that only works on functions that take
     # (logical_line, filename) as arguments.
-    if (not logical_line or
-            logical_line.startswith("#") or
-            logical_line.endswith("# noqa")):
+    if (not logical_line
+            or logical_line.startswith("#")
+            or logical_line.endswith("# noqa")):
         return
 
     current_string = ""
@@ -553,8 +553,8 @@ class NeutronPortTestCase(test.TestCase):
         for port in ports:
             if port["tenant_id"] == tenant_uuid:
                 expected_ports.append(copy.deepcopy(port))
-                if ("device_id" in port and
-                        port["device_id"].startswith("router")):
+                if ("device_id" in port
+                        and port["device_id"].startswith("router")):
                     expected_ports[-1]["parent_name"] = [
                         r for r in routers
                         if r["id"] == port["device_id"]][0]["name"]
@@ -128,8 +128,8 @@ class ImageGeneratorTestCase(test.ScenarioTestCase):
         wrapper_calls.extend(
             [mock.call().create_image(
                 container_format, image_url, disk_format,
-                name=mock.ANY, **expected_image_args)] *
-            tenants * images_per_tenant)
+                name=mock.ANY, **expected_image_args)]
+            * tenants * images_per_tenant)
 
         mock_clients.assert_has_calls([mock.call(mock.ANY)] * tenants)
 
@@ -438,9 +438,9 @@ class UserGeneratorForNewUsersTestCase(test.ScenarioTestCase):
         identity_service = mock_identity.Identity.return_value
         identity_service.create_user.side_effect = Exception()
         with users.UserGenerator(self.context) as ctx:
-                self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)
-                mock_log_warning.assert_called_with(
-                    "Failed to consume a task from the queue: ")
+            self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)
+            mock_log_warning.assert_called_with(
+                "Failed to consume a task from the queue: ")
 
             # Ensure that tenants get deleted anyway
             self.assertEqual(0, len(ctx.context["tenants"]))
@@ -271,10 +271,9 @@ class ShareNetworksTestCase(test.TestCase):
             mock_manila_scenario__add_security_service_to_share_network)
         mock_add_security_service_to_share_network.assert_has_calls([
             mock.call(mock.ANY, mock.ANY)
-            for i in range(
-                self.TENANTS_AMOUNT *
-                networks_per_tenant *
-                len(self.SECURITY_SERVICES))])
+            for _ in range(
+                self.TENANTS_AMOUNT * networks_per_tenant
+                * len(self.SECURITY_SERVICES))])
         if neutron:
             sn_args = {
                 "neutron_net_id": mock.ANY,
@@ -93,8 +93,8 @@ class MonascaMetricGeneratorTestCase(test.TestCase):
         second_call = mock.call(monasca_utils.CONF.openstack.
                                 monasca_metric_create_prepoll_delay,
                                 atomic_delay=1)
-        self.assertEqual([first_call] * metrics_per_tenant * tenants_count +
-                         [second_call],
-                         mock_interruptable_sleep.call_args_list,
-                         "Method interruptable_sleep should be called "
-                         "tenant counts times metrics plus one")
+        self.assertEqual(
+            [first_call] * metrics_per_tenant * tenants_count + [second_call],
+            mock_interruptable_sleep.call_args_list,
+            "Method interruptable_sleep should be called tenant counts times "
+            "metrics plus one")
@@ -39,7 +39,7 @@ class TestFormat(testtools.TestCase):
             if "http://" in line or "https://" in line or ":ref:" in line:
                 continue
             # Allow lines which do not contain any whitespace
-            if re.match("\s*[^\s]+$", line):
+            if re.match(r"\s*[^\s]+$", line):
                 continue
             if not text_inside_simple_tables:
                 self.assertTrue(
@@ -59,7 +59,7 @@ class TestFormat(testtools.TestCase):
 
     def _check_trailing_spaces(self, doc_file, raw):
         for i, line in enumerate(raw.split("\n")):
-            trailing_spaces = re.findall("\s+$", line)
+            trailing_spaces = re.findall(r"\s+$", line)
             self.assertEqual(
                 len(trailing_spaces), 0,
                 "Found trailing spaces on line %s of %s" % (i + 1, doc_file))
@@ -154,8 +154,8 @@ class TaskSampleTestCase(test.TestCase):
         bad_filenames = []
         for dirname, dirnames, filenames in os.walk(self.samples_path):
             for filename in filenames:
-                if "_" in filename and (filename.endswith(".yaml") or
-                                        filename.endswith(".json")):
+                if "_" in filename and (filename.endswith(".yaml")
+                                        or filename.endswith(".json")):
                     full_path = os.path.join(dirname, filename)
                     bad_filenames.append(full_path)
 
@@ -1190,8 +1190,8 @@ class FakeNeutronClient(object):
     def add_interface_router(self, router_id, data):
         subnet_id = data["subnet_id"]
 
-        if (router_id not in self.__routers or
-                subnet_id not in self.__subnets):
+        if (router_id not in self.__routers
+                or subnet_id not in self.__subnets):
             raise neutron_exceptions.NeutronClientException
 
         subnet = self.__subnets[subnet_id]
@@ -53,8 +53,9 @@ class RallyJobsTestCase(test.TestCase):
         discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))
 
         files = {f for f in os.listdir(self.rally_jobs_path)
-                 if (os.path.isfile(os.path.join(self.rally_jobs_path, f)) and
-                     f.endswith(".yaml") and not f.endswith("_args.yaml"))}
+                 if (os.path.isfile(os.path.join(self.rally_jobs_path, f))
+                     and f.endswith(".yaml")
+                     and not f.endswith("_args.yaml"))}
 
         # TODO(andreykurilin): figure out why it fails
         files -= {"rally-mos.yaml", "sahara-clusters.yaml"}
@@ -44,8 +44,8 @@ class CeilometerScenarioTestCase(test.ScenarioTestCase):
                     "resource_id": "fake_uuid",
                     "timestamp": test_timestamp.isoformat()}
         self.assertEqual(expected, result[0][0])
-        samples_int = (parser.parse(result[0][0]["timestamp"]) -
-                       parser.parse(result[0][1]["timestamp"])).seconds
+        samples_int = (parser.parse(result[0][0]["timestamp"])
+                       - parser.parse(result[0][1]["timestamp"])).seconds
         self.assertEqual(60, samples_int)
 
     @mock.patch("%s.uuid.uuid4" % CEILOMETER_UTILS)
@@ -63,8 +63,8 @@ class CeilometerScenarioTestCase(test.ScenarioTestCase):
                     "resource_id": "fake_uuid",
                     "timestamp": test_timestamp.isoformat()}
         self.assertEqual(expected, result[0][0])
-        samples_int = (parser.parse(result[0][-1]["timestamp"]) -
-                       parser.parse(result[1][0]["timestamp"])).seconds
+        samples_int = (parser.parse(result[0][-1]["timestamp"])
+                       - parser.parse(result[1][0]["timestamp"])).seconds
         # NOTE(idegtiarov): here we check that interval between last sample in
         # first batch and first sample in second batch is equal 60 sec.
         self.assertEqual(60, samples_int)
@@ -69,8 +69,6 @@ class ManilaSharesTestCase(test.ScenarioTestCase):
         "params": {
             "share_proto": "cifs",
             "size": 4,
-            "share_network": "foo",
-            "share_type": "bar",
             "snapshot_id": "snapshot_foo",
             "description": "foo_description",
             "metadata": {"foo_metadata": "foo"},
@@ -122,8 +120,6 @@ class ManilaSharesTestCase(test.ScenarioTestCase):
         "params": {
             "share_proto": "cifs",
             "size": 4,
-            "share_network": "foo",
-            "share_type": "bar",
             "snapshot_id": "snapshot_foo",
             "description": "foo_description",
             "metadata": {"foo_metadata": "foo"},
@@ -182,8 +178,6 @@ class ManilaSharesTestCase(test.ScenarioTestCase):
             "access_level": "ro",
             "share_proto": "cifs",
             "size": 4,
-            "share_network": "foo",
-            "share_type": "bar",
             "snapshot_id": "snapshot_foo",
             "description": "foo_description",
             "metadata": {"foo_metadata": "foo"},
@@ -84,8 +84,8 @@ class DDTDecoratorCheckerTestCase(test.TestCase):
 
         for dirname, dirnames, filenames in os.walk(self.tests_path):
             for filename in filenames:
-                if not (filename.startswith("test_") and
-                        filename.endswith(".py")):
+                if not (filename.startswith("test_")
+                        and filename.endswith(".py")):
                     continue
 
                 filename = os.path.relpath(os.path.join(dirname, filename))
@@ -192,8 +192,8 @@ class FuncMockArgsDecoratorsChecker(ast.NodeVisitor):
             if funcname == "mock.patch":
                 decname = self._get_value(decorator.args[0])
             elif funcname == "mock.patch.object":
-                decname = (self._get_name(decorator.args[0]) + "." +
-                           self._get_value(decorator.args[1]))
+                decname = (self._get_name(decorator.args[0]) + "."
+                           + self._get_value(decorator.args[1]))
             else:
                 continue
 
@@ -306,8 +306,8 @@ class MockUsageCheckerTestCase(test.TestCase):
 
         for dirname, dirnames, filenames in os.walk(self.tests_path):
             for filename in filenames:
-                if (not filename.startswith("test_") or
-                        not filename.endswith(".py")):
+                if (not filename.startswith("test_")
+                        or not filename.endswith(".py")):
                     continue
 
                 filename = os.path.relpath(os.path.join(dirname, filename))
@@ -153,7 +153,7 @@ class FlavorTestCase(test.TestCase):
             resource_spec=resource_spec, config={})
 
     def test_preprocess_by_regex(self):
-        resource_spec = {"regex": "m(1|2)\.nano"}
+        resource_spec = {"regex": r"m(1|2)\.nano"}
         flavor_id = self.type_cls.pre_process(
             resource_spec=resource_spec, config={})
         self.assertEqual("42", flavor_id)