Fix E265 violations and re-enable gating

E265 got stricter in hacking 0.9, so fix new violations and
re-enable gating.

Change-Id: I64159facb0698c19ea0689e9b1dd5266615ddb45
Author: Igor Degtiarov  2014-06-25 13:18:42 +03:00
parent 831c303815
commit a4ad787b3f
20 changed files with 65 additions and 67 deletions
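
For reference, E265 is the pep8/hacking check "block comment should start with '# '": a block comment needs a single hash followed by a space before its text. A minimal before/after sketch of the pattern fixed throughout this change (the comment text and variable name are invented for illustration, not taken from the diff below):

    #note(example): flagged by E265 - no space after the leading '#'
    retention_seconds = 600

    # note(example): compliant block comment - text follows '# '
    retention_seconds = 600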

View File

@@ -41,8 +41,8 @@ class CombinationEvaluator(evaluator.Evaluator):
"""Ensure there is sufficient data for evaluation,
transitioning to unknown otherwise.
"""
-#note(sileht): alarm can be evaluated only with
-#stable state of other alarm
+# note(sileht): alarm can be evaluated only with
+# stable state of other alarm
alarms_missing_states = [alarm_id for alarm_id, state in states
if not state or state == evaluator.UNKNOWN]
sufficient = len(alarms_missing_states) == 0

View File

@@ -190,7 +190,7 @@ class Query(_Base):
field = wtypes.text
"The name of the field to test"
-#op = wsme.wsattr(operation_kind, default='eq')
+# op = wsme.wsattr(operation_kind, default='eq')
# this ^ doesn't seem to work.
op = wsme.wsproperty(operation_kind, get_op, set_op)
"The comparison operator. Defaults to 'eq'."
@@ -434,9 +434,9 @@ def _validate_timestamp_fields(query, field_name, operator_list,
for item in query:
if item.field == field_name:
-#If *timestamp* or *search_offset* field was specified in the
-#query, but timestamp is not supported on that resource, on
-#which the query was invoked, then raise an exception.
+# If *timestamp* or *search_offset* field was specified in the
+# query, but timestamp is not supported on that resource, on
+# which the query was invoked, then raise an exception.
if not allow_timestamps:
raise wsme.exc.UnknownArgument(field_name,
"not valid for " +
@@ -1006,7 +1006,7 @@ class MetersController(rest.RestController):
"""
q = q or []
-#Timestamp field is not supported for Meter queries
+# Timestamp field is not supported for Meter queries
kwargs = _query_to_kwargs(q, pecan.request.storage_conn.get_meters,
allow_timestamps=False)
return [Meter.from_db_model(m)
@@ -1503,8 +1503,8 @@ class AlarmThresholdRule(_Base):
meter_name = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the meter"
-#FIXME(sileht): default doesn't work
-#workaround: default is set in validate method
+# FIXME(sileht): default doesn't work
+# workaround: default is set in validate method
query = wsme.wsattr([Query], default=[])
"""The query to find the data for computing statistics.
Ownership settings are automatically included based on the Alarm owner.
@@ -1538,15 +1538,15 @@ class AlarmThresholdRule(_Base):
@staticmethod
def validate(threshold_rule):
-#note(sileht): wsme default doesn't work in some case
-#workaround for https://bugs.launchpad.net/wsme/+bug/1227039
+# note(sileht): wsme default doesn't work in some case
+# workaround for https://bugs.launchpad.net/wsme/+bug/1227039
if not threshold_rule.query:
threshold_rule.query = []
-#Timestamp is not allowed for AlarmThresholdRule query, as the alarm
-#evaluator will construct timestamp bounds for the sequence of
-#statistics queries as the sliding evaluation window advances
-#over time.
+# Timestamp is not allowed for AlarmThresholdRule query, as the alarm
+# evaluator will construct timestamp bounds for the sequence of
+# statistics queries as the sliding evaluation window advances
+# over time.
_validate_query(threshold_rule.query, storage.SampleFilter.__init__,
allow_timestamps=False)
return threshold_rule
@@ -2139,7 +2139,7 @@ class AlarmsController(rest.RestController):
:param q: Filter rules for the alarms to be returned.
"""
q = q or []
-#Timestamp is not supported field for Simple Alarm queries
+# Timestamp is not supported field for Simple Alarm queries
kwargs = _query_to_kwargs(q,
pecan.request.storage_conn.get_alarms,
allow_timestamps=False)

View File

@@ -25,7 +25,7 @@ import sys
import eventlet
# NOTE(jd) We need to monkey patch the socket and select module for,
# at least, oslo.messaging, otherwise everything's blocked on its
# first read() or select(), thread need to be patched too, because
# oslo.messaging use threading.local
eventlet.monkey_patch(socket=True, select=True, thread=True)

View File

@@ -51,20 +51,20 @@ def parse_snmp_return(ret):
class SNMPInspector(base.Inspector):
-#CPU OIDs
+# CPU OIDs
_cpu_1_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.1"
_cpu_5_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.2"
_cpu_15_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.3"
-#Memory OIDs
+# Memory OIDs
_memory_total_oid = "1.3.6.1.4.1.2021.4.5.0"
_memory_used_oid = "1.3.6.1.4.1.2021.4.6.0"
-#Disk OIDs
+# Disk OIDs
_disk_index_oid = "1.3.6.1.4.1.2021.9.1.1"
_disk_path_oid = "1.3.6.1.4.1.2021.9.1.2"
_disk_device_oid = "1.3.6.1.4.1.2021.9.1.3"
_disk_size_oid = "1.3.6.1.4.1.2021.9.1.6"
_disk_used_oid = "1.3.6.1.4.1.2021.9.1.8"
-#Network Interface OIDs
+# Network Interface OIDs
_interface_index_oid = "1.3.6.1.2.1.2.2.1.1"
_interface_name_oid = "1.3.6.1.2.1.2.2.1.2"
_interface_bandwidth_oid = "1.3.6.1.2.1.2.2.1.5"
@@ -73,7 +73,7 @@ class SNMPInspector(base.Inspector):
_interface_received_oid = "1.3.6.1.2.1.2.2.1.10"
_interface_transmitted_oid = "1.3.6.1.2.1.2.2.1.16"
_interface_error_oid = "1.3.6.1.2.1.2.2.1.20"
-#Default port and security name
+# Default port and security name
_port = 161
_security_name = 'public'
@@ -107,13 +107,13 @@ class SNMPInspector(base.Inspector):
return self._get_or_walk_oid(oid, host, False)
def inspect_cpu(self, host):
-#get 1 minute load
+# get 1 minute load
cpu_1_min_load = \
str(self._get_value_from_oid(self._cpu_1_min_load_oid, host))
-#get 5 minute load
+# get 5 minute load
cpu_5_min_load = \
str(self._get_value_from_oid(self._cpu_5_min_load_oid, host))
-#get 15 minute load
+# get 15 minute load
cpu_15_min_load = \
str(self._get_value_from_oid(self._cpu_15_min_load_oid, host))
@@ -122,9 +122,9 @@ class SNMPInspector(base.Inspector):
cpu_15_min=float(cpu_15_min_load))
def inspect_memory(self, host):
-#get total memory
+# get total memory
total = self._get_value_from_oid(self._memory_total_oid, host)
-#get used memory
+# get used memory
used = self._get_value_from_oid(self._memory_used_oid, host)
yield base.MemoryStats(total=int(total), used=int(used))

View File

@@ -45,11 +45,11 @@ class _Base(plugin.CentralPollster):
def _get_images(self, ksclient):
client = self.get_glance_client(ksclient)
-#TODO(eglynn): use pagination to protect against unbounded
+# TODO(eglynn): use pagination to protect against unbounded
# memory usage
rawImageList = list(itertools.chain(
client.images.list(filters={"is_public": True}),
-#TODO(eglynn): extend glance API with all_tenants logic to
+# TODO(eglynn): extend glance API with all_tenants logic to
# avoid second call to retrieve private images
client.images.list(filters={"is_public": False})))

View File

@@ -60,7 +60,7 @@ class NetworkNotificationBase(plugin.NotificationBase):
# it may mean we miss charging for some amount of time,
# but it is better than throwing away the existing
# metadata for a resource when it is deleted.
-##'%s.delete.start' % (self.resource_name),
+# '%s.delete.start' % (self.resource_name),
]
@staticmethod

View File

@@ -60,7 +60,7 @@ class NotificationBase(PluginBase):
:param conf: Configuration.
"""
-#TODO(sileht): Backwards compatibility, remove in J+1
+# TODO(sileht): Backwards compatibility, remove in J+1
if hasattr(self, 'get_exchange_topics'):
LOG.warn(_('get_exchange_topics API of NotificationPlugin is'
'deprecated, implements get_targets instead.'))
@@ -114,8 +114,8 @@ class NotificationBase(PluginBase):
"""
-#TODO(sileht): this will be moved into oslo.messaging
-#see oslo.messaging bp notification-dispatcher-filter
+# TODO(sileht): this will be moved into oslo.messaging
+# see oslo.messaging bp notification-dispatcher-filter
if not self._handle_event_type(notification['event_type'],
self.event_types):
return

View File

@@ -62,7 +62,7 @@ def _handle_sort_key(model_name, sort_key=None):
if not sort_key:
return sort_keys
# NOTE(Fengqian): We need to put the sort key from user
-#in the first place of sort keys list.
+# in the first place of sort keys list.
try:
sort_keys.remove(sort_key)
except ValueError:

View File

@@ -333,7 +333,7 @@ class Connection(base.Connection):
data['counter_name'], data['counter_type'],
data['counter_unit'], rts, data['source'])
-#TODO(nprivalova): try not to store resource_id
+# TODO(nprivalova): try not to store resource_id
resource = hbase_utils.serialize_entry(**{
'source': data['source'],
'meter': {new_meter: data['timestamp']},
@@ -348,7 +348,7 @@ class Connection(base.Connection):
ts = int(time.mktime(data['timestamp'].timetuple()) * 1000)
resource_table.put(data['resource_id'], resource, ts)
-#TODO(nprivalova): improve uniqueness
+# TODO(nprivalova): improve uniqueness
# Rowkey consists of reversed timestamp, meter and an md5 of
# user+resource+project for purposes of uniqueness
m = hashlib.md5()

View File

@@ -18,8 +18,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""MongoDB storage backend
-"""
+"""MongoDB storage backend"""
import calendar
import copy
@@ -607,14 +606,15 @@ class Connection(pymongo_base.Connection):
sort_criteria_list = []
for i in range(len(sort_keys)):
-#NOTE(fengqian): Generate the query criteria recursively.
-#sort_keys=[k1, k2, k3], maker_value=[v1, v2, v3]
-#sort_flags = ['$lt', '$gt', 'lt'].
-#The query criteria should be
-#{'k3': {'$lt': 'v3'}, 'k2': {'eq': 'v2'}, 'k1': {'eq': 'v1'}},
-#{'k2': {'$gt': 'v2'}, 'k1': {'eq': 'v1'}},
-#{'k1': {'$lt': 'v1'}} with 'OR' operation.
-#Each recurse will generate one items of three.
+# NOTE(fengqian): Generate the query criteria recursively.
+# sort_keys=[k1, k2, k3], maker_value=[v1, v2, v3]
+# sort_flags = ['$lt', '$gt', 'lt'].
+# The query criteria should be
+# {'k3': {'$lt': 'v3'}, 'k2': {'eq': 'v2'}, 'k1':
+# {'eq': 'v1'}},
+# {'k2': {'$gt': 'v2'}, 'k1': {'eq': 'v1'}},
+# {'k1': {'$lt': 'v1'}} with 'OR' operation.
+# Each recurse will generate one items of three.
sort_criteria_list.append(cls._recurse_sort_keys(
sort_keys[:(len(sort_keys) - i)],
marker, _op))
@@ -673,9 +673,9 @@ class Connection(pymongo_base.Connection):
sort_dir)
q.update(query)
-#NOTE(Fengqian):MongoDB collection.find can not handle limit
-#when it equals None, it will raise TypeError, so we treate
-#None as 0 for the value of limit.
+# NOTE(Fengqian): MongoDB collection.find can not handle limit
+# when it equals None, it will raise TypeError, so we treat
+# None as 0 for the value of limit.
if limit is None:
limit = 0
return db_collection.find(q, limit=limit, sort=all_sort)

View File

@@ -365,8 +365,8 @@ class Connection(base.Connection):
@staticmethod
def _decode_matching_metadata(matching_metadata):
if isinstance(matching_metadata, dict):
-#note(sileht): keep compatibility with alarm
-#with matching_metadata as a dict
+# note(sileht): keep compatibility with alarm
+# with matching_metadata as a dict
return matching_metadata
else:
new_matching_metadata = {}

View File

@@ -16,7 +16,7 @@ from sqlalchemy import Index, MetaData, Table
INDEXES = {
-#`table_name`: ((`index_name`, `column`),)
+# `table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),

View File

@@ -81,7 +81,7 @@ def downgrade(migrate_engine):
for row in table.select().execute().fetchall():
if row.type != 'threshold':
-#note: type insupported in previous version
+# note: type insupported in previous version
table.delete().where(table.c.id == row.id).execute()
else:
rule = json.loads(row.rule)
@@ -93,7 +93,7 @@ def downgrade(migrate_engine):
'meter_name': int(rule['mater_name']),
'matching_metadata': {}}
-#note: op are ignored because previous format don't support it
+# note: op are ignored because previous format don't support it
for q in rule['query']:
values['matching_metadata'][q['field']] = q['value']
values['matching_metadata'] = json.dumps(

View File

@@ -105,7 +105,7 @@ def upgrade(migrate_engine):
_alter_sourceassoc(meta, 'meter', 'idx_sm')
sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
sourceassoc.c.meter_id.alter(name='sample_id')
-#re-bind metadata to pick up alter name change
+# re-bind metadata to pick up alter name change
meta = sa.MetaData(bind=migrate_engine)
_alter_sourceassoc(meta, 'sample', 'idx_ss', True)

View File

@@ -66,7 +66,7 @@ class TestCoordinate(tests_base.BaseTestCase):
self.mock_utcnow.return_value = self.override_start
self.partition_coordinator = coordination.PartitionCoordinator()
self.partition_coordinator.coordination_rpc = mock.Mock()
-#add extra logger to check exception conditions and logged content
+# add extra logger to check exception conditions and logged content
self.str_handler = MockLoggingHandler()
coordination.LOG.logger.addHandler(self.str_handler)

View File

@@ -307,7 +307,7 @@ class TestTraitDefinition(ConverterBase):
jsonpath_rw.parse('(payload.test)|(payload.other)'))
def test_invalid_path_config(self):
-#test invalid jsonpath...
+# test invalid jsonpath...
cfg = dict(fields='payload.bogus(')
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,
@@ -316,7 +316,7 @@ class TestTraitDefinition(ConverterBase):
self.fake_plugin_mgr)
def test_invalid_plugin_config(self):
-#test invalid jsonpath...
+# test invalid jsonpath...
cfg = dict(fields='payload.test', plugin=dict(bogus="true"))
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,
@@ -325,7 +325,7 @@ class TestTraitDefinition(ConverterBase):
self.fake_plugin_mgr)
def test_unknown_plugin(self):
-#test invalid jsonpath...
+# test invalid jsonpath...
cfg = dict(fields='payload.test', plugin=dict(name='bogus'))
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,
@@ -351,7 +351,7 @@ class TestTraitDefinition(ConverterBase):
self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type)
def test_invalid_type_config(self):
-#test invalid jsonpath...
+# test invalid jsonpath...
cfg = dict(type='bogus', fields='payload.test')
self.assertRaises(converter.EventDefinitionException,
converter.TraitDefinition,

View File

@@ -54,8 +54,8 @@ class MongoDBConnection(tests_db.TestBase):
@tests_db.run_with('mongodb')
class MongoDBTestMarkerBase(test_storage_scenarios.DBTestBase):
-#NOTE(Fengqian): All these three test case are the same for resource
-#and meter collection. As to alarm, we will set up in AlarmTestPagination.
+# NOTE(Fengqian): All these three test case are the same for resource
+# and meter collection. As to alarm, we will set up in AlarmTestPagination.
def test_get_marker(self):
marker_pairs = {'user_id': 'user-id-4'}
ret = impl_mongodb.Connection._get_marker(self.conn.db.resource,

View File

@@ -2868,8 +2868,7 @@ class GetEventTest(EventTestBase):
def test_get_traits(self):
traits = self.conn.get_traits("Bar")
-#format results in a way that makes them easier to
-#work with
+# format results in a way that makes them easier to work with
trait_dict = {}
for trait in traits:
trait_dict[trait.name] = trait.dtype

View File

@@ -216,7 +216,7 @@ class AggregatorTransformer(ScalingTransformer):
def _get_unique_key(self, s):
non_aggregated_keys = "-".join([getattr(s, field)
for field in self.key_attributes])
-#NOTE(sileht): it assumes, a meter always have the same unit/type
+# NOTE(sileht): it assumes, a meter always have the same unit/type
return "%s-%s-%s" % (s.name, s.resource_id, non_aggregated_keys)
def handle_sample(self, context, sample):

View File

@@ -37,13 +37,12 @@ commands = python setup.py build_sphinx
commands = {posargs}
[flake8]
-# E265 block comment should start with #
# F402 import module shadowed by loop variable
# H305 imports not grouped correctly
# H307 like imports should be grouped together
# H405 multi line docstring summary not separated with an empty line
# H904 Wrap long lines in parentheses instead of a backslash
-ignore = E265,F402,H305,H307,H405,H904
+ignore = F402,H305,H307,H405,H904
builtins = _
exclude=.venv,.git,.tox,dist,doc,./ceilometer/openstack/common,*lib/python*,*egg,tools,nova_tests,build
show-source = True
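
Dropping E265 from the ignore list is what re-enables gating: flake8 reports the check again, so any new violation fails the style job. Locally this can be confirmed by re-running the style checks, for example via the project's usual pep8 tox environment (tox -e pep8) or flake8 with --select=E265 over the ceilometer package, assuming the standard OpenStack tox layout for this repository.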