Remove six

Remove six and six.moves, replacing the following items with Python 3 style code (a short illustrative sketch follows the list):

- six.moves.range
- six.moves.xrange
- six.text_type
- six.PY2
- six.PY3
- six.moves.urllib
- six.string_types
- six.iteritems
- six.add_metaclass
- six.moves.map
- six.with_metaclass
- six.iterkeys
- six.itervalues
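
The replacements above are mechanical. A minimal, illustrative sketch of the new idioms (example code only, not taken from this changeset):

    import abc
    from urllib import parse as urlparse  # was: from six.moves.urllib import parse

    data = {'a': 1, 'b': 2}

    # six.iteritems / six.iterkeys / six.itervalues -> plain dict methods
    for key, value in data.items():
        print(key, value)
    keys = sorted(data.keys())
    values = list(data.values())

    # six.moves.range / six.moves.xrange / six.moves.map -> builtins
    squares = list(map(lambda i: i * i, range(5)))

    # six.text_type / six.string_types -> str
    message = str(ValueError('boom'))
    assert isinstance(message, str)

    # six.PY2 / six.PY3 branches disappear; only the Python 3 path is kept
    body = '<error_message>oops</error_message>'.encode('utf-8')

    # six.add_metaclass / six.with_metaclass -> the metaclass keyword
    class Evaluator(metaclass=abc.ABCMeta):
        """Abstract base declared directly with a metaclass."""

    print(urlparse.quote('hello world'), squares, keys, values, body)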

Change-Id: I469b49772899672c05f869c6870512fddfc6183d
Authored by wangzihao on 2020-10-21 10:39:23 +08:00; committed by wdd
Parent: 1c5797631e
Commit: a2d9f3ce50
37 changed files with 101 additions and 165 deletions


@@ -31,9 +31,8 @@ from oslo_utils import uuidutils
 import pecan
 from pecan import rest
 import pytz
-import six
-from six.moves.urllib import parse as urlparse
 from stevedore import extension
+from urllib import parse as urlparse
 import wsme
 from wsme import types as wtypes
 import wsmeext.pecan as wsme_pecan
@@ -129,7 +128,7 @@ def is_over_quota(conn, project_id, user_id):
 class CronType(wtypes.UserType):
     """A user type that represents a cron format."""
-    basetype = six.string_types
+    basetype = str
     name = 'cron'
     @staticmethod
@@ -215,7 +214,7 @@ class Alarm(base.Base):
         rule = getattr(self, '%s_rule' % self.type, None)
         if not self._description:
             if hasattr(rule, 'default_description'):
-                return six.text_type(rule.default_description)
+                return str(rule.default_description)
             return "%s alarm rule" % self.type
         return self._description
@@ -549,7 +548,7 @@ def stringify_timestamps(data):
     """Stringify any datetimes in given dict."""
     return dict((k, v.isoformat()
                  if isinstance(v, datetime.datetime) else v)
-                for (k, v) in six.iteritems(data))
+                for (k, v) in data.items())
 @profiler.trace_cls('api')


@@ -25,7 +25,6 @@ import functools
 from oslo_utils import strutils
 from oslo_utils import timeutils
 import pecan
-import six
 import wsme
 from wsme import types as wtypes
@@ -119,7 +118,7 @@ class Query(Base):
                        'float': float,
                        'boolean': functools.partial(
                            strutils.bool_from_string, strict=True),
-                       'string': six.text_type,
+                       'string': str,
                        'datetime': timeutils.parse_isotime}
     _op = None  # provide a default


@@ -20,7 +20,6 @@
 import pecan
 from pecan import rest
-import six
 from wsme import types as wtypes
 import wsmeext.pecan as wsme_pecan
@@ -36,7 +35,7 @@ def _decode_unicode(input):
         # predictable insertion order to avoid inconsistencies in the
         # message signature computation for equivalent payloads modulo
         # ordering
-        for key, value in sorted(six.iteritems(input)):
+        for key, value in sorted(input.items()):
             temp[_decode_unicode(key)] = _decode_unicode(value)
         return temp
     elif isinstance(input, (tuple, list)):
@@ -44,7 +43,7 @@ def _decode_unicode(input):
         # the tuple would become list. So we have to generate the value as
         # list here.
         return [_decode_unicode(element) for element in input]
-    elif isinstance(input, six.text_type):
+    elif isinstance(input, str):
         return input.encode('utf-8')
     else:
         return input
@@ -52,7 +51,7 @@ def _decode_unicode(input):
 def _recursive_keypairs(d, separator=':'):
     """Generator that produces sequence of keypairs for nested dictionaries."""
-    for name, value in sorted(six.iteritems(d)):
+    for name, value in sorted(d.items()):
         if isinstance(value, dict):
             for subname, subvalue in _recursive_keypairs(value, separator):
                 yield ('%s%s%s' % (name, separator, subname), subvalue)


@@ -23,8 +23,7 @@ import datetime
 from oslo_utils import timeutils
 import pecan
-import six
-from six.moves.urllib import parse as urllib_parse
+from urllib import parse as urllib_parse
 import wsme
 from aodh.api.controllers.v2 import base
@@ -294,8 +293,6 @@ def set_resp_location_hdr(location):
     # str in py2 and py3 even this is not the same thing in both
     # version
     # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues
-    if six.PY2 and isinstance(location, six.text_type):
-        location = location.encode('utf-8')
     location = urllib_parse.quote(location)
     pecan.response.headers['Location'] = location


@@ -23,7 +23,6 @@ import json
 from lxml import etree
 from oslo_log import log
-import six
 import webob
 from aodh import i18n
@@ -101,12 +100,10 @@ class ParsableErrorMiddleware(object):
                     LOG.error('Error parsing HTTP response: %s', err)
                     error_message = state['status_code']
                     body = '<error_message>%s</error_message>' % error_message
-                if six.PY3:
-                    body = body.encode('utf-8')
+                body = body.encode('utf-8')
             else:
                 content_type = 'application/json'
                 app_data = b'\n'.join(app_iter)
-                if six.PY3:
-                    app_data = app_data.decode('utf-8')
+                app_data = app_data.decode('utf-8')
                 try:
                     fault = json.loads(app_data)
@@ -116,7 +113,6 @@ class ParsableErrorMiddleware(object):
                 except ValueError:
                     fault = app_data
                 body = json.dumps({'error_message': fault})
-                if six.PY3:
-                    body = body.encode('utf-8')
+                body = body.encode('utf-8')
         state['headers'].append(('Content-Length', str(len(body))))


@@ -20,7 +20,6 @@ from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import encodeutils
 from oslo_utils import uuidutils
-import six
 import tenacity
 import tooz.coordination
@@ -73,7 +72,7 @@ class HashRing(object):
         self._sorted_keys = []
         for node in nodes:
-            for r in six.moves.range(replicas):
+            for r in range(replicas):
                 hashed_key = self._hash('%s-%s' % (node, r))
                 self._ring[hashed_key] = node
                 self._sorted_keys.append(hashed_key)


@@ -28,7 +28,6 @@ from oslo_log import log
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 import pytz
-import six
 from stevedore import extension
 import aodh
@@ -55,8 +54,7 @@ OPTS = [
 ]
-@six.add_metaclass(abc.ABCMeta)
-class Evaluator(object):
+class Evaluator(object, metaclass=abc.ABCMeta):
     """Base class for alarm rule evaluator plugins."""
     def __init__(self, conf):


@@ -15,7 +15,6 @@
 import copy
 import datetime
 import operator
-import six
 from ceilometerclient import client as ceiloclient
 from oslo_log import log
@@ -182,7 +181,7 @@ class ThresholdEvaluator(evaluator.Evaluator):
                       ' %(limit)s', {'value': value, 'limit': limit})
             return op(value, limit)
-        compared = list(six.moves.map(_compare, statistics))
+        compared = list(map(_compare, statistics))
         distilled = all(compared)
         unequivocal = distilled or not any(compared)
         number_outside = len([c for c in compared if c])


@@ -13,7 +13,6 @@
 #
 from oslo_log import log
-import six
 import stevedore
 from aodh import evaluator
@@ -87,7 +86,7 @@ class AndOp(object):
         return all(self.rule_targets)
     def __str__(self):
-        return '(' + ' and '.join(six.moves.map(str, self.rule_targets)) + ')'
+        return '(' + ' and '.join(map(str, self.rule_targets)) + ')'
     __nonzero__ = __bool__
@@ -100,7 +99,7 @@ class OrOp(object):
         return any(self.rule_targets)
     def __str__(self):
-        return '(' + ' or '.join(six.moves.map(str, self.rule_targets)) + ')'
+        return '(' + ' or '.join(map(str, self.rule_targets)) + ')'
     __nonzero__ = __bool__


@@ -19,7 +19,6 @@ from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import fnmatch
 from oslo_utils import timeutils
-import six
 from aodh import evaluator
@@ -50,7 +49,7 @@ def _sanitize_trait_value(value, trait_type):
     elif trait_type in (4, 'datetime'):
         return timeutils.normalize_time(timeutils.parse_isotime(value))
     else:
-        return six.text_type(value)
+        return str(value)
 class InvalidEvent(Exception):
@@ -170,8 +169,7 @@ class EventAlarmEvaluator(evaluator.Evaluator):
                           'for it.', e)
                 continue
-            for id, alarm in six.iteritems(
-                    self._get_project_alarms(event.project)):
+            for id, alarm in self._get_project_alarms(event.project).items():
                 try:
                     self._evaluate_alarm(alarm, event)
                 except Exception:


@@ -19,7 +19,6 @@ from octaviaclient.api.v2 import octavia
 from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import timeutils
-import six
 from aodh import evaluator
 from aodh.evaluator import threshold
@@ -73,7 +72,7 @@ class LoadBalancerMemberHealthEvaluator(evaluator.Evaluator):
             ret = self.lb_client.member_list(pool_id)
         except Exception as e:
             LOG.warning("Failed to communicate with load balancing service, "
-                        "error: %s", six.text_type(e))
+                        "error: %s", str(e))
             raise threshold.InsufficientDataError(
                 'failed to communicate with load balancing service',
                 []


@@ -15,7 +15,6 @@
 import datetime
 import operator
-import six
 from oslo_config import cfg
 from oslo_log import log
@@ -118,7 +117,7 @@ class ThresholdEvaluator(evaluator.Evaluator):
                       ' %(limit)s', {'value': value, 'limit': limit})
             return op(value, limit)
-        compared = list(six.moves.map(_compare, statistics))
+        compared = list(map(_compare, statistics))
         distilled = all(compared)
         unequivocal = distilled or not any(compared)
         number_outside = len([c for c in compared if c])


@@ -20,7 +20,6 @@ from oslo_config import cfg
 from oslo_log import log
 import oslo_messaging
 from oslo_utils import netutils
-import six
 from stevedore import extension
 from aodh import messaging
@@ -40,8 +39,7 @@ OPTS = [
 ]
-@six.add_metaclass(abc.ABCMeta)
-class AlarmNotifier(object):
+class AlarmNotifier(object, metaclass=abc.ABCMeta):
     """Base class for alarm notifier plugins."""
     @staticmethod


@@ -13,7 +13,6 @@
 # limitations under the License.
 from oslo_log import log
-import six
 from aodh import keystone_client as aodh_keystone
 from aodh import notifier
@@ -117,4 +116,4 @@ class TrustHeatAlarmNotifier(notifier.AlarmNotifier):
         except Exception as e:
             LOG.exception("Failed to communicate with Heat service for alarm "
                           "%s, error: %s",
-                          alarm_id, six.text_type(e))
+                          alarm_id, str(e))


@@ -19,7 +19,7 @@ from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import uuidutils
 import requests
-import six.moves.urllib.parse as urlparse
+import urllib.parse as urlparse
 from aodh import notifier


@@ -14,7 +14,7 @@
 # under the License.
 """Rest alarm notifier with trusted authentication."""
-from six.moves.urllib import parse
+from urllib import parse
 from aodh import keystone_client
 from aodh.notifier import rest


@@ -17,7 +17,7 @@
 from oslo_config import cfg
 from oslo_log import log
-import six.moves.urllib.parse as urlparse
+from urllib import parse as urlparse
 from aodh import keystone_client
 from aodh import notifier


@@ -14,7 +14,6 @@
 from oslo_config import cfg
 from oslo_log import log
 import oslo_messaging
-import six
 from aodh import messaging
 from aodh.storage import models
@@ -53,6 +52,6 @@ class AlarmNotifier(object):
                    'severity': alarm.severity,
                    'previous': previous,
                    'current': alarm.state,
-                   'reason': six.text_type(reason),
+                   'reason': str(reason),
                    'reason_data': reason_data}
         self.notifier.sample({}, 'alarm.update', payload)


@@ -19,9 +19,9 @@ import datetime
 from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import timeutils
-import six.moves.urllib.parse as urlparse
 from stevedore import driver
 import tenacity
+from urllib import parse as urlparse
 _NAMESPACE = 'aodh.storage'


@@ -16,8 +16,6 @@
 """
 import copy
-import six
 import aodh
 from aodh.utils import get_func_valid_keys
@@ -28,7 +26,7 @@ def update_nested(original_dict, updates):
     Updates occur without replacing entire sub-dicts.
     """
     dict_to_update = copy.deepcopy(original_dict)
-    for key, value in six.iteritems(updates):
+    for key, value in updates.items():
         if isinstance(value, dict):
             sub_dict = update_nested(dict_to_update.get(key, {}), value)
             dict_to_update[key] = sub_dict
@@ -38,11 +36,11 @@ def update_nested(original_dict, updates):
 class Model(object):
     """Base class for storage API models."""
     def __init__(self, **kwds):
         self.fields = list(kwds)
-        for k, v in six.iteritems(kwds):
+        for k, v in kwds.items():
             setattr(self, k, v)
     def as_dict(self):


@@ -17,7 +17,6 @@ import json
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
-import six
 import sqlalchemy as sa
 from sqlalchemy import Column, String, Index, Boolean, Text, DateTime, Integer
 from sqlalchemy.dialects import mysql
@@ -68,7 +67,7 @@ class AodhBase(object):
     def update(self, values):
         """Make the model object behave like a dict."""
-        for k, v in six.iteritems(values):
+        for k, v in values.items():
             setattr(self, k, v)


@@ -23,7 +23,6 @@ import unittest
 import oslo_messaging.conffixture
 from oslo_utils import timeutils
 from oslotest import base
-import six
 import webtest
 import aodh
@@ -121,10 +120,10 @@ def _skip_decorator(func):
         try:
             return func(*args, **kwargs)
         except aodh.NotImplementedError as e:
-            raise unittest.SkipTest(six.text_type(e))
+            raise unittest.SkipTest(str(e))
         except webtest.app.AppError as e:
-            if 'not implemented' in six.text_type(e):
-                raise unittest.SkipTest(six.text_type(e))
+            if 'not implemented' in str(e):
+                raise unittest.SkipTest(str(e))
             raise
     return skip_if_not_implemented


@@ -23,8 +23,6 @@ from unittest import mock
 import fixtures
 from oslo_utils import uuidutils
-import six
-from six import moves
 import webtest
 from aodh.api import app
@@ -468,7 +466,7 @@ class TestAlarms(TestAlarmsBase):
                 }
             },
         }
-        for field, json in six.iteritems(jsons):
+        for field, json in jsons.items():
             resp = self.post_json('/alarms', params=json, expect_errors=True,
                                   status=400, headers=self.auth_headers)
             self.assertEqual("Invalid input for field/attribute %s."
@@ -912,7 +910,7 @@ class TestAlarms(TestAlarmsBase):
                 'granularity': 180,
             }
         }
-        for aspect, id in six.iteritems(identifiers):
+        for aspect, id in identifiers.items():
            json['%s_id' % aspect] = id
        return json
@@ -1636,7 +1634,7 @@ class TestAlarmsHistory(TestAlarmsBase):
         return resp
     def _assert_is_subset(self, expected, actual):
-        for k, v in six.iteritems(expected):
+        for k, v in expected.items():
            current = actual.get(k)
            if k == 'detail' and isinstance(v, dict):
                current = jsonlib.loads(current)
@@ -1645,7 +1643,7 @@ class TestAlarmsHistory(TestAlarmsBase):
     def _assert_in_json(self, expected, actual):
         actual = jsonlib.dumps(jsonlib.loads(actual), sort_keys=True)
-        for k, v in six.iteritems(expected):
+        for k, v in expected.items():
             fragment = jsonlib.dumps({k: v}, sort_keys=True)[1:-1]
             self.assertIn(fragment, actual,
                           '%s not in %s' % (fragment, actual))
@@ -1870,14 +1868,14 @@ class TestAlarmsHistory(TestAlarmsBase):
         self._get_alarm_history('a', expect_errors=True, status=404)
     def test_get_alarm_history_ordered_by_recentness(self):
-        for i in moves.xrange(10):
+        for i in range(10):
             self._update_alarm('a', dict(name='%s' % i))
         history = self._get_alarm_history('a')
         self.assertEqual(10, len(history), 'hist: %s' % history)
         self._assert_is_subset(dict(alarm_id='a',
                                     type='rule change'),
                                history[0])
-        for i in moves.xrange(1, 11):
+        for i in range(1, 11):
             detail = '{"name": "%s"}' % (10 - i)
             self._assert_is_subset(dict(alarm_id='a',
                                         detail=detail,
@@ -2844,7 +2842,7 @@ class TestPaginationQuery(TestAlarmsBase):
         self.assertEqual(['name1', 'name2', 'name3'], names)
     def test_pagination_query_history_data(self):
-        for i in moves.xrange(10):
+        for i in range(10):
             self._update_alarm('a', dict(name='%s' % i))
         url = '/alarms/a/history?sort=event_id:desc&sort=timestamp:desc'
         data = self.get_json(url, headers=self.auth_headers)


@@ -18,7 +18,6 @@
 import json
 from unittest import mock
-import six
 import wsme
 from aodh import i18n
@@ -142,9 +141,7 @@ class TestApiMiddleware(v2.FunctionalTest):
     def test_translated_then_untranslated_error(self):
         resp = self.get_json('/alarms/alarm-id-3', expect_errors=True)
         self.assertEqual(404, resp.status_code)
-        body = resp.body
-        if six.PY3:
-            body = body.decode('utf-8')
+        body = resp.body.decode('utf-8')
         self.assertEqual("Alarm alarm-id-3 not found",
                          json.loads(body)['error_message']
                          ['faultstring'])
@@ -156,9 +153,7 @@ class TestApiMiddleware(v2.FunctionalTest):
         resp = self.get_json('/alarms/alarm-id-5', expect_errors=True)
         self.assertEqual(404, resp.status_code)
-        body = resp.body
-        if six.PY3:
-            body = body.decode('utf-8')
+        body = resp.body.decode('utf-8')
         self.assertEqual("untranslated_error",
                          json.loads(body)['error_message']
                          ['faultstring'])


@@ -21,8 +21,7 @@ import os
 import fixtures
 from oslo_config import fixture as fixture_config
 from oslo_utils import uuidutils
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 from aodh import service
 from aodh import storage
@@ -70,8 +69,8 @@ class SQLiteManager(fixtures.Fixture):
         self.url = "sqlite://"
-@six.add_metaclass(test_base.SkipNotImplementedMeta)
-class TestBase(test_base.BaseTestCase):
+class TestBase(test_base.BaseTestCase,
+               metaclass=test_base.SkipNotImplementedMeta):
     DRIVER_MANAGERS = {
         'mysql': MySQLManager,


@@ -24,8 +24,8 @@ from oslo_config import cfg
 from oslo_config import fixture as fixture_config
 from oslo_policy import opts
 from oslo_utils import uuidutils
-from six.moves.urllib import parse as urlparse
 import sqlalchemy_utils
+from urllib import parse as urlparse
 from aodh.api import app
 from aodh.api import rbac


@@ -16,7 +16,6 @@ import abc
 from unittest import mock
 from oslo_db.sqlalchemy import test_migrations
-import six
 from aodh.storage.sqlalchemy import models
 from aodh.tests import base
@@ -27,10 +26,9 @@ class ABCSkip(base.SkipNotImplementedMeta, abc.ABCMeta):
     pass
-class ModelsMigrationsSync(
-        six.with_metaclass(ABCSkip,
-                           tests_db.TestBase,
-                           test_migrations.ModelsMigrationsSync)):
+class ModelsMigrationsSync(tests_db.TestBase,
+                           test_migrations.ModelsMigrationsSync,
+                           metaclass=ABCSkip):
     def setUp(self):
         super(ModelsMigrationsSync, self).setUp()


@@ -24,8 +24,6 @@ from aodh import service
 from aodh import storage
 from aodh.storage import impl_log
-import six
 class EngineTest(base.BaseTestCase):
     def setUp(self):
@@ -46,7 +44,7 @@ class EngineTest(base.BaseTestCase):
         try:
             storage.get_connection_from_config(self.CONF)
         except RuntimeError as err:
-            self.assertIn('no-such-engine', six.text_type(err))
+            self.assertIn('no-such-engine', str(err))
 class ConnectionRetryTest(base.BaseTestCase):


@@ -23,7 +23,7 @@ devstack).
 import os
 from gabbi import driver
-import six.moves.urllib.parse as urlparse
+from urllib import parse as urlparse
 TESTS_DIR = 'gabbits-live'


@@ -52,9 +52,5 @@ class TestEvaluatorBase(base.BaseTestCase):
         self.assertEqual(state, alarm.state)
     def assertDictContains(self, parent, child):
-        """Checks whether child dict is a subset of parent.
-        assertDictContainsSubset() in standard Python 2.7 has been deprecated
-        since Python 3.2
-        """
+        """Checks whether child dict is a subset of parent."""
         self.assertEqual(parent, dict(parent, **child))


@@ -17,8 +17,6 @@ from unittest import mock
 import fixtures
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
-import six
-from six import moves
 from aodh import evaluator
 from aodh.evaluator import composite
@@ -39,7 +37,7 @@ class BaseCompositeEvaluate(base.TestEvaluatorBase):
     @staticmethod
     def _get_gnocchi_stats(granularity, values):
         now = timeutils.utcnow_ts()
-        return [[six.text_type(now - len(values) * granularity),
+        return [[str(now - len(values) * granularity),
                  granularity, value] for value in values]
     @staticmethod
@@ -54,7 +52,7 @@ class BaseCompositeEvaluate(base.TestEvaluatorBase):
                        evaluator.UNKNOWN: 'state evaluated to unknown.'}
         params = {'state': new_state,
                   'expression': user_expression,
-                  'rules': ', '.join(sorted(six.iterkeys(root_cause_rules))),
+                  'rules': ', '.join(sorted(root_cause_rules.keys())),
                   'description': description[new_state]}
         reason_data = {
             'type': 'composite',
@@ -290,18 +288,18 @@ class CompositeTest(BaseCompositeEvaluate):
         # self.sub_rule5: ok
         # self.sub_rule6: alarm
         maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
-                                            for v in moves.xrange(1, 5)])
+                                            for v in range(1, 5)])
         avgs1 = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
-                                             for v in moves.xrange(1, 4)])
+                                             for v in range(1, 4)])
         avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
-                                             for v in moves.xrange(1, 6)])
+                                             for v in range(1, 6)])
         gavgs1 = self._get_gnocchi_stats(60, [self.sub_rule4['threshold']
-                                              - v for v in moves.xrange(1, 6)])
+                                              - v for v in range(1, 6)])
         gmaxs = self._get_gnocchi_stats(300, [self.sub_rule5['threshold'] + v
-                                              for v in moves.xrange(1, 5)])
+                                              for v in range(1, 5)])
         gavgs2 = self._get_gnocchi_stats(50, [self.sub_rule6['threshold'] + v
-                                              for v in moves.xrange(1, 7)])
+                                              for v in range(1, 7)])
         self.client.metric.get_measures.side_effect = [gavgs1]
         self.client.metric.aggregation.side_effect = [maxs, avgs1, avgs2,
@@ -323,7 +321,7 @@ class CompositeTest(BaseCompositeEvaluate):
         alarm = self.alarms[1]
         # self.sub_rule1: alarm
         avgs = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] + v
-                                            for v in moves.xrange(1, 6)])
+                                            for v in range(1, 6)])
         self.client.metric.aggregation.side_effect = [avgs]
         self.evaluator.evaluate(alarm)
         self.assertEqual('alarm', alarm.state)
@@ -339,7 +337,7 @@ class CompositeTest(BaseCompositeEvaluate):
         alarm = self.alarms[2]
         # self.sub_rule1: ok
         avgs = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
-                                            for v in moves.xrange(1, 6)])
+                                            for v in range(1, 6)])
         self.client.metric.aggregation.side_effect = [avgs]
         self.evaluator.evaluate(alarm)
         self.assertEqual('ok', alarm.state)
@@ -354,11 +352,11 @@ class CompositeTest(BaseCompositeEvaluate):
     def test_unknown_state_with_sub_rules_trending_state(self):
         alarm = self.alarms[0]
         maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
-                                            for v in moves.xrange(-1, 4)])
+                                            for v in range(-1, 4)])
         avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
-                                            for v in moves.xrange(-1, 3)])
+                                            for v in range(-1, 3)])
         avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
-                                             for v in moves.xrange(1, 6)])
+                                             for v in range(1, 6)])
         self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs]
         self.evaluator.evaluate(alarm)
@@ -377,11 +375,11 @@ class CompositeTest(BaseCompositeEvaluate):
         alarm.state = 'ok'
         maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
-                                            for v in moves.xrange(-1, 4)])
+                                            for v in range(-1, 4)])
         avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
-                                            for v in moves.xrange(-1, 3)])
+                                            for v in range(-1, 3)])
         avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
-                                             for v in moves.xrange(1, 6)])
+                                             for v in range(1, 6)])
         self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs]
         self.evaluator.evaluate(alarm)
@@ -399,11 +397,11 @@ class CompositeTest(BaseCompositeEvaluate):
         alarm = self.alarms[2]
         alarm.state = 'ok'
         maxs = self._get_gnocchi_stats(60, [self.sub_rule2['threshold'] + v
-                                            for v in moves.xrange(-1, 4)])
+                                            for v in range(-1, 4)])
         avgs = self._get_gnocchi_stats(60, [self.sub_rule3['threshold'] + v
-                                            for v in moves.xrange(-1, 3)])
+                                            for v in range(-1, 3)])
         avgs2 = self._get_gnocchi_stats(60, [self.sub_rule1['threshold'] - v
-                                             for v in moves.xrange(1, 6)])
+                                             for v in range(1, 6)])
         self.client.metric.aggregation.side_effect = [avgs2, maxs, avgs]
         self.evaluator.evaluate(alarm)
         self.assertEqual('ok', alarm.state)


@@ -20,7 +20,6 @@ from unittest import mock
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
-import six
 from aodh import evaluator
 from aodh.evaluator import event as event_evaluator
@@ -66,7 +65,7 @@ class TestEventAlarmEvaluate(base.TestEvaluatorBase):
         self._update_history = []
         def get_alarms(**kwargs):
-            return (a for a in six.itervalues(self._stored_alarms))
+            return (a for a in self._stored_alarms.values())
         def update_alarm(alarm):
             self._stored_alarms[alarm.alarm_id] = copy.deepcopy(alarm)
@@ -106,7 +105,7 @@ class TestEventAlarmEvaluate(base.TestEvaluatorBase):
                          self.storage_conn.get_alarms.call_args_list)
         if expect_alarm_states is not None:
-            for alarm_id, state in six.iteritems(expect_alarm_states):
+            for alarm_id, state in expect_alarm_states.items():
                 self.assertEqual(state, self._stored_alarms[alarm_id].state)
         if expect_alarm_updates is not None:


@@ -17,15 +17,12 @@ import copy
 import datetime
 import fixtures
 import json
-import unittest
 from unittest import mock
 from gnocchiclient import exceptions
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 import pytz
-import six
-from six import moves
 from aodh.evaluator import gnocchi
 from aodh import messaging
@@ -126,7 +123,7 @@ class TestGnocchiEvaluatorBase(base.TestEvaluatorBase):
     @staticmethod
     def _get_stats(granularity, values):
         now = timeutils.utcnow_ts()
-        return [[six.text_type(now - len(values) * granularity),
+        return [[str(now - len(values) * granularity),
                  granularity, value] for value in values]
     @staticmethod
@@ -171,7 +168,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
     def test_retry_transient_api_failure(self):
         means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v
-                                     for v in moves.xrange(5)])
+                                     for v in range(5)])
         self.client.metric.get_measures.side_effect = [
             exceptions.ClientException(501, "error2"), means]
         self._test_retry_transient()
@@ -185,7 +182,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
         utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
         self._set_all_alarms('ok')
         avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(1, 6)])
+                                    for v in range(1, 6)])
         self.client.metric.get_measures.side_effect = [avgs]
         self._evaluate_all_alarms()
         start_alarm = "2015-01-26T12:51:00"
@@ -207,7 +204,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
     def test_simple_alarm_clear(self):
         self._set_all_alarms('alarm')
         avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v
-                                    for v in moves.xrange(5)])
+                                    for v in range(5)])
         self.client.metric.get_measures.side_effect = [avgs]
@@ -263,7 +260,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
         self._set_all_alarms('ok')
         avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(1, 6)])
+                                    for v in range(1, 6)])
         self.client.metric.get_measures.side_effect = [avgs]
@@ -287,7 +284,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
     def test_equivocal_from_known_state_ok(self):
         self._set_all_alarms('ok')
         avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(5)])
+                                    for v in range(5)])
         self.client.metric.get_measures.side_effect = [avgs]
         self._evaluate_all_alarms()
@@ -300,7 +297,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
         self._set_all_alarms('ok')
         self.alarms[0].repeat_actions = True
         avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(1, 6)])
+                                    for v in range(1, 6)])
         self.client.metric.get_measures.side_effect = [avgs]
@@ -319,7 +316,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
     def test_equivocal_from_unknown(self):
         self._set_all_alarms('insufficient data')
         avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(1, 6)])
+                                    for v in range(1, 6)])
         self.client.metric.get_measures.side_effect = [avgs]
@@ -336,8 +333,6 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
                                    reason, reason_data)
         self.assertEqual(expected, self.notifier.notify.call_args)
-    @unittest.skipIf(six.PY3,
-                     "the aodh base class is not python 3 ready")
     @mock.patch.object(timeutils, 'utcnow')
     def test_no_state_change_outside_time_constraint(self, mock_utcnow):
         self._set_all_alarms('ok')
@@ -360,8 +355,6 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
                   " time is outside its time constraint.")
         self.assertEqual([], self.notifier.notify.call_args_list)
-    @unittest.skipIf(six.PY3,
-                     "the aodh base class is not python 3 ready")
     @mock.patch.object(timeutils, 'utcnow')
     def test_state_change_inside_time_constraint(self, mock_utcnow):
         self._set_all_alarms('ok')
@@ -419,7 +412,7 @@ class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase):
         self._set_all_alarms('ok')
         original_alarms = copy.deepcopy(self.alarms)
         avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(1, 6)])
+                                    for v in range(1, 6)])
         self.client.metric.get_measures.side_effect = [avgs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('alarm')
@@ -439,7 +432,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
     def test_retry_transient_api_failure(self):
         maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v
-                                     for v in moves.xrange(4)])
+                                     for v in range(4)])
         self.client.metric.aggregation.side_effect = [Exception('boom'), maxs]
         self._test_retry_transient()
@@ -453,7 +446,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
         self._set_all_alarms('ok')
         maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
-                                     for v in moves.xrange(4)])
+                                     for v in range(4)])
         self.client.metric.aggregation.side_effect = [maxs]
         self._evaluate_all_alarms()
         start_alarm = "2015-01-26T12:32:00"
@@ -482,7 +475,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
     def test_simple_alarm_clear(self):
         self._set_all_alarms('alarm')
         maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v
-                                     for v in moves.xrange(1, 5)])
+                                     for v in range(1, 5)])
         self.client.metric.aggregation.side_effect = [maxs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('ok')
@@ -499,7 +492,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
     def test_equivocal_from_known_state_ok(self):
         self._set_all_alarms('ok')
         maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
-                                     for v in moves.xrange(-1, 3)])
+                                     for v in range(-1, 3)])
         self.client.metric.aggregation.side_effect = [maxs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('ok')
@@ -513,7 +506,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
         # NOTE(sileht): we add one useless point (81.0) that will break
         # the test if the evaluator doesn't remove it.
         maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
-                                     for v in moves.xrange(-1, 5)])
+                                     for v in range(-1, 5)])
         self.client.metric.aggregation.side_effect = [maxs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('alarm')
@@ -522,7 +515,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
         self._set_all_alarms('ok')
         self.alarms[0].repeat_actions = True
         maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
-                                     for v in moves.xrange(-1, 3)])
+                                     for v in range(-1, 3)])
         self.client.metric.aggregation.side_effect = [maxs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('ok')
@@ -538,7 +531,7 @@ class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase):
         self.alarms[0].repeat_actions = True
         maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v
-                                     for v in moves.xrange(4)])
+                                     for v in range(4)])
         self.client.metric.aggregation.side_effect = [maxs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('alarm')
@@ -560,7 +553,7 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
     def test_retry_transient_api_failure(self):
         avgs2 = self._get_stats(50, [self.alarms[0].rule['threshold'] - v
-                                     for v in moves.xrange(6)])
+                                     for v in range(6)])
         self.client.metric.aggregation.side_effect = [
             exceptions.ClientException(500, "error"), avgs2]
         self._test_retry_transient()
@@ -574,7 +567,7 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
         utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
         self._set_all_alarms('ok')
         avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(1, 7)])
+                                    for v in range(1, 7)])
         self.client.metric.aggregation.side_effect = [avgs]
         self._evaluate_all_alarms()
@@ -602,7 +595,7 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
     def test_simple_alarm_clear(self):
         self._set_all_alarms('alarm')
         avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] - v
-                                    for v in moves.xrange(6)])
+                                    for v in range(6)])
         self.client.metric.aggregation.side_effect = [avgs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('ok')
@@ -618,7 +611,7 @@ class TestGnocchiAggregationResourcesThresholdEvaluate(
     def test_equivocal_from_known_state_ok(self):
         self._set_all_alarms('ok')
         avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v
-                                    for v in moves.xrange(6)])
+                                    for v in range(6)])
         self.client.metric.aggregation.side_effect = [avgs]
         self._evaluate_all_alarms()
         self._assert_all_alarms('ok')


@@ -18,7 +18,6 @@ import os
 import subprocess
 from oslo_utils import fileutils
-import six
 from aodh.tests import base
@@ -28,7 +27,6 @@ class BinTestCase(base.BaseTestCase):
         super(BinTestCase, self).setUp()
         content = ("[database]\n"
                    "connection=log://localhost\n")
-        if six.PY3:
-            content = content.encode('utf-8')
+        content = content.encode('utf-8')
         self.tempfile = fileutils.write_to_tempfile(content=content,
                                                     prefix='aodh',
@@ -58,7 +56,6 @@ class BinTestCase(base.BaseTestCase):
         content = ("[database]\n"
                    "alarm_history_time_to_live=1\n"
                    "connection=log://localhost\n")
-        if six.PY3:
-            content = content.encode('utf-8')
+        content = content.encode('utf-8')
         self.tempfile = fileutils.write_to_tempfile(content=content,
                                                     prefix='aodh',
@@ -71,7 +68,6 @@ class BinTestCase(base.BaseTestCase):
         out, __ = subp.communicate()
         self.assertEqual(0, subp.poll())
         msg = "Dropping alarm history data with TTL 1"
-        if six.PY3:
-            msg = msg.encode('utf-8')
+        msg = msg.encode('utf-8')
         self.assertIn(msg, out)
@@ -81,7 +77,6 @@ class BinEvaluatorTestCase(base.BaseTestCase):
         super(BinEvaluatorTestCase, self).setUp()
         content = ("[database]\n"
                    "connection=log://localhost\n")
-        if six.PY3:
-            content = content.encode('utf-8')
+        content = content.encode('utf-8')
         self.tempfile = fileutils.write_to_tempfile(content=content,
                                                     prefix='aodh',


@@ -22,7 +22,7 @@ from oslo_config import fixture as fixture_config
 import oslo_messaging
 import requests
-import six.moves.urllib.parse as urlparse
+from urllib import parse as urlparse
 from aodh import keystone_client
 from aodh import notifier


@@ -13,11 +13,6 @@
 # under the License.
 import inspect
-import six
 def get_func_valid_keys(func):
-    if six.PY2:
-        return inspect.getargspec(func)[0]
-    else:
-        return inspect.getfullargspec(func)[0]
+    return inspect.getfullargspec(func)[0]


@@ -24,7 +24,6 @@ oslo.utils>=3.5.0 # Apache-2.0
 python-keystoneclient>=1.6.0
 pytz>=2013.6
 requests>=2.5.2
-six>=1.9.0
 stevedore>=1.5.0 # Apache-2.0
 tooz>=1.28.0 # Apache-2.0
 voluptuous>=0.8.10