Merge "Change chart test
key to object and support cleanup flag"
commit 164f4aa7d7
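For orientation before the diff: this commit changes the chart `test` key from a plain boolean into an object with `enabled` and `options.cleanup`, keeping the bare boolean as a deprecated alias. A minimal standalone sketch of the resolution rules the hunks below implement (function and variable names here are illustrative, not from the repo):

    # Resolve a chart's `test` key into (run_tests, cleanup), mirroring the
    # precedence this commit introduces; the bare-bool form is deprecated.
    def resolve_test_key(test_key, cg_test_all_charts=False):
        if test_key is None:
            # No chart-level override: inherit the ChartGroup's test_charts
            # flag; cleanup keeps the old default of True.
            return cg_test_all_charts, True
        if isinstance(test_key, bool):
            # Deprecated form `test: true|false`; old cleanup default of True.
            return test_key, True
        # New object form: `test: {enabled: ..., options: {cleanup: ...}}`.
        return test_key['enabled'], test_key.get('options', {}).get(
            'cleanup', False)

    assert resolve_test_key(None) == (False, True)
    assert resolve_test_key(True) == (True, True)
    assert resolve_test_key({'enabled': True,
                             'options': {'cleanup': True}}) == (True, True)
    assert resolve_test_key({'enabled': True}) == (True, False)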
@@ -45,7 +45,11 @@ class TestReleasesReleaseNameController(api.BaseResource):
                 CONF.tiller_port,
                 tiller_namespace=req.get_param(
                     'tiller_namespace', default=CONF.tiller_namespace))
-            success = test_release_for_success(tiller, release)
+            cleanup = req.get_param_as_bool('cleanup')
+            if cleanup is None:
+                cleanup = False
+            success = test_release_for_success(
+                tiller, release, cleanup=cleanup)
             # TODO(fmontei): Provide more sensible exception(s) here.
         except Exception as e:
             err_message = 'Failed to test {}: {}'.format(release, e)
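The controller relies on Falcon's tri-state boolean parsing: `get_param_as_bool` returns None when the query parameter is absent, which is what lets the code distinguish "cleanup not requested" from an explicit `cleanup=false`. A quick standalone check using Falcon's testing helpers:

    import falcon
    import falcon.testing as testing

    # 'cleanup=true' parses to True...
    req = falcon.Request(testing.create_environ(query_string='cleanup=true'))
    assert req.get_param_as_bool('cleanup') is True

    # ...while an absent parameter yields None, so the caller picks a default.
    req = falcon.Request(testing.create_environ(query_string=''))
    assert req.get_param_as_bool('cleanup') is None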
@@ -154,12 +158,24 @@ class TestReleasesManifestController(api.BaseResource):
         for group in armada_obj.get(const.KEYWORD_ARMADA).get(
                 const.KEYWORD_GROUPS):
             for ch in group.get(const.KEYWORD_CHARTS):
-                release_name = release_prefixer(prefix,
-                                                ch.get('chart').get('release'))
-
+                chart = ch['chart']
+                release_name = release_prefixer(prefix, chart.get('release'))
+                cleanup = req.get_param_as_bool('cleanup')
+                if cleanup is None:
+                    test_chart_override = chart.get('test', {})
+                    if isinstance(test_chart_override, bool):
+                        self.logger.warn(
+                            'Boolean value for chart `test` key is deprecated '
+                            'and will be removed. Use `test.enabled` instead.')
+                        # Use old default value.
+                        cleanup = True
+                    else:
+                        cleanup = test_chart_override.get('options', {}).get(
+                            'cleanup', False)
                 if release_name in known_releases:
                     self.logger.info('RUNNING: %s tests', release_name)
-                    success = test_release_for_success(tiller, release_name)
+                    success = test_release_for_success(
+                        tiller, release_name, cleanup=cleanup)
                     if success:
                         self.logger.info("PASSED: %s", release_name)
                         message['test']['passed'].append(release_name)
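Judging by the unit tests further down, the API endpoints take an optional `cleanup` query parameter; for the manifest-wide controller above, an omitted parameter falls back to each chart's `test.options.cleanup`. A hypothetical client call (host and port are placeholders, not from the commit):

    import requests

    # Run helm tests for one release and delete the test pods afterwards.
    resp = requests.get(
        'http://localhost:8000/api/v1.0/test/my-release',
        params={'cleanup': 'true'})
    print(resp.json()['message'])  # e.g. 'MESSAGE: Test Pass'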
@@ -74,19 +74,25 @@ SHORT_DESC = "Command tests releases."
     help=("The target manifest to run. Required for specifying "
           "which manifest to run when multiple are available."),
     default=None)
+@click.option(
+    '--cleanup',
+    help=("Delete test pods upon completion."),
+    is_flag=True,
+    default=None)
 @click.option('--debug', help="Enable debug logging.", is_flag=True)
 @click.pass_context
 def test_charts(ctx, file, release, tiller_host, tiller_port, tiller_namespace,
-                target_manifest, debug):
+                target_manifest, cleanup, debug):
     CONF.debug = debug
     TestChartManifest(ctx, file, release, tiller_host, tiller_port,
-                      tiller_namespace, target_manifest).safe_invoke()
+                      tiller_namespace, target_manifest,
+                      cleanup).safe_invoke()


 class TestChartManifest(CliAction):

     def __init__(self, ctx, file, release, tiller_host, tiller_port,
-                 tiller_namespace, target_manifest):
+                 tiller_namespace, target_manifest, cleanup):

         super(TestChartManifest, self).__init__()
         self.ctx = ctx
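Note the `is_flag=True, default=None` combination on `--cleanup`: unlike a normal flag, which defaults to False, an omitted flag stays None, so the CLI can defer to the per-chart manifest value. A small sketch of that behavior:

    import click

    @click.command()
    @click.option('--cleanup', is_flag=True, default=None)
    def demo(cleanup):
        # None  -> flag omitted, defer to the manifest's test options
        # True  -> --cleanup passed explicitly
        click.echo(repr(cleanup))

    # Invoked as `demo` this prints 'None'; as `demo --cleanup` it prints 'True'.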
@@ -96,6 +102,7 @@ class TestChartManifest(CliAction):
         self.tiller_port = tiller_port
         self.tiller_namespace = tiller_namespace
         self.target_manifest = target_manifest
+        self.cleanup = cleanup

     def invoke(self):
         tiller = Tiller(
@@ -107,7 +114,8 @@ class TestChartManifest(CliAction):
         if self.release:
             if not self.ctx.obj.get('api', False):
                 self.logger.info("RUNNING: %s tests", self.release)
-                success = test_release_for_success(tiller, self.release)
+                success = test_release_for_success(
+                    tiller, self.release, cleanup=self.cleanup)
                 if success:
                     self.logger.info("PASSED: %s", self.release)
                 else:
@@ -127,7 +135,7 @@ class TestChartManifest(CliAction):

         if self.file:
             if not self.ctx.obj.get('api', False):
-                documents = yaml.safe_load_all(open(self.file).read())
+                documents = list(yaml.safe_load_all(open(self.file).read()))
                 armada_obj = Manifest(
                     documents,
                     target_manifest=self.target_manifest).get_manifest()
@@ -137,14 +145,28 @@ class TestChartManifest(CliAction):
                 for group in armada_obj.get(const.KEYWORD_ARMADA).get(
                         const.KEYWORD_GROUPS):
                     for ch in group.get(const.KEYWORD_CHARTS):
+                        chart = ch['chart']
                         release_name = release_prefixer(
-                            prefix,
-                            ch.get('chart').get('release'))
+                            prefix, chart.get('release'))

                         if release_name in known_release_names:
+                            cleanup = self.cleanup
+                            if cleanup is None:
+                                test_chart_override = chart.get('test', {})
+                                if isinstance(test_chart_override, bool):
+                                    self.logger.warn(
+                                        'Boolean value for chart `test` key is'
+                                        ' deprecated and support for this will'
+                                        ' be removed. Use `test.enabled` '
+                                        'instead.')
+                                    # Use old default value.
+                                    cleanup = True
+                                else:
+                                    cleanup = test_chart_override.get(
+                                        'options', {}).get('cleanup', False)
                             self.logger.info('RUNNING: %s tests', release_name)
                             success = test_release_for_success(
-                                tiller, release_name)
+                                tiller, release_name, cleanup=cleanup)
                             if success:
                                 self.logger.info("PASSED: %s", release_name)
                             else:
@@ -13,6 +13,7 @@
 # limitations under the License.

 import difflib
+import functools
 import time
 import yaml

@@ -326,8 +327,21 @@ class Armada(object):
                 # TODO(MarshM) better handling of timeout/timer
                 cg_max_timeout = max(wait_timeout, cg_max_timeout)

-                # Chart test policy can override ChartGroup, if specified
-                test_this_chart = chart.get('test', cg_test_all_charts)
+                test_chart_override = chart.get('test')
+                # Use old default value when not using newer `test` key
+                test_cleanup = True
+                if test_chart_override is None:
+                    test_this_chart = cg_test_all_charts
+                elif isinstance(test_chart_override, bool):
+                    LOG.warn('Boolean value for chart `test` key is'
+                             ' deprecated and support for this will'
+                             ' be removed. Use `test.enabled` '
+                             'instead.')
+                    test_this_chart = test_chart_override
+                else:
+                    test_this_chart = test_chart_override['enabled']
+                    test_cleanup = test_chart_override.get('options', {}).get(
+                        'cleanup', False)

                 chartbuilder = ChartBuilder(chart)
                 protoc_chart = chartbuilder.get_helm_chart()
@@ -438,6 +452,7 @@ class Armada(object):

                 # Sequenced ChartGroup should run tests after each Chart
                 timer = int(round(deadline - time.time()))
+                test_chart_args = (release_name, timer, test_cleanup)
                 if test_this_chart:
                     if cg_sequenced:
                         LOG.info(
@@ -449,12 +464,14 @@ class Armada(object):
                             LOG.error(reason)
                             raise armada_exceptions.ArmadaTimeoutException(
                                 reason)
-                        self._test_chart(release_name, timer)
+                        self._test_chart(*test_chart_args)

                     # Un-sequenced ChartGroup should run tests at the end
                     else:
                         # Keeping track of time remaining
-                        tests_to_run.append((release_name, timer))
+                        tests_to_run.append(
+                            functools.partial(self._test_chart,
+                                              *test_chart_args))

                 # End of Charts in ChartGroup
                 LOG.info('All Charts applied in ChartGroup %s.', cg_name)
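The switch from `(release_name, timer)` tuples to `functools.partial` callbacks lets deferred tests carry the extra `cleanup` argument without every consumer of `tests_to_run` knowing the signature. The pattern in isolation (values are illustrative):

    import functools

    def _test_chart(release_name, timeout, cleanup):
        print('testing', release_name, timeout, cleanup)

    tests_to_run = []
    test_chart_args = ('my-release', 300, True)

    # Bind the arguments now; run the test later, once the group is healthy.
    tests_to_run.append(functools.partial(_test_chart, *test_chart_args))

    for callback in tests_to_run:
        callback()  # prints: testing my-release 300 True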
@@ -486,8 +503,8 @@ class Armada(object):
                     timeout=timer)

             # After entire ChartGroup is healthy, run any pending tests
-            for (test, test_timer) in tests_to_run:
-                self._test_chart(test, test_timer)
+            for callback in tests_to_run:
+                callback()

         self.post_flight_ops()

@@ -531,7 +548,7 @@ class Armada(object):
             k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep,
             timeout=timeout)

-    def _test_chart(self, release_name, timeout):
+    def _test_chart(self, release_name, timeout, cleanup):
         if self.dry_run:
             LOG.info(
                 'Skipping test during `dry-run`, would have tested '
@@ -539,7 +556,7 @@ class Armada(object):
             return True

         success = test_release_for_success(
-            self.tiller, release_name, timeout=timeout)
+            self.tiller, release_name, timeout=timeout, cleanup=cleanup)
         if success:
             LOG.info("Test passed for release: %s", release_name)
         else:
@@ -22,8 +22,10 @@ TESTRUN_STATUS_RUNNING = 3

 def test_release_for_success(tiller,
                              release,
-                             timeout=const.DEFAULT_TILLER_TIMEOUT):
-    test_suite_run = tiller.test_release(release, timeout)
+                             timeout=const.DEFAULT_TILLER_TIMEOUT,
+                             cleanup=False):
+    test_suite_run = tiller.test_release(
+        release, timeout=timeout, cleanup=cleanup)
     results = getattr(test_suite_run, 'results', [])
     failed_results = [r for r in results if r.status != TESTRUN_STATUS_SUCCESS]
    return len(failed_results) == 0
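`test_release_for_success` reduces a Tiller test-suite run to a single boolean: the release passes only if no result reports a non-success status (`TESTRUN_STATUS_SUCCESS` is 1 in Helm's TestRun status enum, alongside the `RUNNING = 3` seen above). The reduction in isolation:

    TESTRUN_STATUS_SUCCESS = 1

    class FakeResult(object):  # stand-in for a Tiller TestRun result message
        def __init__(self, status):
            self.status = status

    results = [FakeResult(TESTRUN_STATUS_SUCCESS), FakeResult(2)]  # 2 == failure
    failed_results = [r for r in results if r.status != TESTRUN_STATUS_SUCCESS]
    assert len(failed_results) == 1  # one failure fails the whole release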
@@ -440,7 +440,7 @@ class Tiller(object):
     def test_release(self,
                      release,
                      timeout=const.DEFAULT_TILLER_TIMEOUT,
-                     cleanup=True):
+                     cleanup=False):
         '''
         :param release: name of release to test
         :param timeout: runtime before exiting
@@ -59,7 +59,23 @@ data:
           type: boolean
       additionalProperties: false
     test:
-      type: boolean
+      anyOf:
+        # TODO: Remove boolean support after deprecation period.
+        - type: boolean
+        - type: object
+          properties:
+            enabled:
+              type: boolean
+            options:
+              type: object
+              properties:
+                cleanup:
+                  type: boolean
+              additionalProperties: false
+          additionalProperties: false
+          required:
+            - enabled
+
     timeout:
       type: integer
     wait:
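The `anyOf` above accepts both the deprecated boolean and the new object form. A quick standalone check of that fragment with the `jsonschema` package (the schema dict below is hand-translated from the YAML, not loaded from the repo):

    import jsonschema

    test_key_schema = {
        'anyOf': [
            {'type': 'boolean'},  # deprecated shorthand for `enabled`
            {'type': 'object',
             'properties': {
                 'enabled': {'type': 'boolean'},
                 'options': {
                     'type': 'object',
                     'properties': {'cleanup': {'type': 'boolean'}},
                     'additionalProperties': False}},
             'additionalProperties': False,
             'required': ['enabled']},
        ]
    }

    jsonschema.validate(True, test_key_schema)  # old boolean form
    jsonschema.validate({'enabled': True, 'options': {'cleanup': True}},
                        test_key_schema)        # new object form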
@@ -33,6 +33,8 @@ class TestReleasesManifestControllerTest(base.BaseControllerTest):
         rules = {'armada:tests_manifest': '@'}
         self.policy.set_rules(rules)

+        # TODO: Don't use example charts in tests.
+        # TODO: Test cleanup arg is taken from url, then manifest.
         manifest_path = os.path.join(os.getcwd(), 'examples',
                                      'keystone-manifest.yaml')
         with open(manifest_path, 'r') as f:
@@ -61,7 +63,10 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):

         mock_test_release_for_success.return_value = True

-        resp = self.app.simulate_get('/api/v1.0/test/fake-release')
+        release = 'fake-release'
+        resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release))
+        mock_test_release_for_success.assert_has_calls(
+            [mock.call(mock_tiller.return_value, release, cleanup=False)])
         self.assertEqual(200, resp.status_code)
         self.assertEqual('MESSAGE: Test Pass',
                          json.loads(resp.text)['message'])
@@ -74,12 +79,29 @@ class TestReleasesReleaseNameControllerTest(base.BaseControllerTest):
         self.policy.set_rules(rules)

         mock_test_release_for_success.return_value = False

-        resp = self.app.simulate_get('/api/v1.0/test/fake-release')
+        release = 'fake-release'
+        resp = self.app.simulate_get('/api/v1.0/test/{}'.format(release))
         self.assertEqual(200, resp.status_code)
         self.assertEqual('MESSAGE: Test Fail',
                          json.loads(resp.text)['message'])

+    @mock.patch.object(test, 'test_release_for_success')
+    @mock.patch.object(test, 'Tiller')
+    def test_test_controller_cleanup(self, mock_tiller,
+                                     mock_test_release_for_success):
+        rules = {'armada:test_release': '@'}
+        self.policy.set_rules(rules)
+
+        mock_test_release_for_success.return_value = True
+        release = 'fake-release'
+        resp = self.app.simulate_get(
+            '/api/v1.0/test/{}'.format(release), query_string='cleanup=true')
+        mock_test_release_for_success.assert_has_calls(
+            [mock.call(mock_tiller.return_value, release, cleanup=True)])
+        self.assertEqual(200, resp.status_code)
+        self.assertEqual('MESSAGE: Test Pass',
+                         json.loads(resp.text)['message'])
+

 @test_utils.attr(type=['negative'])
 class TestReleasesManifestControllerNegativeTest(base.BaseControllerTest):
@@ -111,6 +111,10 @@ data:
     options:
       force: true
       recreate_pods: true
+  test:
+    enabled: true
+    options:
+      cleanup: true
 ---
 schema: armada/Chart/v1
 metadata:
@@ -129,6 +133,8 @@ data:
   dependencies: []
   wait:
     timeout: 10
+  test:
+    enabled: true
 """

 CHART_SOURCES = [('git://github.com/dummy/armada',
@@ -163,6 +169,9 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
                             'values': {},
                             'wait': {
                                 'timeout': 10
-                            }
+                            },
+                            'test': {
+                                'enabled': True
+                            }
                         }
                     }, {
@@ -190,6 +199,12 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
                                 'force': True,
                                 'recreate_pods': True
                             }
-                        }
+                        },
+                        'test': {
+                            'enabled': True,
+                            'options': {
+                                'cleanup': True
+                            }
+                        }
                     }
                 }, {
@@ -319,13 +334,14 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):

         chart_group = armada_obj.manifest['armada']['chart_groups'][0]
         charts = chart_group['chart_group']
+        cg_test_all_charts = chart_group.get('test_charts', False)

         m_tiller = mock_tiller.return_value
         m_tiller.list_charts.return_value = known_releases

         if test_failure_to_run:

-            def fail(tiller, release, timeout=None):
+            def fail(tiller, release, timeout=None, cleanup=False):
                 status = AttrDict(
                     **{'info': AttrDict(**{'Description': 'Failed'})})
                 raise tiller_exceptions.ReleaseException(
@@ -419,12 +435,26 @@ class ArmadaHandlerTestCase(base.ArmadaTestCase):
                     values=yaml.safe_dump(chart['values']),
                     wait=this_chart_should_wait,
                     timeout=chart['wait']['timeout']))
-            test_this_chart = chart.get(
-                'test', chart_group.get('test_charts', False))
+
+            test_chart_override = chart.get('test')
+            # Use old default value when not using newer `test` key
+            test_cleanup = True
+            if test_chart_override is None:
+                test_this_chart = cg_test_all_charts
+            elif isinstance(test_chart_override, bool):
+                test_this_chart = test_chart_override
+            else:
+                test_this_chart = test_chart_override.get('enabled', True)
+                test_cleanup = test_chart_override.get('options', {}).get(
+                    'cleanup', False)

             if test_this_chart:
                 expected_test_release_for_success_calls.append(
-                    mock.call(m_tiller, release_name, timeout=mock.ANY))
+                    mock.call(
+                        m_tiller,
+                        release_name,
+                        timeout=mock.ANY,
+                        cleanup=test_cleanup))

             # Verify that at least 1 release is either installed or updated.
             self.assertTrue(
@@ -98,7 +98,7 @@ Chart
 | protected       | object   | do not delete FAILED releases when encountered from previous run (provide the        |
 |                 |          | 'continue_processing' bool to continue or halt execution (default: halt))            |
 +-----------------+----------+---------------------------------------------------------------------------------------+
-| test            | bool     | run pre-defined helm tests helm in a chart                                            |
+| test            | object   | Run helm tests on the chart after install/upgrade                                     |
 +-----------------+----------+---------------------------------------------------------------------------------------+
 | install         | object   | install the chart into your Kubernetes cluster                                        |
 +-----------------+----------+---------------------------------------------------------------------------------------+
@@ -113,6 +113,40 @@ Chart
 | timeout         | int      | time (in seconds) allotted for chart to deploy when 'wait' flag is set (DEPRECATED)  |
 +-----------------+----------+---------------------------------------------------------------------------------------+

+Test
+^^^^
+
++-------------+----------+---------------------------------------------------------------+
+| keyword     | type     | action                                                        |
++=============+==========+===============================================================+
+| enabled     | bool     | whether to enable helm tests for this chart                   |
++-------------+----------+---------------------------------------------------------------+
+| options     | object   | options to pass through to helm                               |
++-------------+----------+---------------------------------------------------------------+
+
+.. DANGER::
+
+    DEPRECATION: In addition to an object with the above fields, the ``test``
+    key currently also supports ``bool``, which maps to ``enabled``, but this is
+    deprecated and will be removed. The ``cleanup`` option below is set to true
+    in this case for backward compatibility.
+
+Test - Options
+^^^^^^^^^^^^^^
+
++-------------+----------+---------------------------------------------------------------+
+| keyword     | type     | action                                                        |
++=============+==========+===============================================================+
+| cleanup     | bool     | cleanup test pods after test completion, defaults to false    |
++-------------+----------+---------------------------------------------------------------+
+
+.. note::
+
+    The preferred way to achieve test cleanup is to add a pre-upgrade delete
+    action on the test pod, which allows for debugging the test pod up until the
+    next upgrade.
+
+
 Upgrade, Install - Pre or Post
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

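One plausible way to implement the "pre-upgrade delete action" that note recommends (an assumption about your chart, not something this commit adds) is Helm's hook-delete policy on the test pod, shown here as the pod metadata Helm reads:

    # Keep the finished test pod around for debugging, but have Helm delete
    # it just before the next hook pod is created.
    test_pod_metadata = {
        'annotations': {
            'helm.sh/hook': 'test-success',
            'helm.sh/hook-delete-policy': 'before-hook-creation',
        }
    }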
@@ -181,6 +215,8 @@ Chart Example
   timeout: 100
   protected:
     continue_processing: false
+  test:
+    enabled: true
   install:
     no_hooks: false
   upgrade:
@@ -77,7 +77,8 @@ metadata:
   name: keystone
 data:
   chart_name: keystone
-  test: true
+  test:
+    enabled: true
   release: keystone
   namespace: openstack
   timeout: 100