Deploy environment with Telemetry plugin and Kafka plugin enabled

Change-Id: I96763dfbfe87dae63c1f99b985014957108ac2ef
Artem 2016-09-15 14:47:00 +03:00 committed by vgusev
parent 60757fc06a
commit 72a1f72736
4 changed files with 250 additions and 67 deletions


@@ -21,6 +21,9 @@ role_name = 'kafka'
plugin_path = settings.KAFKA_PLUGIN_PATH
version = helpers.get_plugin_version(plugin_path)
default_options = {}
default_options = {
"kafka_jvm_heap_size/value": "1",
"zookeeper_jvm_heap_size/value": "1"
}
toolchain_options = default_options
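
These defaults use the same option-key format that the toolchain later feeds to helpers.activate_plugin (the call shown in the Telemetry API hunk below). A minimal sketch of how such a defaults dictionary might be applied, assuming only the activate_plugin(name, version, options) signature visible in this commit; the merge helper itself is hypothetical:

def activate_kafka_with_defaults(helpers, name, version, overrides=None):
    """Hypothetical helper: merge the Kafka plugin defaults above with any
    per-test overrides before activating the plugin."""
    options = {
        "kafka_jvm_heap_size/value": "1",      # default JVM heap for Kafka
        "zookeeper_jvm_heap_size/value": "1",  # default JVM heap for ZooKeeper
    }
    options.update(overrides or {})
    # Same call shape as self.helpers.activate_plugin(...) used by the
    # plugin APIs in this commit.
    helpers.activate_plugin(name, version, options)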


@@ -71,51 +71,57 @@ class OpenstackTelemeteryPluginApi(base_test.PluginApi):
self.helpers.activate_plugin(
self.settings.name, self.settings.version, options)
def is_kafka_enabled(self):
return (
self.fuel_web.get_nailgun_cluster_nodes_by_roles(
self.helpers.cluster_id, ["kafka"])
)
def check_services_on_nodes(self, services, node_role):
node_ips = [
node["ip"] for node in
self.fuel_web.get_nailgun_cluster_nodes_by_roles(
self.helpers.cluster_id, [node_role])]
logger.info("Check {} services on {} nodes".format(
services, node_role))
for ip in node_ips:
for service in services:
fuelweb_checkers.verify_service(
ip, service, ignore_count_of_proccesses=True)
def check_plugin_online(self):
non_ha_pcmk_resources = ['p_ceilometer-agent-central',
'p_aodh-evaluator']
ha_pcmk_resources = ['telemetry-collector-heka']
controller_services = ['ceilometer-agent-notification',
'ceilometer-api', 'aodh-api']
compute_services = ['ceilometer-polling']
controller_ips = [
controller['ip'] for controller in
self.fuel_web.get_nailgun_cluster_nodes_by_roles(
self.helpers.cluster_id, ['controller'])]
compute_ips = [
compute['ip'] for compute in
self.fuel_web.get_nailgun_cluster_nodes_by_roles(
self.helpers.cluster_id, ['compute'])]
logger.info("Check {} pacemaker resources".format(
non_ha_pcmk_resources))
for resource in non_ha_pcmk_resources:
self.helpers.check_pacemaker_resource(
resource, "controller", is_ha=False)
logger.info("Check {} pacemaker resources".format(ha_pcmk_resources))
non_ha_pcmk_resources = ["p_ceilometer-agent-central",
"p_aodh-evaluator"]
ha_pcmk_resources = ["telemetry-collector-heka"]
controller_services = ["ceilometer-agent-notification",
"ceilometer-api", "aodh-api"]
compute_services = ["ceilometer-polling"]
if self.is_kafka_enabled():
kafka_services = ["telemetry-collector-hindsight"]
self.check_services_on_nodes(kafka_services, "kafka")
ha_pcmk_resources = non_ha_pcmk_resources
else:
logger.info("Check {} pacemaker resources".format(
non_ha_pcmk_resources))
for resource in non_ha_pcmk_resources:
self.helpers.check_pacemaker_resource(
resource, "controller", is_ha=False)
logger.info(
"Check {} pacemaker resources".format(ha_pcmk_resources))
for resource in ha_pcmk_resources:
self.helpers.check_pacemaker_resource(resource, "controller")
logger.info("Check {} services on {}".format(
controller_services, controller_ips))
for ip in controller_ips:
for service in controller_services:
fuelweb_checkers.verify_service(
ip, service, ignore_count_of_proccesses=True)
logger.info(
"Check {} services on {}".format(compute_services, compute_ips))
for ip in compute_ips:
for service in compute_services:
fuelweb_checkers.verify_service(
ip, service, ignore_count_of_proccesses=True)
self.check_services_on_nodes(controller_services, "controller")
self.check_services_on_nodes(compute_services, "compute")
logger.info("Check Ceilometer API")
keystone_access = self.helpers.os_conn.keystone_access
endpoint = keystone_access.service_catalog.url_for(
service_type='metering', service_name='ceilometer',
interface='internal')
service_type="metering", service_name="ceilometer",
interface="internal")
if not endpoint:
raise helpers.NotFound("Cannot find ceilometer endpoint")
raise helpers.NotFound("Cannot find Ceilometer endpoint")
headers = {
'X-Auth-Token': keystone_access.auth_token,
'content-type': 'application/json'
"X-Auth-Token": keystone_access.auth_token,
"content-type": "application/json"
}
checkers.check_http_get_response("{}/v2/capabilities".format(endpoint),
headers=headers)
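
The final check above reduces to an authenticated GET against the Ceilometer capabilities endpoint. An illustrative, standalone equivalent using the requests library (not part of the test suite; the endpoint and token would come from the keystone_access object shown above):

import requests

def fetch_ceilometer_capabilities(endpoint, auth_token):
    """Illustrative only: query {endpoint}/v2/capabilities with a Keystone
    token, which is what check_http_get_response verifies here."""
    headers = {
        "X-Auth-Token": auth_token,
        "content-type": "application/json",
    }
    response = requests.get(
        "{}/v2/capabilities".format(endpoint), headers=headers)
    response.raise_for_status()  # the check expects a successful response
    return response.json()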


@@ -27,6 +27,7 @@ from stacklight_tests.helpers import helpers
from stacklight_tests.helpers import remote_ops
from stacklight_tests.helpers import ui_tester
from stacklight_tests.influxdb_grafana import api as influx_api
from stacklight_tests.kafka import api as kafka_api
from stacklight_tests.lma_collector import api as collector_api
from stacklight_tests.lma_infrastructure_alerting import (
api as infrastructure_alerting_api)
@@ -45,6 +46,7 @@ class ToolchainApi(object):
self.ui_tester = ui_tester
self.ELASTICSEARCH_KIBANA = elasticsearch_api.ElasticsearchPluginApi()
self.INFLUXDB_GRAFANA = influx_api.InfluxdbPluginApi()
self.KAFKA = kafka_api.KafkaPluginApi()
self.LMA_COLLECTOR = collector_api.LMACollectorPluginApi()
self.LMA_INFRASTRUCTURE_ALERTING = (
infrastructure_alerting_api.InfraAlertingPluginApi())
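
With self.KAFKA exposed on ToolchainApi, a test case can register the Kafka plugin exactly like the other toolchain plugins. A minimal usage sketch (the class and method below are hypothetical; the calls mirror the telemetry tests added later in this commit):

from stacklight_tests.toolchain import api

class ExampleKafkaDeployment(api.ToolchainApi):
    """Hypothetical test: prepare a cluster with the Kafka plugin enabled."""

    def prepare_with_kafka(self):
        self.add_plugin(self.OPENSTACK_TELEMETRY)
        self.add_plugin(self.KAFKA)  # attribute introduced in this hunk
        self.disable_plugin(self.LMA_INFRASTRUCTURE_ALERTING)
        self.prepare_plugins()
        self.helpers.create_cluster(name=self.__class__.__name__)
        self.activate_plugins()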


@@ -22,24 +22,36 @@ from stacklight_tests.toolchain import api
class TestOpenstackTelemetry(api.ToolchainApi):
"""Class for testing the Openstack Telemetry Plugin."""
kafka_roles = {
"slave-01": ["controller", "kafka"],
"slave-02": ["controller", "kafka"],
"slave-03": ["controller", "kafka"],
"slave-04": ["compute", "cinder"],
"slave-05": ["elasticsearch_kibana", "influxdb_grafana"]
}
def _deploy_telemetry_plugin(self, caller, advanced_options=None,
additional_tests=None):
additional_tests=None,
additional_plugins=None, roles=None):
self.check_run(caller)
self.env.revert_snapshot("ready_with_5_slaves")
self.add_plugin(self.OPENSTACK_TELEMETRY)
self.disable_plugin(self.LMA_INFRASTRUCTURE_ALERTING)
if additional_plugins:
self.add_plugin(additional_plugins)
self.prepare_plugins()
self.helpers.create_cluster(name=self.__class__.__name__)
self.activate_plugins()
if advanced_options:
self.OPENSTACK_TELEMETRY.activate_plugin(options=advanced_options)
roles = ["elasticsearch_kibana", "influxdb_grafana"]
self.helpers.deploy_cluster(
{"slave-01": ["controller"],
"slave-02": ["controller"],
"slave-03": ["controller"],
"slave-04": ["compute", "cinder"],
"slave-05": roles})
node_roles = {
"slave-01": ["controller"],
"slave-02": ["controller"],
"slave-03": ["controller"],
"slave-04": ["compute", "cinder"],
"slave-05": ["elasticsearch_kibana", "influxdb_grafana"]
} if not roles else roles
self.helpers.deploy_cluster(nodes_roles=node_roles)
self.check_plugins_online()
self.helpers.run_ostf()
self.OPENSTACK_TELEMETRY.check_ceilometer_sample_functionality()
@@ -75,8 +87,7 @@ class TestOpenstackTelemetry(api.ToolchainApi):
self._deploy_telemetry_plugin("deploy_openstack_telemetry")
@test(depends_on_groups=["prepare_slaves_5"],
groups=["openstack_telemetry_event_functional",
"deploy_openstack_telemetry", "functional"])
groups=["openstack_telemetry_event_functional", "functional"])
@log_snapshot_after_test
def openstack_telemetry_event_functional(self):
"""Deploy an environment with Openstack-Telemetry plugin with
@@ -114,8 +125,7 @@ class TestOpenstackTelemetry(api.ToolchainApi):
)
@test(depends_on_groups=["prepare_slaves_5"],
groups=["openstack_telemetry_resource_functional",
"deploy_openstack_telemetry", "functional"])
groups=["openstack_telemetry_resource_functional", "functional"])
@log_snapshot_after_test
def openstack_telemetry_resource_functional(self):
"""Deploy an environment with Openstack-Telemetry plugin with
@@ -128,13 +138,13 @@ class TestOpenstackTelemetry(api.ToolchainApi):
4. Add 3 nodes with controller role
5. Add 1 node with compute and cinder roles
6. Add 1 node with elasticsearch_kibana and influxdb_grafana roles
9. Enable Ceilometer Resource API
10. Deploy the cluster
11. Check that plugins are running
12. Run OSTF
13. Check Ceilometer Sample API
14. Check Ceilometer Alarm API
15. Check Ceilometer Resource API
7. Enable Ceilometer Resource API
8. Deploy the cluster
9. Check that plugins are running
10. Run OSTF
11. Check Ceilometer Sample API
12. Check Ceilometer Alarm API
13. Check Ceilometer Resource API
Duration 90m
"""
@@ -154,8 +164,7 @@ class TestOpenstackTelemetry(api.ToolchainApi):
)
@test(depends_on_groups=["prepare_slaves_5"],
groups=["openstack_telemetry_all_functional",
"deploy_openstack_telemetry", "functional"])
groups=["openstack_telemetry_all_functional", "functional"])
@log_snapshot_after_test
def openstack_telemetry_full_functional(self):
"""Deploy an environment with Openstack-Telemetry plugin with
@@ -168,14 +177,14 @@ class TestOpenstackTelemetry(api.ToolchainApi):
4. Add 3 nodes with controller role
5. Add 1 node with compute and cinder roles
6. Add 1 node with elasticsearch_kibana and influxdb_grafana roles
9. Enable Ceilometer Event and Resource API
10. Deploy the cluster
11. Check that plugins are running
12. Run OSTF
13. Check Ceilometer Sample API
14. Check Ceilometer Alarm API
15. Check Ceilometer Event API
16. Check Ceilometer Resource API
7. Enable Ceilometer Event and Resource API
8. Deploy the cluster
9. Check that plugins are running
10. Run OSTF
11. Check Ceilometer Sample API
12. Check Ceilometer Alarm API
13. Check Ceilometer Event API
14. Check Ceilometer Resource API
Duration 90m
"""
@@ -195,3 +204,166 @@ class TestOpenstackTelemetry(api.ToolchainApi):
additional_tests=additional_tests,
advanced_options=options
)
@test(depends_on_groups=['prepare_slaves_5'],
groups=["deploy_openstack_telemetry_kafka", "deploy", "smoke"])
@log_snapshot_after_test
def deploy_openstack_telemetry_kafka(self):
"""Deploy an environment with Openstack-Telemetry plugin
with Elasticsearch and InfluxDB backends and Kafka plugin.
1. Upload the Openstack-Telemetry, Elasticsearch-Kibana, Kafka and
InfluxDB-Grafana plugins to the master node
2. Install the plugins
3. Create the cluster
4. Add 3 nodes with controller and kafka roles
5. Add 1 node with compute and cinder roles
6. Add 1 node with elasticsearch_kibana and influxdb_grafana roles
7. Deploy the cluster
8. Check that plugins are running
9. Run OSTF
10. Check Ceilometer Sample API
11. Check Ceilometer Alarm API
Duration 90m
"""
self._deploy_telemetry_plugin(
"deploy_openstack_telemetry_kafka",
additional_plugins=self.KAFKA,
roles=self.kafka_roles
)
@test(depends_on_groups=['prepare_slaves_5'],
groups=["deploy_telemetry_kafka_resource_api", "deploy", "smoke"])
@log_snapshot_after_test
def deploy_telemetry_kafka_resource_api(self):
"""Deploy an environment with Openstack-Telemetry and Kafka plugins
with Elasticsearch and InfluxDB backends and enabled Ceilometer
Resource API.
1. Upload the Openstack-Telemetry, Elasticsearch-Kibana, Kafka and
InfluxDB-Grafana plugins to the master node
2. Install the plugins
3. Create the cluster
4. Add 3 nodes with controller and kafka roles
5. Add 1 node with compute and cinder roles
6. Add 1 node with elasticsearch_kibana and influxdb_grafana roles
7. Enable Ceilometer Resource API
8. Deploy the cluster
9. Check that plugins are running
10. Run OSTF
11. Check Ceilometer Sample API
12. Check Ceilometer Alarm API
13. Check Ceilometer Resource API
Duration 90m
"""
additional_tests = (
self.OPENSTACK_TELEMETRY.check_ceilometer_resource_functionality,
)
options = {
"advanced_settings/value": True,
"resource_api/value": True,
}
self._deploy_telemetry_plugin(
"deploy_telemetry_kafka_resource_api",
additional_tests=additional_tests,
advanced_options=options,
additional_plugins=self.KAFKA,
roles=self.kafka_roles
)
@test(depends_on_groups=['prepare_slaves_5'],
groups=["deploy_telemetry_kafka_event_api", "deploy", "smoke"])
@log_snapshot_after_test
def deploy_telemetry_kafka_event_api(self):
"""Deploy an environment with Openstack-Telemetry and Kafka plugins
with Elasticsearch and InfluxDB backends and enabled Ceilometer
Event API.
1. Upload the Openstack-Telemetry, Elasticsearch-Kibana, Kafka and
InfluxDB-Grafana plugins to the master node
2. Install the plugins
3. Create the cluster
4. Add 3 nodes with controller and kafka roles
5. Add 1 node with compute and cinder roles
6. Add 1 node with elasticsearch_kibana and influxdb_grafana roles
7. Enable Ceilometer Event API
8. Deploy the cluster
9. Check that plugins are running
10. Run OSTF
11. Check Ceilometer Sample API
12. Check Ceilometer Alarm API
13. Check Ceilometer Event API
Duration 90m
"""
additional_tests = (
self.OPENSTACK_TELEMETRY.check_ceilometer_event_functionality,
)
options = {
"advanced_settings/value": True,
"event_api/value": True,
}
self._deploy_telemetry_plugin(
"deploy_telemetry_kafka_event_api",
additional_tests=additional_tests,
advanced_options=options,
additional_plugins=self.KAFKA,
roles=self.kafka_roles
)
@test(depends_on_groups=['prepare_slaves_5'],
groups=["deploy_telemetry_kafka_resource_event_api", "deploy",
"smoke"])
@log_snapshot_after_test
def deploy_telemetry_kafka_resource_event_api(self):
"""Deploy an environment with Openstack-Telemetry and Kafka plugins
with Elasticsearch and InfluxDB backends and enabled Ceilometer
Resource and Event API.
1. Upload the Openstack-Telemetry, Elasticsearch-Kibana, Kafka and
InfluxDB-Grafana plugins to the master node
2. Install the plugins
3. Create the cluster
4. Add 3 nodes with controller and kafka roles
5. Add 1 node with compute and cinder roles
6. Add 1 node with elasticsearch_kibana and influxdb_grafana roles
7. Enable Ceilometer Resource API
8. Enable Ceilometer Event API
9. Deploy the cluster
10. Check that plugins are running
11. Run OSTF
12. Check Ceilometer Sample API
13. Check Ceilometer Alarm API
14. Check Ceilometer Resource API
15. Check Ceilometer Event API
Duration 90m
"""
additional_tests = (
self.OPENSTACK_TELEMETRY.check_ceilometer_event_functionality,
self.OPENSTACK_TELEMETRY.check_ceilometer_resource_functionality,
)
options = {
"advanced_settings/value": True,
"event_api/value": True,
"resource_api/value": True,
}
self._deploy_telemetry_plugin(
"deploy_telemetry_kafka_resource_event_api",
additional_tests=additional_tests,
advanced_options=options,
additional_plugins=self.KAFKA,
roles=self.kafka_roles
)