[Magnum] Context and scenario for Kubernetes

Context and scenario to create pods and RCs

In this scenario, a Kubernetes cluster is created and
pods and replication controllers are launched.  These
will in turn launch docker instances.  The manifest
for the pods and replication controllers specifies
the docker image to be downloaded and used in the
containers.

The sample files will create nginx containers.

This scenario is intended to test the performance
of the Kubernetes as provisioned and configured by
Magnum.  There are many ways to configure the
cluster, therefore it would be helpful to detect
if any configuration can be tuned for better
performance.

Partially-Implements: blueprint benchmark-scenarios-for-magnum
Co-Authored-By: Ton Ngo <ton@us.ibm.com>
Change-Id: I3284f44ecce1f6b30087ad380b72da9ac41f21ce
This commit is contained in:
Winnie Tsang 2016-05-27 00:07:44 +00:00 committed by Mathieu Velten
parent dc2c7e49ce
commit f0786c82e9
18 changed files with 1475 additions and 2 deletions

View File

@ -28,4 +28,18 @@ OPTS = {"benchmark": [
default=1.0, default=1.0,
help="Time interval(in sec) between checks when waiting for " help="Time interval(in sec) between checks when waiting for "
"cluster creation."), "cluster creation."),
cfg.FloatOpt("k8s_pod_create_timeout",
default=600.0,
help="Time(in sec) to wait for k8s pod to be created."),
cfg.FloatOpt("k8s_pod_create_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"k8s pod creation."),
cfg.FloatOpt("k8s_rc_create_timeout",
default=600.0,
help="Time(in sec) to wait for k8s rc to be created."),
cfg.FloatOpt("k8s_rc_create_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"k8s rc creation."),
]} ]}

View File

@ -0,0 +1,136 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from cryptography.x509 import oid
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally import consts
from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="ca_certs", order=490)
class CaCertGenerator(context.Context):
"""Context class for generating temporary ca cert for benchmarks."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"directory": {
"type": "string",
}
},
"additionalProperties": False
}
def _generate_csr_and_key(self):
"""Return a dict with a new csr and key."""
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=backends.default_backend())
csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
x509.NameAttribute(oid.NameOID.COMMON_NAME, u"Magnum User"),
])).sign(key, hashes.SHA256(), backends.default_backend())
result = {
"csr": csr.public_bytes(encoding=serialization.Encoding.PEM),
"key": key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()),
}
return result
@logging.log_task_wrapper(LOG.info, _("Enter context: `Ca Cert`"))
def setup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
magnum_scenario = magnum_utils.MagnumScenario({
"user": user,
"task": self.context["task"],
"config": {"api_versions": self.context["config"].get(
"api_versions", [])}
})
# get the cluster and cluster_template
cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"])
cluster = magnum_scenario._get_cluster(cluster_uuid)
cluster_template = magnum_scenario._get_cluster_template(
cluster.cluster_template_id)
if not cluster_template.tls_disabled:
tls = self._generate_csr_and_key()
dir = ""
if self.config.get("directory") is not None:
dir = self.config.get("directory")
self.context["ca_certs_directory"] = dir
fname = os.path.join(dir, cluster_uuid + ".key")
with open(fname, "w") as key_file:
key_file.write(tls["key"])
# get CA certificate for this cluster
ca_cert = magnum_scenario._get_ca_certificate(cluster_uuid)
fname = os.path.join(dir, cluster_uuid + "_ca.crt")
with open(fname, "w") as ca_cert_file:
ca_cert_file.write(ca_cert.pem)
# send csr to Magnum to have it signed
csr_req = {"cluster_uuid": cluster_uuid,
"csr": tls["csr"]}
cert = magnum_scenario._create_ca_certificate(csr_req)
fname = os.path.join(dir, cluster_uuid + ".crt")
with open(fname, "w") as cert_file:
cert_file.write(cert.pem)
@logging.log_task_wrapper(LOG.info, _("Exit context: `Ca Cert`"))
def cleanup(self):
for user, tenant_id in rutils.iterate_per_tenants(
self.context["users"]):
magnum_scenario = magnum_utils.MagnumScenario({
"user": user,
"task": self.context["task"],
"config": {"api_versions": self.context["config"].get(
"api_versions", [])}
})
# get the cluster and cluster_template
cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"])
cluster = magnum_scenario._get_cluster(cluster_uuid)
cluster_template = magnum_scenario._get_cluster_template(
cluster.cluster_template_id)
if not cluster_template.tls_disabled:
dir = self.context["ca_certs_directory"]
fname = os.path.join(dir, cluster_uuid + ".key")
os.remove(fname)
fname = os.path.join(dir, cluster_uuid + "_ca.crt")
os.remove(fname)
fname = os.path.join(dir, cluster_uuid + ".crt")
os.remove(fname)

View File

@ -0,0 +1,73 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.magnum import utils
from rally.task import validation
"""Scenarios for Kubernetes pods and rcs."""
@validation.required_services(consts.Service.MAGNUM)
@validation.required_openstack(users=True)
@scenario.configure(name="K8sPods.list_pods")
class ListPods(utils.MagnumScenario):
def run(self):
"""List all pods.
"""
self._list_v1pods()
@validation.required_services(consts.Service.MAGNUM)
@validation.required_openstack(users=True)
@scenario.configure(name="K8sPods.create_pods")
class CreatePods(utils.MagnumScenario):
def run(self, manifests):
"""create pods and wait for them to be ready.
:param manifests: manifest files used to create the pods
"""
for manifest in manifests:
with open(manifest, "r") as f:
manifest_str = f.read()
manifest = yaml.load(manifest_str)
pod = self._create_v1pod(manifest)
msg = ("Pod isn't created")
self.assertTrue(pod, err_msg=msg)
@validation.required_services(consts.Service.MAGNUM)
@validation.required_openstack(users=True)
@scenario.configure(name="K8sPods.create_rcs")
class CreateRcs(utils.MagnumScenario):
def run(self, manifests):
"""create rcs and wait for them to be ready.
:param manifests: manifest files use to create the rcs
"""
for manifest in manifests:
with open(manifest, "r") as f:
manifest_str = f.read()
manifest = yaml.load(manifest_str)
rc = self._create_v1rc(manifest)
msg = ("RC isn't created")
self.assertTrue(rc, err_msg=msg)

View File

@ -12,14 +12,21 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import os
import random
import string
import time
import k8sclient.client as k8s_client
from oslo_config import cfg from oslo_config import cfg
from k8sclient.client.rest import ApiException
from rally.common import utils as common_utils from rally.common import utils as common_utils
from rally import exceptions
from rally.plugins.openstack import scenario from rally.plugins.openstack import scenario
from rally.task import atomic from rally.task import atomic
from rally.task import utils from rally.task import utils
CONF = cfg.CONF CONF = cfg.CONF
@ -58,11 +65,20 @@ class MagnumScenario(scenario.OpenStackScenario):
return self.clients("magnum").cluster_templates.create(**kwargs) return self.clients("magnum").cluster_templates.create(**kwargs)
@atomic.action_timer("magnum.get_cluster_template")
def _get_cluster_template(self, cluster_template):
"""Return details of the specify cluster template.
:param cluster_template: ID or name of the cluster template to show
:returns: clustertemplate detail
"""
return self.clients("magnum").cluster_templates.get(cluster_template)
@atomic.action_timer("magnum.list_clusters") @atomic.action_timer("magnum.list_clusters")
def _list_clusters(self, limit=None, **kwargs): def _list_clusters(self, limit=None, **kwargs):
"""Return list of clusters. """Return list of clusters.
:param limit: (Optional) the maximum number of results to return :param limit: Optional, the maximum number of results to return
per request, if: per request, if:
1) limit > 0, the maximum number of clusters to return. 1) limit > 0, the maximum number of clusters to return.
@ -101,3 +117,147 @@ class MagnumScenario(scenario.OpenStackScenario):
id_attr="uuid" id_attr="uuid"
) )
return cluster return cluster
@atomic.action_timer("magnum.get_cluster")
def _get_cluster(self, cluster):
"""Return details of the specify cluster.
:param cluster: ID or name of the cluster to show
:returns: cluster detail
"""
return self.clients("magnum").clusters.get(cluster)
@atomic.action_timer("magnum.get_ca_certificate")
def _get_ca_certificate(self, cluster_uuid):
"""Get CA certificate for this cluster
:param cluster_uuid: uuid of the cluster
"""
return self.clients("magnum").certificates.get(cluster_uuid)
@atomic.action_timer("magnum.create_ca_certificate")
def _create_ca_certificate(self, csr_req):
"""Send csr to Magnum to have it signed
:param csr_req: {"cluster_uuid": <uuid>, "csr": <csr file content>}
"""
return self.clients("magnum").certificates.create(**csr_req)
def _get_k8s_api_client(self):
cluster_uuid = self.context["tenant"]["cluster"]
cluster = self._get_cluster(cluster_uuid)
cluster_template = self._get_cluster_template(
cluster.cluster_template_id)
key_file = None
cert_file = None
ca_certs = None
if not cluster_template.tls_disabled:
dir = self.context["ca_certs_directory"]
key_file = cluster_uuid + ".key"
key_file = os.path.join(dir, key_file)
cert_file = cluster_uuid + ".crt"
cert_file = os.path.join(dir, cert_file)
ca_certs = cluster_uuid + "_ca.crt"
ca_certs = os.path.join(dir, ca_certs)
client = k8s_client.api_client.ApiClient(
cluster.api_address,
key_file=key_file,
cert_file=cert_file,
ca_certs=ca_certs)
return k8s_client.apis.apiv_api.ApivApi(client)
@atomic.action_timer("magnum.k8s_list_v1pods")
def _list_v1pods(self):
"""List all pods.
"""
k8s_api = self._get_k8s_api_client()
return k8s_api.list_namespaced_pod(namespace="default")
@atomic.action_timer("magnum.k8s_create_v1pod")
def _create_v1pod(self, manifest):
"""Create a pod on the specify cluster.
:param manifest: manifest use to create the pod
"""
k8s_api = self._get_k8s_api_client()
podname = manifest["metadata"]["name"] + "-"
for i in range(5):
podname = podname + random.choice(string.ascii_lowercase)
manifest["metadata"]["name"] = podname
for i in range(150):
try:
k8s_api.create_namespaced_pod(body=manifest,
namespace="default")
break
except ApiException as e:
if e.status != 403:
raise
time.sleep(2)
start = time.time()
while True:
resp = k8s_api.read_namespaced_pod(
name=podname, namespace="default")
if resp.status.conditions:
for condition in resp.status.conditions:
if condition.type.lower() == "ready" and \
condition.status.lower() == "true":
return resp
if (time.time() - start
> CONF.benchmark.k8s_pod_create_timeout):
raise exceptions.TimeoutException(
desired_status="Ready",
resource_name=podname,
resource_type="Pod",
resource_id=resp.metadata.uid,
resource_status=resp.status)
common_utils.interruptable_sleep(
CONF.benchmark.k8s_pod_create_poll_interval)
@atomic.action_timer("magnum.k8s_list_v1rcs")
def _list_v1rcs(self):
"""List all rcs.
"""
k8s_api = self._get_k8s_api_client()
return k8s_api.list_namespaced_replication_controller(
namespace="default")
@atomic.action_timer("magnum.k8s_create_v1rc")
def _create_v1rc(self, manifest):
"""Create rc on the specify cluster.
:param manifest: manifest use to create the replication controller
"""
k8s_api = self._get_k8s_api_client()
suffix = "-"
for i in range(5):
suffix = suffix + random.choice(string.ascii_lowercase)
rcname = manifest["metadata"]["name"] + suffix
manifest["metadata"]["name"] = rcname
resp = k8s_api.create_namespaced_replication_controller(
body=manifest,
namespace="default")
expectd_status = resp.spec.replicas
start = time.time()
while True:
resp = k8s_api.read_namespaced_replication_controller(
name=rcname,
namespace="default")
status = resp.status.replicas
if status == expectd_status:
return resp
else:
if time.time() - start > CONF.benchmark.k8s_rc_create_timeout:
raise exceptions.TimeoutException(
desired_status=expectd_status,
resource_name=rcname,
resource_type="ReplicationController",
resource_id=resp.metadata.uid,
resource_status=status)
common_utils.interruptable_sleep(
CONF.benchmark.k8s_rc_create_poll_interval)

View File

@ -50,3 +50,4 @@ python-swiftclient>=3.2.0,<=3.3.0 # Apache Software License
python-troveclient>=2.2.0,<=2.9.0 # Apache Software License python-troveclient>=2.2.0,<=2.9.0 # Apache Software License
python-watcherclient>=0.23.0,<=1.1.0 # Apache Software License python-watcherclient>=0.23.0,<=1.1.0 # Apache Software License
python-zaqarclient>=1.0.0,<=1.5.0 # Apache Software License python-zaqarclient>=1.0.0,<=1.5.0 # Apache Software License
python-k8sclient>=0.2.0 # Apache Software License

View File

@ -0,0 +1,66 @@
{
"Dummy.openstack": [
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"context": {
"ca_certs": {
"directory": "/home/stack"
},
"clusters": {
"node_count": 2
},
"cluster_templates": {
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"docker_volume_size": 5,
"coe": "kubernetes",
"image_id": "fedora-atomic-latest",
"network_driver": "flannel"
},
"users": {
"users_per_tenant": 1,
"tenants": 1
}
}
},
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"context": {
"ca_certs": {
"directory": "/home/stack"
},
"clusters": {
"node_count": 2
},
"cluster_templates": {
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"docker_volume_size": 5,
"coe": "swarm",
"image_id": "fedora-atomic-latest",
"network_driver": "docker"
},
"users": {
"users_per_tenant": 1,
"tenants": 1
}
}
}
]
}

View File

@ -0,0 +1,48 @@
---
Dummy.openstack:
-
args:
sleep: 0.1
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "flannel"
coe: "kubernetes"
clusters:
node_count: 2
ca_certs:
directory: "/home/stack"
-
args:
sleep: 0.1
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "docker"
coe: "swarm"
clusters:
node_count: 2
ca_certs:
directory: "/home/stack"

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-1
labels:
app: nginx-1
spec:
containers:
- name: nginx-1
image: nginx
ports:
- containerPort: 80

View File

@ -0,0 +1,24 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-controller
spec:
replicas: 2
# selector identifies the set of pods that this
# replication controller is responsible for managing
selector:
name: nginx
# template defines the 'cookie cutter' used for creating
# new pods when necessary
template:
metadata:
labels:
# Important: these labels need to match the selector above
# The api server enforces this constraint.
name: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80

View File

@ -0,0 +1,64 @@
{
"K8sPods.create_pods": [
{
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"args": {
"manifests": ["artifacts/nginx.yaml.k8s"]
},
"context": {
"users": {
"users_per_tenant": 1,
"tenants": 1
},
"cluster_templates": {
"docker_volume_size": 5,
"coe": "kubernetes",
"image_id": "fedora-atomic-latest",
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"network_driver": "flannel"
},
"clusters": {
"node_count": 2
},
"ca_certs": {
"directory": "/home/stack"
}
}
},
{
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"args": {
"manifests": ["artifacts/nginx.yaml.k8s"]
},
"context": {
"users": {
"users_per_tenant": 1,
"tenants": 1
},
"cluster_templates": {
"docker_volume_size": 5,
"coe": "kubernetes",
"image_id": "fedora-atomic-latest",
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"network_driver": "flannel",
"tls_disabled": true
},
"clusters": {
"node_count": 2
}
}
}
]
}

View File

@ -0,0 +1,47 @@
---
K8sPods.create_pods:
-
args:
manifests: ["artifacts/nginx.yaml.k8s"]
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "flannel"
coe: "kubernetes"
clusters:
node_count: 2
ca_certs:
directory: "/home/stack"
-
args:
manifests: ["artifacts/nginx.yaml.k8s"]
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "flannel"
coe: "kubernetes"
tls_disabled: True
clusters:
node_count: 2

View File

@ -0,0 +1,64 @@
{
"K8sPods.create_rcs": [
{
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"args": {
"manifests": ["artifacts/rc_nginx.yaml.k8s"]
},
"context": {
"users": {
"users_per_tenant": 1,
"tenants": 1
},
"cluster_templates": {
"docker_volume_size": 5,
"coe": "kubernetes",
"image_id": "fedora-atomic-latest",
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"network_driver": "flannel"
},
"clusters": {
"node_count": 2
},
"ca_certs": {
"directory": "/home/stack"
}
}
},
{
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"args": {
"manifests": ["artifacts/rc_nginx.yaml.k8s"]
},
"context": {
"users": {
"users_per_tenant": 1,
"tenants": 1
},
"cluster_templates": {
"docker_volume_size": 5,
"coe": "kubernetes",
"image_id": "fedora-atomic-latest",
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"network_driver": "flannel",
"tls_disabled": true
},
"clusters": {
"node_count": 2
}
}
}
]
}

View File

@ -0,0 +1,47 @@
---
K8sPods.create_rcs:
-
args:
manifests: ["artifacts/rc_nginx.yaml.k8s"]
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "flannel"
coe: "kubernetes"
clusters:
node_count: 2
ca_certs:
directory: "/home/stack"
-
args:
manifests: ["artifacts/rc_nginx.yaml.k8s"]
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "flannel"
coe: "kubernetes"
tls_disabled: True
clusters:
node_count: 2

View File

@ -0,0 +1,58 @@
{
"K8sPods.list_pods": [
{
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"context": {
"users": {
"users_per_tenant": 1,
"tenants": 1
},
"cluster_templates": {
"docker_volume_size": 5,
"coe": "kubernetes",
"image_id": "fedora-atomic-latest",
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"network_driver": "flannel"
},
"clusters": {
"node_count": 2
},
"ca_certs": {
"directory": ""
}
}
},
{
"runner": {
"type": "constant",
"concurrency": 1,
"times": 1
},
"context": {
"users": {
"users_per_tenant": 1,
"tenants": 1
},
"cluster_templates": {
"docker_volume_size": 5,
"coe": "kubernetes",
"image_id": "fedora-atomic-latest",
"dns_nameserver": "8.8.8.8",
"external_network_id": "public",
"flavor_id": "m1.small",
"network_driver": "flannel",
"tls_disabled": true
},
"clusters": {
"node_count": 2
}
}
}
]
}

View File

@ -0,0 +1,43 @@
---
K8sPods.list_pods:
-
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "flannel"
coe: "kubernetes"
clusters:
node_count: 2
ca_certs:
directory: ""
-
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 1
users_per_tenant: 1
cluster_templates:
image_id: "fedora-atomic-latest"
external_network_id: "public"
dns_nameserver: "8.8.8.8"
flavor_id: "m1.small"
docker_volume_size: 5
network_driver: "flannel"
coe: "kubernetes"
tls_disabled: True
clusters:
node_count: 2

View File

@ -0,0 +1,248 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.magnum import ca_certs
from tests.unit import test
CTX = "rally.plugins.openstack.context.magnum"
SCN = "rally.plugins.openstack.scenarios"
class CaCertsGeneratorTestCase(test.ScenarioTestCase):
def _gen_tenants(self, count):
tenants = {}
for id_ in range(count):
tenants[str(id_)] = {"name": str(id_)}
tenants[str(id_)]["cluster"] = "rally_cluster_uuid"
return tenants
def test__generate_csr_and_key(self):
ca_cert_ctx = ca_certs.CaCertGenerator(self.context)
result = ca_cert_ctx._generate_csr_and_key()
assert result["csr"] is not None
assert result["key"] is not None
@mock.patch("%s.magnum.utils.MagnumScenario._create_ca_certificate" % SCN)
@mock.patch("%s.magnum.utils.MagnumScenario._get_ca_certificate" % SCN)
@mock.patch("%s.ca_certs.open" % CTX, side_effect=mock.mock_open(),
create=True)
@mock.patch("%s.ca_certs.CaCertGenerator._generate_csr_and_key"
% CTX)
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN)
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN,
return_value=mock.Mock())
def test_setup(self, mock_magnum_scenario__get_cluster,
mock_magnum_scenario__get_cluster_template,
mock_ca_cert_generator__generate_csr_and_key,
mock_open,
mock_magnum_scenario__get_ca_certificate,
mock_magnum_scenario__create_ca_certificate):
tenants_count = 2
users_per_tenant = 5
tenants = self._gen_tenants(tenants_count)
users = []
for ten_id in tenants:
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": ten_id,
"credential": mock.MagicMock()})
self.context.update({
"config": {
"users": {
"tenants": tenants_count,
"users_per_tenant": users_per_tenant,
"concurrent": 10,
},
"clusters": {
"cluster_template_uuid": "123456789",
"node_count": 2
},
"ca_certs": {
"directory": ""
}
},
"users": users,
"tenants": tenants
})
fake_ct = mock.Mock()
fake_ct.tls_disabled = False
mock_magnum_scenario__get_cluster_template.return_value = fake_ct
fake_tls = {"csr": "fake_csr", "key": "fake_key"}
mock_ca_cert_generator__generate_csr_and_key.return_value = fake_tls
fake_ca_cert = mock.Mock()
fake_ca_cert.pem = "fake_ca_cert"
mock_magnum_scenario__get_ca_certificate.return_value = fake_ca_cert
fake_cert = mock.Mock()
fake_cert.pem = "fake_cert"
mock_magnum_scenario__create_ca_certificate.return_value = fake_cert
ca_cert_ctx = ca_certs.CaCertGenerator(self.context)
ca_cert_ctx.setup()
mock_cluster = mock_magnum_scenario__get_cluster.return_value
mock_calls = [mock.call(mock_cluster.cluster_template_id)
for i in range(tenants_count)]
mock_magnum_scenario__get_cluster_template.assert_has_calls(
mock_calls)
mock_calls = [mock.call("rally_cluster_uuid")
for i in range(tenants_count)]
mock_magnum_scenario__get_cluster.assert_has_calls(mock_calls)
mock_magnum_scenario__get_ca_certificate.assert_has_calls(mock_calls)
fake_csr_req = {"cluster_uuid": "rally_cluster_uuid",
"csr": fake_tls["csr"]}
mock_calls = [mock.call(fake_csr_req)
for i in range(tenants_count)]
mock_magnum_scenario__create_ca_certificate.assert_has_calls(
mock_calls)
@mock.patch("%s.magnum.utils.MagnumScenario._create_ca_certificate" % SCN)
@mock.patch("%s.magnum.utils.MagnumScenario._get_ca_certificate" % SCN)
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN)
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN,
return_value=mock.Mock())
def test_tls_disabled_setup(self, mock_magnum_scenario__get_cluster,
mock_magnum_scenario__get_cluster_template,
mock_magnum_scenario__get_ca_certificate,
mock_magnum_scenario__create_ca_certificate):
tenants_count = 2
users_per_tenant = 5
tenants = self._gen_tenants(tenants_count)
users = []
for ten_id in tenants:
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": ten_id,
"credential": mock.MagicMock()})
self.context.update({
"config": {
"users": {
"tenants": tenants_count,
"users_per_tenant": users_per_tenant,
"concurrent": 10,
},
"clusters": {
"cluster_template_uuid": "123456789",
"node_count": 2
},
"ca_certs": {
"directory": ""
}
},
"users": users,
"tenants": tenants
})
fake_ct = mock.Mock()
fake_ct.tls_disabled = True
mock_magnum_scenario__get_cluster_template.return_value = fake_ct
ca_cert_ctx = ca_certs.CaCertGenerator(self.context)
ca_cert_ctx.setup()
mock_cluster = mock_magnum_scenario__get_cluster.return_value
mock_calls = [mock.call(mock_cluster.cluster_template_id)
for i in range(tenants_count)]
mock_magnum_scenario__get_cluster_template.assert_has_calls(
mock_calls)
mock_calls = [mock.call("rally_cluster_uuid")
for i in range(tenants_count)]
mock_magnum_scenario__get_cluster.assert_has_calls(mock_calls)
mock_magnum_scenario__get_ca_certificate.assert_not_called()
mock_magnum_scenario__create_ca_certificate.assert_not_called()
@mock.patch("os.remove", return_value=mock.Mock())
@mock.patch("os.path.join", return_value=mock.Mock())
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN)
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN,
return_value=mock.Mock())
def test_cleanup(self, mock_magnum_scenario__get_cluster,
mock_magnum_scenario__get_cluster_template,
mock_os_path_join, mock_os_remove):
tenants_count = 2
users_per_tenant = 5
tenants = self._gen_tenants(tenants_count)
users = []
for ten_id in tenants:
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": ten_id,
"credential": mock.MagicMock()})
self.context.update({
"config": {
},
"ca_certs_directory": "",
"users": users,
"tenants": tenants
})
fake_ct = mock.Mock()
fake_ct.tls_disabled = False
mock_magnum_scenario__get_cluster_template.return_value = fake_ct
ca_cert_ctx = ca_certs.CaCertGenerator(self.context)
ca_cert_ctx.cleanup()
cluster_uuid = "rally_cluster_uuid"
dir = self.context["ca_certs_directory"]
mock_os_path_join.assert_has_calls(dir, cluster_uuid.__add__(".key"))
mock_os_path_join.assert_has_calls(
dir, cluster_uuid.__add__("_ca.crt"))
mock_os_path_join.assert_has_calls(dir, cluster_uuid.__add__(".crt"))
@mock.patch("os.remove", return_value=mock.Mock())
@mock.patch("os.path.join", return_value=mock.Mock())
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN)
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN,
return_value=mock.Mock())
def test_tls_disabled_cleanup(self, mock_magnum_scenario__get_cluster,
mock_magnum_scenario__get_cluster_template,
mock_os_path_join, mock_os_remove):
tenants_count = 2
users_per_tenant = 5
tenants = self._gen_tenants(tenants_count)
users = []
for ten_id in tenants:
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": ten_id,
"credential": mock.MagicMock()})
self.context.update({
"config": {
},
"ca_certs_directory": "",
"users": users,
"tenants": tenants
})
fake_ct = mock.Mock()
fake_ct.tls_disabled = True
mock_magnum_scenario__get_cluster_template.return_value = fake_ct
ca_cert_ctx = ca_certs.CaCertGenerator(self.context)
ca_cert_ctx.cleanup()
mock_os_path_join.assert_not_called()
mock_os_remove.assert_not_called()

View File

@ -0,0 +1,104 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally import exceptions
from rally.plugins.openstack.scenarios.magnum import k8s_pods
from tests.unit import test
@ddt.ddt
class K8sPodsTestCase(test.ScenarioTestCase):
def test_list_pods(self):
scenario = k8s_pods.ListPods()
scenario._list_v1pods = mock.Mock()
scenario.run()
scenario._list_v1pods.assert_called_once_with()
@ddt.data(["manifest.json"], ["manifest.yaml"])
def test_create_pods(self, manifests):
manifest = manifests[0]
scenario = k8s_pods.CreatePods()
file_content = "data: fake_content"
if manifest == "manifest.json":
file_content = "{\"data\": \"fake_content\"}"
file_mock = mock.mock_open(read_data=file_content)
fake_pod = mock.Mock()
scenario._create_v1pod = mock.MagicMock(return_value=fake_pod)
with mock.patch(
"rally.plugins.openstack.scenarios.magnum.k8s_pods.open",
file_mock, create=True) as m:
scenario.run(manifests)
m.assert_called_once_with(manifest, "r")
m.return_value.read.assert_called_once_with()
scenario._create_v1pod.assert_called_once_with(
{"data": "fake_content"})
# test error cases:
# 1. pod not created
scenario._create_v1pod = mock.MagicMock(return_value=None)
with mock.patch(
"rally.plugins.openstack.scenarios.magnum.k8s_pods.open",
file_mock, create=True) as m:
self.assertRaises(
exceptions.RallyAssertionError,
scenario.run, manifests)
m.assert_called_with(manifest, "r")
m.return_value.read.assert_called_with()
scenario._create_v1pod.assert_called_with(
{"data": "fake_content"})
@ddt.data(["manifest.json"], ["manifest.yaml"])
def test_create_rcs(self, manifests):
manifest = manifests[0]
scenario = k8s_pods.CreateRcs()
file_content = "data: fake_content"
if manifest == "manifest.json":
file_content = "{\"data\": \"fake_content\"}"
file_mock = mock.mock_open(read_data=file_content)
fake_rc = mock.Mock()
scenario._create_v1rc = mock.MagicMock(return_value=fake_rc)
with mock.patch(
"rally.plugins.openstack.scenarios.magnum.k8s_pods.open",
file_mock, create=True) as m:
scenario.run(manifests)
m.assert_called_once_with(manifest, "r")
m.return_value.read.assert_called_once_with()
scenario._create_v1rc.assert_called_once_with({"data": "fake_content"})
# test error cases:
# 1. rc not created
scenario._create_v1rc = mock.MagicMock(return_value=None)
with mock.patch(
"rally.plugins.openstack.scenarios.magnum.k8s_pods.open",
file_mock, create=True) as m:
self.assertRaises(
exceptions.RallyAssertionError,
scenario.run, manifests)
m.assert_called_with(manifest, "r")
m.return_value.read.assert_called_with()
scenario._create_v1rc.assert_called_with({"data": "fake_content"})

View File

@ -12,11 +12,18 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import os
import k8sclient.client as k8s_client
import mock import mock
from k8sclient.client.rest import ApiException
from rally import exceptions
from rally.plugins.openstack.scenarios.magnum import utils from rally.plugins.openstack.scenarios.magnum import utils
from tests.unit import test from tests.unit import test
MAGNUM_UTILS = "rally.plugins.openstack.scenarios.magnum.utils"
CONF = utils.CONF CONF = utils.CONF
@ -25,6 +32,7 @@ class MagnumScenarioTestCase(test.ScenarioTestCase):
super(MagnumScenarioTestCase, self).setUp() super(MagnumScenarioTestCase, self).setUp()
self.cluster_template = mock.Mock() self.cluster_template = mock.Mock()
self.cluster = mock.Mock() self.cluster = mock.Mock()
self.pod = mock.Mock()
self.scenario = utils.MagnumScenario(self.context) self.scenario = utils.MagnumScenario(self.context)
def test_list_cluster_templates(self): def test_list_cluster_templates(self):
@ -61,6 +69,15 @@ class MagnumScenarioTestCase(test.ScenarioTestCase):
self._test_atomic_action_timer(self.scenario.atomic_actions(), self._test_atomic_action_timer(self.scenario.atomic_actions(),
"magnum.create_cluster_template") "magnum.create_cluster_template")
def test_get_cluster_template(self):
client = self.clients("magnum")
client.cluster_templates.get.return_value = self.cluster_template
return_cluster_template = self.scenario._get_cluster_template("uuid")
client.cluster_templates.get.assert_called_once_with("uuid")
self.assertEqual(self.cluster_template, return_cluster_template)
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.get_cluster_template")
def test_list_clusters(self): def test_list_clusters(self):
return_clusters_list = self.scenario._list_clusters(limit="foo1") return_clusters_list = self.scenario._list_clusters(limit="foo1")
client = self.clients("magnum") client = self.clients("magnum")
@ -92,3 +109,250 @@ class MagnumScenarioTestCase(test.ScenarioTestCase):
self.mock_wait_for_status.mock.return_value, return_cluster) self.mock_wait_for_status.mock.return_value, return_cluster)
self._test_atomic_action_timer( self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.create_cluster") self.scenario.atomic_actions(), "magnum.create_cluster")
def test_get_cluster(self):
self.clients("magnum").clusters.get.return_value = self.cluster
return_cluster = self.scenario._get_cluster("uuid")
self.clients("magnum").clusters.get.assert_called_once_with("uuid")
self.assertEqual(self.cluster, return_cluster)
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.get_cluster")
def test_get_ca_certificate(self):
self.scenario._get_ca_certificate(self.cluster.uuid)
self.clients("magnum").certificates.get.assert_called_once_with(
self.cluster.uuid)
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.get_ca_certificate")
def test_create_ca_certificate(self):
csr_req = {"cluster_uuid": "uuid", "csr": "csr file"}
self.scenario._create_ca_certificate(csr_req)
self.clients("magnum").certificates.create.assert_called_once_with(
**csr_req)
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.create_ca_certificate")
@mock.patch("k8sclient.client.apis.apiv_api.ApivApi")
@mock.patch("k8sclient.client.api_client.ApiClient")
def test_get_k8s_api_client_using_tls(self, mock_api_client,
mock_apiv_api):
self.context.update({
"ca_certs_directory": "/home/stack",
"tenant": {
"id": "rally_tenant_id",
"cluster": "rally_cluster_uuid"
}
})
self.scenario = utils.MagnumScenario(self.context)
cluster_uuid = self.context["tenant"]["cluster"]
client = self.clients("magnum")
client.clusters.get.return_value = self.cluster
cluster = self.scenario._get_cluster(cluster_uuid)
self.cluster_template.tls_disabled = False
client.cluster_templates.get.return_value = self.cluster_template
dir = self.context["ca_certs_directory"]
key_file = os.path.join(dir, cluster_uuid.__add__(".key"))
cert_file = os.path.join(dir, cluster_uuid.__add__(".crt"))
ca_certs = os.path.join(dir, cluster_uuid.__add__("_ca.crt"))
k8s_client = mock_api_client.return_value
self.scenario._get_k8s_api_client()
mock_api_client.assert_called_once_with(
cluster.api_address,
key_file=key_file,
cert_file=cert_file,
ca_certs=ca_certs)
mock_apiv_api.assert_called_once_with(k8s_client)
@mock.patch("k8sclient.client.apis.apiv_api.ApivApi")
@mock.patch("k8sclient.client.api_client.ApiClient")
def test_get_k8s_api_client(self, mock_api_client,
mock_apiv_api):
self.context.update({
"tenant": {
"id": "rally_tenant_id",
"cluster": "rally_cluster_uuid"
}
})
self.scenario = utils.MagnumScenario(self.context)
cluster_uuid = self.context["tenant"]["cluster"]
client = self.clients("magnum")
client.clusters.get.return_value = self.cluster
cluster = self.scenario._get_cluster(cluster_uuid)
self.cluster_template.tls_disabled = True
client.cluster_templates.get.return_value = self.cluster_template
k8s_client = mock_api_client.return_value
self.scenario._get_k8s_api_client()
mock_api_client.assert_called_once_with(
cluster.api_address, key_file=None, cert_file=None, ca_certs=None)
mock_apiv_api.assert_called_once_with(k8s_client)
@mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
def test_list_v1pods(self, mock__get_k8s_api_client):
k8s_api = mock__get_k8s_api_client.return_value
self.scenario._list_v1pods()
k8s_api.list_namespaced_pod.assert_called_once_with(
namespace="default")
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.k8s_list_v1pods")
@mock.patch("random.choice")
@mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
def test_create_v1pod(self, mock__get_k8s_api_client,
mock_random_choice):
k8s_api = mock__get_k8s_api_client.return_value
manifest = (
{"apiVersion": "v1", "kind": "Pod",
"metadata": {"name": "nginx"}})
podname = manifest["metadata"]["name"] + "-"
for i in range(5):
podname = podname + mock_random_choice.return_value
k8s_api.create_namespaced_pod = mock.MagicMock(
side_effect=[ApiException(status=403), self.pod])
not_ready_pod = k8s_client.models.V1Pod()
not_ready_status = k8s_client.models.V1PodStatus()
not_ready_status.phase = "not_ready"
not_ready_pod.status = not_ready_status
almost_ready_pod = k8s_client.models.V1Pod()
almost_ready_status = k8s_client.models.V1PodStatus()
almost_ready_status.phase = "almost_ready"
almost_ready_pod.status = almost_ready_status
ready_pod = k8s_client.models.V1Pod()
ready_condition = k8s_client.models.V1PodCondition()
ready_condition.status = "True"
ready_condition.type = "Ready"
ready_status = k8s_client.models.V1PodStatus()
ready_status.phase = "Running"
ready_status.conditions = [ready_condition]
ready_pod_metadata = k8s_client.models.V1ObjectMeta()
ready_pod_metadata.uid = "123456789"
ready_pod_spec = k8s_client.models.V1PodSpec()
ready_pod_spec.node_name = "host_abc"
ready_pod.status = ready_status
ready_pod.metadata = ready_pod_metadata
ready_pod.spec = ready_pod_spec
k8s_api.read_namespaced_pod = mock.MagicMock(
side_effect=[not_ready_pod, almost_ready_pod, ready_pod])
self.scenario._create_v1pod(manifest)
k8s_api.create_namespaced_pod.assert_called_with(
body=manifest, namespace="default")
k8s_api.read_namespaced_pod.assert_called_with(
name=podname, namespace="default")
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.k8s_create_v1pod")
@mock.patch("time.time")
@mock.patch("random.choice")
@mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
def test_create_v1pod_timeout(self, mock__get_k8s_api_client,
mock_random_choice, mock_time):
k8s_api = mock__get_k8s_api_client.return_value
manifest = (
{"apiVersion": "v1", "kind": "Pod",
"metadata": {"name": "nginx"}})
k8s_api.create_namespaced_pod.return_value = self.pod
mock_time.side_effect = [1, 2, 3, 4, 5, 900, 901]
not_ready_pod = k8s_client.models.V1Pod()
not_ready_status = k8s_client.models.V1PodStatus()
not_ready_status.phase = "not_ready"
not_ready_pod_metadata = k8s_client.models.V1ObjectMeta()
not_ready_pod_metadata.uid = "123456789"
not_ready_pod.status = not_ready_status
not_ready_pod.metadata = not_ready_pod_metadata
k8s_api.read_namespaced_pod = mock.MagicMock(
side_effect=[not_ready_pod
for i in range(4)])
self.assertRaises(
exceptions.TimeoutException,
self.scenario._create_v1pod, manifest)
@mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
def test_list_v1rcs(self, mock__get_k8s_api_client):
k8s_api = mock__get_k8s_api_client.return_value
self.scenario._list_v1rcs()
(k8s_api.list_namespaced_replication_controller
.assert_called_once_with(namespace="default"))
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.k8s_list_v1rcs")
@mock.patch("random.choice")
@mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
def test_create_v1rc(self, mock__get_k8s_api_client,
mock_random_choice):
k8s_api = mock__get_k8s_api_client.return_value
manifest = (
{"apiVersion": "v1",
"kind": "ReplicationController",
"metadata": {"name": "nginx-controller"},
"spec": {"replicas": 2,
"selector": {"name": "nginx"},
"template": {"metadata":
{"labels":
{"name": "nginx"}}}}})
suffix = "-"
for i in range(5):
suffix = suffix + mock_random_choice.return_value
rcname = manifest["metadata"]["name"] + suffix
rc = k8s_client.models.V1ReplicationController()
rc.spec = k8s_client.models.V1ReplicationControllerSpec()
rc.spec.replicas = manifest["spec"]["replicas"]
k8s_api.create_namespaced_replication_controller.return_value = rc
not_ready_rc = k8s_client.models.V1ReplicationController()
not_ready_rc_status = (
k8s_client.models.V1ReplicationControllerStatus())
not_ready_rc_status.replicas = None
not_ready_rc.status = not_ready_rc_status
ready_rc = k8s_client.models.V1ReplicationController()
ready_rc_status = k8s_client.models.V1ReplicationControllerStatus()
ready_rc_status.replicas = manifest["spec"]["replicas"]
ready_rc_metadata = k8s_client.models.V1ObjectMeta()
ready_rc_metadata.uid = "123456789"
ready_rc_metadata.name = rcname
ready_rc.status = ready_rc_status
ready_rc.metadata = ready_rc_metadata
k8s_api.read_namespaced_replication_controller = mock.MagicMock(
side_effect=[not_ready_rc, ready_rc])
self.scenario._create_v1rc(manifest)
(k8s_api.create_namespaced_replication_controller
.assert_called_once_with(body=manifest, namespace="default"))
(k8s_api.read_namespaced_replication_controller
.assert_called_with(name=rcname, namespace="default"))
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "magnum.k8s_create_v1rc")
@mock.patch("time.time")
@mock.patch("random.choice")
@mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
def test_create_v1rc_timeout(self, mock__get_k8s_api_client,
mock_random_choice, mock_time):
k8s_api = mock__get_k8s_api_client.return_value
manifest = (
{"apiVersion": "v1",
"kind": "ReplicationController",
"metadata": {"name": "nginx-controller"},
"spec": {"replicas": 2,
"selector": {"app": "nginx"},
"template": {"metadata":
{"labels":
{"name": "nginx"}}}}})
rc = k8s_client.models.V1ReplicationController()
rc.spec = k8s_client.models.V1ReplicationControllerSpec()
rc.spec.replicas = manifest["spec"]["replicas"]
mock_time.side_effect = [1, 2, 3, 4, 5, 900, 901]
k8s_api.create_namespaced_replication_controller.return_value = rc
not_ready_rc = k8s_client.models.V1ReplicationController()
not_ready_rc_status = (
k8s_client.models.V1ReplicationControllerStatus())
not_ready_rc_status.replicas = None
not_ready_rc_metadata = k8s_client.models.V1ObjectMeta()
not_ready_rc_metadata.uid = "123456789"
not_ready_rc.status = not_ready_rc_status
not_ready_rc.metadata = not_ready_rc_metadata
k8s_api.read_namespaced_replication_controller = mock.MagicMock(
side_effect=[not_ready_rc
for i in range(4)])
self.assertRaises(
exceptions.TimeoutException,
self.scenario._create_v1rc, manifest)