improve logging
Dynamic workloads create multiple resources and perform different operations across several files, so it is convenient to include DYNAMIC_WORKLOADS in each log message to distinguish it from other log messages. Printing the ITERATION number also makes it much easier to track resources. This patch introduces log_info and log_error helpers that prefix every log message in the dynamic workload files with DYNAMIC_WORKLOADS and the iteration number. All dynamic workloads should call self.log_info instead of LOG.info.

Change-Id: Idf8b97325a760509d69e6c391efd66000dc66279
parent d82920484f
commit 19d87dec04
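For context, a minimal sketch of how the new helpers behave and how a workload method is expected to call them. The standalone Workload class, the hard-coded context dict, and the sample message are illustrative stand-ins only; in the actual plugins the helpers live on NovaUtils/NeutronUtils and self.context comes from Rally's scenario context.

# Minimal sketch of the pattern this patch introduces. The Workload class and
# the hard-coded context below are illustrative, not part of the patch.
import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)


class Workload(object):
    def __init__(self, iteration):
        # Rally exposes the current iteration number via the scenario context
        self.context = {"iteration": iteration}

    def log_info(self, msg):
        # Prefix every message with DYNAMIC_WORKLOADS and the iteration number
        log_msg = " DYNAMIC_WORKLOADS ITER: {} {} ".format(self.context["iteration"], msg)
        LOG.info(log_msg)


w = Workload(iteration=3)
# Before this patch: LOG.info("Created server {}".format(server))
# After this patch the equivalent call logs:
#   " DYNAMIC_WORKLOADS ITER: 3 Created server vm-1 "
w.log_info("Created server {}".format("vm-1"))

Because the prefix format now lives in one place (the mixins in dynamic_utils.py), the other workload files no longer need their own logging setup, which is why their `import logging` and `LOG = logging.getLogger(__name__)` lines are dropped in the diff below.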
@@ -10,10 +10,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import logging
 import time

 from rally.common import cfg
-from rally.common import logging
 from rally_openstack.scenarios.vm import utils as vm_utils
 from rally_openstack.scenarios.neutron import utils as neutron_utils
 from rally.task import atomic
@@ -28,6 +28,14 @@ LOG = logging.getLogger(__name__)

 class NovaUtils(vm_utils.VMScenario):

+    def log_info(self, msg):
+        log_msg = " DYNAMIC_WORKLOADS ITER: {} {} ".format(self.context["iteration"], msg)
+        LOG.info(log_msg)
+
+    def log_error(self, msg):
+        log_msg = " DYNAMIC_WORKLOADS ITER: {} {} ".format(self.context["iteration"], msg)
+        LOG.error(log_msg)
+
     def _run_command_with_attempts(self, ssh_connection, cmd, max_attempts=120, timeout=2):
         """Run command over ssh connection with multiple attempts
         :param ssh_connection: ssh connection to run command
@@ -161,6 +169,14 @@ class NovaUtils(vm_utils.VMScenario):

 class NeutronUtils(neutron_utils.NeutronScenario):

+    def log_info(self, msg):
+        log_msg = " DYNAMIC_WORKLOADS ITER: {} {} ".format(self.context["iteration"], msg)
+        LOG.info(log_msg)
+
+    def log_error(self, msg):
+        log_msg = " DYNAMIC_WORKLOADS ITER: {} {} ".format(self.context["iteration"], msg)
+        LOG.error(log_msg)
+
     @atomic.action_timer("neutron.create_router")
     def _create_router(self, router_create_args):
         """Create neutron router.
@@ -11,7 +11,6 @@
 # limitations under the License.

 import io
-import logging
 import time
 import random
 from rally.common import sshutils
@@ -20,8 +19,6 @@ from rally_openstack.scenarios.octavia import utils as octavia_utils
 from octaviaclient.api import exceptions
 import dynamic_utils

-LOG = logging.getLogger(__name__)
-

 class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
     def build_jump_host(self, ext_net_name, image, flavor, user, subnet, password=None, **kwargs):
@@ -37,7 +34,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):

         kwargs["nics"] = [{"net-id": subnet['network_id']}]
         keyname = self.context["user"]["keypair"]["name"]
-        LOG.info("Building Jump Host with key : {}".format(keyname))
+        self.log_info("Building Jump Host with key : {}".format(keyname))
         jump_host, jump_host_ip = self._boot_server_with_fip(image,
                                                              flavor,
                                                              True,
@@ -83,11 +80,11 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
             try:
                 userdata = io.open(user_data_file, "r")
                 kwargs["userdata"] = userdata
-                LOG.info("Added user data")
+                self.log_info("Added user data")
             except Exception as e:
-                LOG.info("couldn't add user data %s", e)
+                self.log_info("couldn't add user data %s", e)

-            LOG.info("Launching Client : {}".format(i))
+            self.log_info("Launching Client : {}".format(i))
             tag = "client:"+str(lb_subnet['network_id'])
             kwargs['description'] = subnet['network_id']
             # server description consists of network_id
@@ -103,7 +100,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
             # IP Address
             _clients.append(
                 str(server.addresses[network_name][0]["addr"]))
-        LOG.info(_clients)
+        self.log_info(_clients)
         return _clients

     def create_listener(self, lb_id, protocol_port, max_attempts=10):
@@ -121,7 +118,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
             "connection_limit": -1,
             "admin_state_up": True,
         }
-        LOG.info("Creating a listener for lb {}".format(lb_id))
+        self.log_info("Creating a listener for lb {}".format(lb_id))
         attempts = 0
         # Retry to avoid HTTP 409 errors like "Load Balancer
         # is immutable and cannot be updated"
@@ -136,9 +133,9 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
                     time.sleep(60)
                     continue
             break
-        LOG.info("Listener created {}".format(listener))
-        LOG.info("Waiting for the lb {} to be active, after listener_create"
-                 .format(lb_id))
+        self.log_info("Listener created {}".format(listener))
+        self.log_info("Waiting for the lb {} to be active, after listener_create".format(
+            lb_id))
         return listener

     def create_pool(self, lb_id, listener_id, max_attempts=10):
@@ -148,7 +145,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
         :param max_attempts: max retries
         """

-        LOG.info("Creating a pool for lb {}".format(lb_id))
+        self.log_info("Creating a pool for lb {}".format(lb_id))
         attempts = 0
         # Retry to avoid HTTP 409 errors like "Load Balancer
         # is immutable and cannot be updated"
@@ -187,8 +184,8 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
             "admin_state_up": True,
             "name": self.generate_random_name(),
         }
-        LOG.info("Adding member : {} to the pool {} lb {}"
-                 .format(client_ip, pool_id, lb_id))
+        self.log_info("Adding member : {} to the pool {} lb {}".format(
+            client_ip, pool_id, lb_id))
         attempts = 0
         # Retry to avoid "Load Balancer is immutable and cannot be updated"
         while attempts < max_attempts:
@@ -201,13 +198,13 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
                 if e.code == 409:
                     attempts += 1
                     time.sleep(120)
-                    LOG.info("mem_create exception: Waiting for the lb {} to be active"
-                             .format(lb_id))
+                    msg = "mem_create exception: Waiting for the lb {} to be active".format(lb_id)
+                    self.log_info(msg)
                     continue
                 break
             time.sleep(30)
-        LOG.info("Waiting for the lb {} to be active, after member_create"
-                 .format(lb_id))
+        self.log_info("Waiting for the lb {} to be active, after member_create".format(
+            lb_id))

     def check_connection(self, lb, jump_ssh, num_pools, num_clients, max_attempts=10):
         """Checks the connection
@@ -220,7 +217,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):

         port = 80
         lb_ip = lb["vip_address"]
-        LOG.info("Load balancer IP: {}".format(lb_ip))
+        self.log_info("Load balancer IP: {}".format(lb_ip))
         # check for connectivity
         self._wait_for_ssh(jump_ssh)
         for i in range(num_pools):
@@ -229,13 +226,13 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
             attempts = 0
             while attempts < max_attempts:
                 test_exitcode, stdout_test, stderr = jump_ssh.execute(cmd, timeout=60)
-                LOG.info("cmd: {}, stdout:{}".format(cmd, stdout_test))
+                self.log_info("cmd: {}, stdout:{}".format(cmd, stdout_test))
                 if stdout_test != '1':
-                    LOG.error("ERROR with HTTP response {}".format(cmd))
+                    self.log_error("ERROR with HTTP response {}".format(cmd))
                     attempts += 1
                     time.sleep(30)
                 else:
-                    LOG.info("cmd: {} successful".format(cmd))
+                    self.log_info("cmd: {} successful".format(cmd))
                     break
             port = port + 1

@@ -277,7 +274,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
             subnet = self._create_subnet(network, subnet_create_args or {})
             subnets.append(subnet)
         self._add_interface_router(subnets[0]['subnet'], router['router'])
-        LOG.info("Subnets {}".format(subnets))
+        self.log_info("Subnets {}".format(subnets))
         vip_subnet_id = subnets[0]['subnet']['id']
         mem_subnet_id = subnets[1]['subnet']['id']

@@ -296,7 +293,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
                                               description=router['router']['id'],
                                               admin_state=True)
         lb_id = lb["id"]
-        LOG.info("Waiting for the lb {} to be active".format(lb["id"]))
+        self.log_info("Waiting for the lb {} to be active".format(lb["id"]))
         self.octavia.wait_for_loadbalancer_prov_status(lb)
         time.sleep(90)

@@ -310,7 +307,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
                 protocol_port = protocol_port + 1
         self.check_connection(lb, jump_ssh, num_pools, num_clients)
         self._delete_server_with_fip(jump_host, jump_host_ip)
-        LOG.info("Deleted Jump_host {}".format(jump_host_ip))
+        self.log_info("Deleted Jump_host {}".format(jump_host_ip))

     def delete_loadbalancers(self, delete_num_lbs):
         """Deletes <delete_num_lbs> loadbalancers randomly
@@ -324,7 +321,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
             random_lb = random.choice(lb_list["loadbalancers"])
             # delete the lb
             self.octavia._clients.octavia().load_balancer_delete(random_lb["id"], cascade=True)
-            LOG.info("Random LB deleted {}".format(random_lb["id"]))
+            self.log_info("Random LB deleted {}".format(random_lb["id"]))
             time.sleep(15)
             # delete vm's that were added as members to lb
             tag = "client:"+str(random_lb['vip_network_id'])
@@ -348,10 +345,10 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):

         lb_list = self.octavia.load_balancer_list()
         random_lb = random.choice(lb_list["loadbalancers"])
-        LOG.info("Random LB selected {}".format(random_lb))
+        self.log_info("Random LB selected {}".format(random_lb))
         self.octavia.wait_for_loadbalancer_prov_status(random_lb)
         random_lb_pool = random.choice(random_lb["pools"])
-        LOG.info("Random pool {}".format(random_lb_pool))
+        self.log_info("Random pool {}".format(random_lb_pool))
         members_list = self.octavia.member_list(random_lb_pool['id'])
         for i in range(delete_num_members):
             attempts = 0
@@ -361,7 +358,7 @@ class DynamicOctaviaBase(octavia_utils.OctaviaBase, dynamic_utils.NovaUtils):
                 try:
                     self.octavia._clients.octavia().member_delete(random_lb_pool['id'],
                                                                   random_member['id'])
-                    LOG.info("Member-{} deleted {}".format(i, random_member['id']))
+                    self.log_info("Member-{} deleted {}".format(i, random_member['id']))
                     break
                 except exceptions.OctaviaClientException as e:
                     # retry for 409 return code
@@ -10,7 +10,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import logging
 import random
 import os
 import subprocess
@@ -19,8 +18,6 @@ from rally_openstack.scenarios.neutron import utils as neutron_utils
 import dynamic_utils
 from rally.task import atomic

-LOG = logging.getLogger(__name__)
-

 class DynamicProviderNetworkBase(dynamic_utils.NovaUtils, neutron_utils.NeutronScenario):

@@ -76,9 +73,9 @@ class DynamicProviderNetworkBase(dynamic_utils.NovaUtils, neutron_utils.NeutronScenario):
         proc = subprocess.Popen(cmd)
         proc.wait()
         if proc.returncode == 0:
-            LOG.info("Ping to {} is successful".format(server_ip))
+            self.log_info("Ping to {} is successful".format(server_ip))
         else:
-            LOG.info("Ping to {} is failed".format(server_ip))
+            self.log_info("Ping to {} is failed".format(server_ip))

     def provider_netcreate_nova_boot_ping(self, image, flavor, provider_phys_net, iface_name,
                                           iface_mac, num_vms_provider_net, router_create_args=None,
@@ -104,7 +101,8 @@ class DynamicProviderNetworkBase(dynamic_utils.NovaUtils, neutron_utils.NeutronScenario):
             kwargs["nics"] = [{'net-id': provider_network['network']['id']}]
             tag = "provider_network:"+str(provider_network['network']['id'])
             server = self._boot_server_with_tag(image, flavor, tag, **kwargs)
-            LOG.info(" Server {} created on provider network {}".format(server, provider_network))
+            self.log_info(" Server {} created on provider network {}".format(
+                server, provider_network))
             self.ping_server(server, iface_name, iface_mac, provider_network, subnet)

     def pick_random_provider_net(self, provider_phys_net, **kwargs):
@@ -12,13 +12,10 @@

 import random

-from rally.common import logging
 from rally.common import sshutils

 import dynamic_utils

-LOG = logging.getLogger(__name__)
-
 # This test simulates trunk subports using vlan interfaces inside the VM.
 # It creates vlan interface for the subport and then adds it's MAC and ip.
 # After packet reaching the vlan interface, reply packet has to be properly
@@ -318,7 +315,7 @@ class TrunkDynamicScenario(
             num_operations_completed += 1

         if num_operations_completed == 0:
-            LOG.info("""No trunks which are not under lock, so
+            self.log_info("""No trunks which are not under lock, so
                      cannot add subports to any trunks.""")

     def delete_subports_from_random_trunks(self, num_trunks, subport_count):
@@ -394,7 +391,7 @@ class TrunkDynamicScenario(
             num_operations_completed += 1

         if num_operations_completed == 0:
-            LOG.info("""No trunks which are not under lock, so
+            self.log_info("""No trunks which are not under lock, so
                      cannot delete subports from any trunks.""")

     def swap_floating_ips_between_random_subports(self):
@@ -403,7 +400,7 @@ class TrunkDynamicScenario(
         trunks = [trunk for trunk in self._list_trunks() if len(trunk["sub_ports"]) > 0]

         if len(trunks) < 2:
-            LOG.info("""Number of eligible trunks not sufficient
+            self.log_info("""Number of eligible trunks not sufficient
                      for swapping floating IPs between trunk subports""")
             return

@@ -416,7 +413,7 @@ class TrunkDynamicScenario(
                 break

         if len(trunks_for_swapping) < 2:
-            LOG.info("""Number of unlocked trunks not sufficient
+            self.log_info("""Number of unlocked trunks not sufficient
                      for swapping floating IPs between trunk subports""")
             return

@@ -455,13 +452,14 @@ class TrunkDynamicScenario(
         self.clients("neutron").update_floatingip(
             subport2_fip["id"], {"floatingip": fip_update_dict})

-        LOG.info("Ping until failure after dissociating subports' floating IPs, before swapping")
+        msg = "Ping until failure after dissociating subports' floating IPs, before swapping"
+        self.log_info(msg)
         self.ping_subport_fip_from_jumphost(jumphost1_fip, self.jumphost_user, subport1_fip,
                                             subport1["port"], True)
         self.ping_subport_fip_from_jumphost(jumphost2_fip, self.jumphost_user, subport2_fip,
                                             subport2["port"], True)

-        LOG.info("Ping until success by swapping subports' floating IPs")
+        self.log_info("Ping until success by swapping subports' floating IPs")
         self.ping_subport_fip_from_jumphost(jumphost1_fip, self.jumphost_user, subport2_fip,
                                             subport1["port"])
         self.ping_subport_fip_from_jumphost(jumphost2_fip, self.jumphost_user, subport1_fip,
@@ -10,13 +10,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import logging
 import random

 import dynamic_utils

-LOG = logging.getLogger(__name__)
-

 class VMDynamicScenario(dynamic_utils.NovaUtils,
                         dynamic_utils.NeutronUtils,
@@ -38,7 +35,7 @@ class VMDynamicScenario(dynamic_utils.NovaUtils,
         kwargs["nics"] = [{"net-id": network["network"]["id"]}]
         for _ in range(num_vms):
             server = self._boot_server_with_tag(image, flavor, "create_delete", **kwargs)
-            LOG.info("Created server {}".format(server))
+            self.log_info("Created server {}".format(server))

     def delete_random_servers(self, num_vms):
         """Delete <num_vms> randomly chosen servers
@@ -52,7 +49,7 @@ class VMDynamicScenario(dynamic_utils.NovaUtils,
         for server in servers_to_delete:
             server_id = server.id
             self.acquire_lock(server_id)
-            LOG.info("Deleting server {}".format(server))
+            self.log_info("Deleting server {}".format(server))
             self._delete_server(server, force=True)
             self.release_lock(server_id)

@@ -124,15 +121,15 @@ class VMDynamicScenario(dynamic_utils.NovaUtils,
                 continue

             fip = list(server_to_migrate.addresses.values())[0][1]['addr']
-            LOG.info("ping {} before server migration".format(fip))
+            self.log_info("ping {} before server migration".format(fip))
             self._wait_for_ping(fip)
             self._migrate(server_to_migrate)
             self._resize_confirm(server_to_migrate, status="ACTIVE")
-            LOG.info("ping {} after server migration".format(fip))
+            self.log_info("ping {} after server migration".format(fip))
             self._wait_for_ping(fip)
             self.release_lock(server_to_migrate.id)
             num_migrated += 1

         if num_migrated == 0:
-            LOG.info("""No servers which are not under lock, so
+            self.log_info("""No servers which are not under lock, so
                      cannot migrate any servers.""")