Merge "NSX-migration: Add logging and handle errors"

Jenkins 2017-07-14 19:10:49 +00:00 committed by Gerrit Code Review
commit 47ae3a63e5
2 changed files with 251 additions and 87 deletions
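At a glance, the change replaces print() calls with a module-level logger, an optional file handler, and lazy %-style message formatting. A minimal standalone sketch of that pattern, using only Python's stdlib logging (the helper name setup_logfile and the sample messages are illustrative, not taken from the diff):

import logging

logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)

def setup_logfile(logfile):
    # Attach a FileHandler so records reach the logfile in addition to
    # the console handler installed by basicConfig().
    handler = logging.FileHandler(logfile)
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    LOG.addHandler(handler)

setup_logfile('nsx_migration.log')
# Arguments are interpolated only when the record is actually emitted.
LOG.info("Created QoS policy %s", 'policy-id')
LOG.error("Failed to create network %(net)s: %(e)s",
          {'net': 'net-id', 'e': 'error details'})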


@@ -15,6 +15,7 @@ import argparse
from vmware_nsx.api_replay import client
DEFAULT_DOMAIN_ID = 'default'
DEFAULT_LOGFILE = 'nsx_migration.log'
class ApiReplayCli(object):
@@ -34,7 +35,8 @@ class ApiReplayCli(object):
dest_os_user_domain_id=args.dest_os_user_domain_id,
dest_os_password=args.dest_os_password,
dest_os_auth_url=args.dest_os_auth_url,
use_old_keystone=args.use_old_keystone)
use_old_keystone=args.use_old_keystone,
logfile=args.logfile)
def _setup_argparse(self):
parser = argparse.ArgumentParser()
@@ -107,6 +109,11 @@ class ApiReplayCli(object):
action='store_true',
help="Use old keystone client for source authentication.")
parser.add_argument(
"--logfile",
default=DEFAULT_LOGFILE,
help="Output logfile.")
# NOTE: this will return an error message if any of the
# required options are missing.
return parser.parse_args()
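A short sketch of how the new --logfile option behaves at parse time, using argparse alone; the real parser defines many more (required) options that are omitted here:

import argparse

DEFAULT_LOGFILE = 'nsx_migration.log'

parser = argparse.ArgumentParser()
parser.add_argument("--logfile",
                    default=DEFAULT_LOGFILE,
                    help="Output logfile.")

# Explicit value on the command line...
print(parser.parse_args(["--logfile", "/tmp/migration.log"]).logfile)
# ...and the default when the flag is omitted.
print(parser.parse_args([]).logfile)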


@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from keystoneauth1 import identity
@@ -18,6 +20,12 @@ from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from oslo_utils import excutils
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
# For internal testing only
use_old_keystone_on_dest = False
class ApiReplayClient(object):
@@ -34,7 +42,14 @@ class ApiReplayClient(object):
dest_os_username, dest_os_user_domain_id,
dest_os_tenant_name, dest_os_tenant_domain_id,
dest_os_password, dest_os_auth_url,
use_old_keystone):
use_old_keystone, logfile):
if logfile:
f_handler = logging.FileHandler(logfile)
f_formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(message)s')
f_handler.setFormatter(f_formatter)
LOG.addHandler(f_handler)
# connect to both clients
if use_old_keystone:
@@ -54,6 +69,13 @@ class ApiReplayClient(object):
password=source_os_password,
auth_url=source_os_auth_url)
if use_old_keystone_on_dest:
self.dest_neutron = client.Client(
username=dest_os_username,
tenant_name=dest_os_tenant_name,
password=dest_os_password,
auth_url=dest_os_auth_url)
else:
self.dest_neutron = self.connect_to_client(
username=dest_os_username,
user_domain_id=dest_os_user_domain_id,
@@ -62,6 +84,7 @@ class ApiReplayClient(object):
password=dest_os_password,
auth_url=dest_os_auth_url)
LOG.info("Starting NSX migration.")
# Migrate all the objects
self.migrate_security_groups()
self.migrate_qos_policies()
@@ -69,6 +92,7 @@ class ApiReplayClient(object):
self.migrate_networks_subnets_ports(routers_gw_info)
self.migrate_floatingips()
self.migrate_routers_routes(routers_routes)
LOG.info("NSX migration is Done.")
def connect_to_client(self, username, user_domain_id,
tenant_name, tenant_domain_id,
@@ -155,11 +179,13 @@ class ApiReplayClient(object):
rule = self.dest_neutron.create_dscp_marking_rule(
pol_id, body={'dscp_marking_rule': body})
else:
print("QoS rule type %s is not supported for policy %s" % (
rule_type, pol_id))
print("created QoS policy %s rule %s " % (pol_id, rule))
LOG.info("QoS rule type %(rule)s is not supported for policy "
"%(pol)s",
{'rule': rule_type, 'pol': pol_id})
LOG.info("created QoS policy %s rule %s", pol_id, rule)
except Exception as e:
print("Failed to create QoS rule for policy %s: %s" % (pol_id, e))
LOG.error("Failed to create QoS rule for policy %(pol)s: %(e)s",
{'pol': pol_id, 'e': e})
def migrate_qos_policies(self):
"""Migrates QoS policies from source to dest neutron."""
@@ -170,7 +196,7 @@ class ApiReplayClient(object):
dest_qos_pols = self.dest_neutron.list_qos_policies()['policies']
except n_exc.NotFound:
# QoS disabled on dest
print("QoS is disabled on destination: ignoring QoS policies")
LOG.info("QoS is disabled on destination: ignoring QoS policies")
self.dest_qos_support = False
return
self.dest_qos_support = True
@@ -201,10 +227,11 @@ class ApiReplayClient(object):
new_pol = self.dest_neutron.create_qos_policy(
body={'policy': body})
except Exception as e:
print("Failed to create QoS policy %s: %s" % (
pol['id'], e))
LOG.error("Failed to create QoS policy %(pol)s: %(e)s",
{'pol': pol['id'], 'e': e})
continue
print("Created QoS policy %s" % new_pol)
else:
LOG.info("Created QoS policy %s", new_pol)
for qos_rule in qos_rules:
self.migrate_qos_rule(new_pol['policy'], qos_rule)
@@ -221,7 +248,9 @@ class ApiReplayClient(object):
drop_sg_fields = self.basic_ignore_fields + ['policy']
for sg in source_sec_groups:
total_num = len(source_sec_groups)
LOG.info("Migrating %s security groups", total_num)
for count, sg in enumerate(source_sec_groups, 1):
dest_sec_group = self.have_id(sg['id'], dest_sec_groups)
# If the security group already exists on the dest_neutron
if dest_sec_group:
@@ -234,9 +263,8 @@ class ApiReplayClient(object):
try:
body = self.drop_fields(sg_rule, drop_sg_fields)
self.fix_description(body)
print(
self.dest_neutron.create_security_group_rule(
{'security_group_rule': body}))
{'security_group_rule': body})
except n_exc.Conflict:
# NOTE(arosen): when you create a default
# security group it is automatically populated
@@ -253,10 +281,14 @@ class ApiReplayClient(object):
self.fix_description(body)
new_sg = self.dest_neutron.create_security_group(
{'security_group': body})
print("Created security-group %s" % new_sg)
LOG.info("Created security-group %(count)s/%(total)s: "
"%(sg)s",
{'count': count, 'total': total_num,
'sg': new_sg})
except Exception as e:
# TODO(arosen): improve exception handing here.
print(e)
LOG.error("Failed to create security group (%(sg)s): "
"%(e)s",
{'sg': sg, 'e': e})
# Note - policy security groups will have no rules, and will
# be created on the destination with the default rules only
@@ -266,7 +298,7 @@ class ApiReplayClient(object):
self.fix_description(body)
rule = self.dest_neutron.create_security_group_rule(
{'security_group_rule': body})
print("created security group rule %s " % rule['id'])
LOG.debug("created security group rule %s", rule['id'])
except Exception:
# NOTE(arosen): when you create a default
# security group it is automatically populated
@@ -303,40 +335,61 @@ class ApiReplayClient(object):
'availability_zones',
'distributed',
'flavor_id']
for router in source_routers:
dest_router = self.have_id(router['id'], dest_routers)
if dest_router is False:
total_num = len(source_routers)
LOG.info("Migrating %s routers", total_num)
for count, router in enumerate(source_routers, 1):
if router.get('routes'):
update_routes[router['id']] = router['routes']
if router.get('external_gateway_info'):
gw_info[router['id']] = router['external_gateway_info']
dest_router = self.have_id(router['id'], dest_routers)
if dest_router is False:
body = self.drop_fields(router, drop_router_fields)
self.fix_description(body)
try:
new_router = (self.dest_neutron.create_router(
{'router': body}))
print("created router %s" % new_router)
LOG.info("created router %(count)s/%(total)s: %(rtr)s",
{'count': count, 'total': total_num,
'rtr': new_router})
except Exception as e:
LOG.error("Failed to create router %(rtr)s: %(e)s",
{'rtr': router, 'e': e})
return update_routes, gw_info
def migrate_routers_routes(self, routers_routes):
"""Add static routes to the created routers."""
for router_id, routes in six.iteritems(routers_routes):
total_num = len(routers_routes)
LOG.info("Migrating %s routers routes", total_num)
for count, (router_id, routes) in enumerate(
six.iteritems(routers_routes), 1):
try:
self.dest_neutron.update_router(router_id,
{'router': {'routes': routes}})
print("Added routes to router %s" % router_id)
LOG.info("Added routes to router %(rtr)s %(count)s/%(total)s:",
{'count': count, 'total': total_num,
'rtr': router_id})
except Exception as e:
LOG.error("Failed to add routes %(routes)s to router "
"%(rtr)s: %(e)s",
{'routes': routes, 'rtr': router_id, 'e': e})
def migrate_subnetpools(self):
subnetpools_map = {}
try:
source_subnetpools = self.source_neutron.list_subnetpools()[
'subnetpools']
except Exception:
# pools not supported on source
return subnetpools_map
dest_subnetpools = self.dest_neutron.list_subnetpools()[
'subnetpools']
drop_subnetpool_fields = self.basic_ignore_fields + [
'id',
'ip_version']
subnetpools_map = {}
for pool in source_subnetpools:
# a default subnetpool (per ip-version) should be unique.
# so do not create one if already exists
@@ -353,14 +406,33 @@ class ApiReplayClient(object):
if 'default_quota' in body and body['default_quota'] is None:
del body['default_quota']
try:
new_id = self.dest_neutron.create_subnetpool(
{'subnetpool': body})['subnetpool']['id']
subnetpools_map[old_id] = new_id
# refresh the list of existing subnetpools
dest_subnetpools = self.dest_neutron.list_subnetpools()[
'subnetpools']
except Exception as e:
LOG.error("Failed to create subnetpool %(pool)s: %(e)s",
{'pool': pool, 'e': e})
return subnetpools_map
def fix_port(self, body):
# remove allowed_address_pairs if empty:
if ('allowed_address_pairs' in body and
not body['allowed_address_pairs']):
del body['allowed_address_pairs']
# remove port security if mac learning is enabled
if (body.get('mac_learning_enabled') and
body.get('port_security_enabled')):
LOG.warning("Disabling port security of port %s: The plugin "
"doesn't support mac learning with port security",
body['id'])
body['port_security_enabled'] = False
body['security_groups'] = []
def fix_network(self, body, dest_default_public_net):
# neutron doesn't like some fields being None even though it's
# what it returns to us.
@@ -392,12 +464,12 @@ class ApiReplayClient(object):
fields_reset = True
del body[field]
if fields_reset:
print('Ignoring provider network fields while migrating '
'external network ' + body['id'])
LOG.warning("Ignoring provider network fields while migrating "
"external network %s", body['id'])
if body.get('is_default') and dest_default_public_net:
body['is_default'] = False
print('Public network ' + body['id'] +
'was set to non default network')
LOG.warning("Public network %s was set to non default network",
body['id'])
def migrate_networks_subnets_ports(self, routers_gw_info):
"""Migrates networks/ports/router-uplinks from src to dest neutron."""
@@ -417,7 +489,6 @@ class ApiReplayClient(object):
drop_port_fields = self.basic_ignore_fields + [
'status',
'port_security_enabled',
'binding:vif_details',
'binding:vif_type',
'binding:host_id',
@@ -444,25 +515,38 @@ class ApiReplayClient(object):
dest_default_public_net = True
subnetpools_map = self.migrate_subnetpools()
for network in source_networks:
total_num = len(source_networks)
LOG.info("Migrating %(nets)s networks, %(subnets)s subnets and "
"%(ports)s ports",
{'nets': total_num, 'subnets': len(source_subnets),
'ports': len(source_ports)})
for count, network in enumerate(source_networks, 1):
external_net = network.get('router:external')
body = self.drop_fields(network, drop_network_fields)
self.fix_description(body)
self.fix_network(body, dest_default_public_net)
# only create network if the dest server doesn't have it
if self.have_id(network['id'], dest_networks) is False:
if self.have_id(network['id'], dest_networks):
continue
try:
created_net = self.dest_neutron.create_network(
{'network': body})['network']
print("Created network: %s " % created_net)
LOG.info("Created network %(count)s/%(total)s: %(net)s",
{'count': count, 'total': total_num,
'net': created_net})
except Exception as e:
# Print the network and exception to help debugging
with excutils.save_and_reraise_exception():
print("Failed to create network: " + str(body))
print("Source network: " + str(network))
LOG.error("Failed to create network %s", body)
LOG.error("Source network: %s", network)
raise e
created_subnet = None
subnets_map = {}
dhcp_subnets = []
count_dhcp_subnet = 0
for subnet_id in network['subnets']:
subnet = self.find_subnet_by_id(subnet_id, source_subnets)
body = self.drop_fields(subnet, drop_subnet_fields)
@@ -475,15 +559,43 @@ class ApiReplayClient(object):
if body.get('subnetpool_id'):
body['subnetpool_id'] = subnetpools_map.get(
body['subnetpool_id'])
# Handle DHCP enabled subnets
enable_dhcp = False
if body['enable_dhcp']:
count_dhcp_subnet = count_dhcp_subnet + 1
# disable dhcp on subnet: we will enable it after creating
# all the ports to avoid ip collisions
body['enable_dhcp'] = False
if count_dhcp_subnet > 1:
# Do not allow dhcp on the subnet if there is already
# another subnet with DHCP as the v3 plugin supports
# only one
LOG.warning("Disabling DHCP for subnet on net %s: "
"The plugin doesn't support multiple "
"subnets with DHCP", network['id'])
enable_dhcp = False
elif external_net:
# Do not allow dhcp on the external subnet
LOG.warning("Disabling DHCP for subnet on net %s: "
"The plugin doesn't support dhcp on "
"external networks", network['id'])
enable_dhcp = False
else:
enable_dhcp = True
try:
created_subnet = self.dest_neutron.create_subnet(
{'subnet': body})['subnet']
print("Created subnet: " + created_subnet['id'])
LOG.info("Created subnet: %s", created_subnet['id'])
subnets_map[subnet_id] = created_subnet['id']
if enable_dhcp:
dhcp_subnets.append(created_subnet)
except n_exc.BadRequest as e:
print("Failed to create subnet: " + str(e))
LOG.error("Failed to create subnet: %(subnet)s: %(e)s",
{'subnet': subnet, 'e': e})
# NOTE(arosen): this occurs here if you run the script
# multiple times as we don't currently
# perserve the subnet_id. Also, 409 would be a better
# preserve the subnet_id. Also, 409 would be a better
# response code for this in neutron :(
# create the ports on the network
@@ -492,11 +604,16 @@ class ApiReplayClient(object):
body = self.drop_fields(port, drop_port_fields)
self.fix_description(body)
self.fix_port(body)
# specify the network_id that we just created above
port['network_id'] = network['id']
# remove the subnet id field from fixed_ips dict
subnet_id = None
if port.get('fixed_ips'):
old_subnet_id = port['fixed_ips'][0]['subnet_id']
subnet_id = subnets_map.get(old_subnet_id)
# remove the old subnet id field from fixed_ips dict
for fixed_ips in body['fixed_ips']:
del fixed_ips['subnet_id']
@@ -515,9 +632,18 @@ class ApiReplayClient(object):
"enable_snat": enable_snat,
# keep the original GW IP
"external_fixed_ips": port.get('fixed_ips')}}
router_uplink = self.dest_neutron.update_router(
try:
self.dest_neutron.update_router(
router_id, {'router': rtr_body})
print("Uplinked router %s" % router_uplink)
LOG.info("Uplinked router %(rtr)s to external "
"network %(net)s",
{'rtr': router_id,
'net': port['network_id']})
except Exception as e:
LOG.error("Failed to add router gateway "
"(%(port)s): %(e)s",
{'port': port, 'e': e})
continue
# Let the neutron dhcp-agent recreate this on its own
@@ -529,7 +655,7 @@ class ApiReplayClient(object):
continue
if (port['device_owner'] == 'network:router_interface' and
created_subnet is not None):
subnet_id):
try:
# uplink router_interface ports by creating the
# port, and attaching it to the router
@@ -538,16 +664,29 @@ class ApiReplayClient(object):
del body['device_id']
created_port = self.dest_neutron.create_port(
{'port': body})['port']
LOG.info("Created interface port %(port)s (subnet "
"%(subnet)s, ip %(ip)s, mac %(mac)s)",
{'port': created_port['id'],
'subnet': subnet_id,
'ip': created_port['fixed_ips'][0][
'ip_address'],
'mac': created_port['mac_address']})
self.dest_neutron.add_interface_router(
router_id,
{'port_id': created_port['id']})
print("Uplinked router %s to subnet %s" %
(router_id, created_subnet['id']))
continue
LOG.info("Uplinked router %(rtr)s to network "
"%(net)s",
{'rtr': router_id, 'net': network['id']})
except Exception as e:
# NOTE(arosen): this occurs here if you run the
# script multiple times as we don't track this.
print("Failed to add router interface: " + str(e))
# Note(asarfaty): also if the same network in
# source is attached to 2 routers, which the v3
# plugin does not support.
LOG.error("Failed to add router interface port"
"(%(port)s): %(e)s",
{'port': port, 'e': e})
continue
try:
created_port = self.dest_neutron.create_port(
@@ -555,9 +694,26 @@ class ApiReplayClient(object):
except Exception as e:
# NOTE(arosen): this occurs here if you run the
# script multiple times as we don't track this.
print("Failed to create port: " + str(e))
LOG.error("Failed to create port (%(port)s) : %(e)s",
{'port': port, 'e': e})
else:
print("Created port: " + created_port['id'])
LOG.info("Created port %(port)s (subnet "
"%(subnet)s, ip %(ip)s, mac %(mac)s)",
{'port': created_port['id'],
'subnet': subnet_id,
'ip': created_port['fixed_ips'][0][
'ip_address'],
'mac': created_port['mac_address']})
# Enable dhcp on the relevant subnets:
for subnet in dhcp_subnets:
try:
self.dest_neutron.update_subnet(subnet['id'],
{'subnet': {'enable_dhcp': True}})
except Exception as e:
LOG.error("Failed to enable DHCP on subnet %(subnet)s: "
"%(e)s",
{'subnet': subnet['id'], 'e': e})
def migrate_floatingips(self):
"""Migrates floatingips from source to dest neutron."""
@@ -569,12 +725,13 @@ class ApiReplayClient(object):
drop_fip_fields = self.basic_ignore_fields + [
'status', 'router_id', 'id', 'revision']
for source_fip in source_fips:
total_num = len(source_fips)
for count, source_fip in enumerate(source_fips, 1):
body = self.drop_fields(source_fip, drop_fip_fields)
try:
fip = self.dest_neutron.create_floatingip({'floatingip': body})
print("Created floatingip %s" % fip)
LOG.info("Created floatingip %(count)s/%(total)s : %(fip)s",
{'count': count, 'total': total_num, 'fip': fip})
except Exception as e:
print("Failed to create floating ip (%s) : %s" %
(source_fip, str(e)))
LOG.error("Failed to create floating ip (%(fip)s) : %(e)s",
{'fip': source_fip, 'e': e})
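The error handling added throughout follows two patterns: log-and-continue for per-object failures (security groups, routers, ports, floating IPs), and oslo_utils.excutils.save_and_reraise_exception() where a failure should still abort the run, as with network creation above. A minimal sketch of the latter pattern, assuming a neutron-client-like object is passed in (create_network_or_abort is an illustrative name, not part of the commit):

import logging

from oslo_utils import excutils

LOG = logging.getLogger(__name__)


def create_network_or_abort(neutron, body):
    try:
        return neutron.create_network({'network': body})['network']
    except Exception:
        # save_and_reraise_exception() re-raises the original exception,
        # with its traceback, when the block exits; the extra logging
        # runs first.
        with excutils.save_and_reraise_exception():
            LOG.error("Failed to create network %s", body)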