TVD: Admin utility for migrating a project

Initial version of an admin utility for migrating a project from V to T.
The utility first dumps all of the project's objects to files, so the data
is not lost. It then deletes each object using the V plugin, moves the
project to the T plugin, and recreates each object there.

Usage:
nsxadmin -r projects -o nsx-migrate-v-v3 --property project-id=<V project to be migrated> --property external-net=<T external network to be used>

Change-Id: I816b63f40ada945d321db4566224f8a964a39a8f

Parent commit: e9048e1712
This commit:   863daeafef
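At a high level, the new migrate_v_project_to_t admin handler (full diff below) wires these steps together. The following is a rough sketch of that flow, not the literal code: it reuses function and variable names from the diff (project, ext_net_id, ctx) and omits the validation and error handling that the real handler performs:

    # Sketch only: condensed from the new nsxtvd "projects" admin resource below.
    ctx = n_context.get_admin_context()
    objects = read_v_resources_to_files(ctx, project)        # 1. dump all V objects to json files
    delete_v_resources(ctx, objects)                          # 2. delete them via the V plugin
    db.update_project_plugin_mapping(                         # 3. move the project to the T plugin
        ctx.session, project, projectpluginmap.NsxPlugins.NSX_T)
    cfg.CONF.set_override('api_replay_mode', True)            # keep the original neutron IDs
    create_t_resources(ctx, objects, ext_net_id)              # 4. recreate each object in NSX-T
    cfg.CONF.set_override('api_replay_mode', False)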
@@ -319,9 +319,9 @@ Ports

     nsxadmin -r ports -o list-mismatches

-- Update the VMs ports on the backend after migrating nsx-v -> nsx-v3::
+- Update the VMs ports (all or of a specific project) on the backend after migrating nsx-v -> nsx-v3::

-    nsxadmin -r ports -o nsx-migrate-v-v3
+    nsxadmin -r ports -o nsx-migrate-v-v3 (--property project-id=<>)

 - Migrate exclude ports to use tags::

@@ -504,6 +504,10 @@ NSXtvd Plugin

     nsxadmin -r projects -o import --property plugin=nsx-v --property project=<>

+- Migrate a specific project from V to T:
+
+    nsxadmin -r projects -o nsx-migrate-v-v3 --property project-id=<V project ID> --property external-net=<T external network ID> (--property from-file=True)
+

 Upgrade Steps (Version 1.0.0 to Version 1.1.0)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -20,6 +20,8 @@ from neutronclient.common import exceptions as n_exc
 from neutronclient.v2_0 import client
 from oslo_utils import excutils

+from vmware_nsx.api_replay import utils
+
 logging.basicConfig(level=logging.INFO)
 LOG = logging.getLogger(__name__)

@@ -27,13 +29,7 @@ LOG = logging.getLogger(__name__)
 use_old_keystone_on_dest = False


-class ApiReplayClient(object):
+class ApiReplayClient(utils.PrepareObjectForMigration):

-    basic_ignore_fields = ['updated_at',
-                           'created_at',
-                           'tags',
-                           'revision',
-                           'revision_number']
-
     def __init__(self,
                  source_os_username, source_os_user_domain_id,
@@ -112,18 +108,6 @@ class ApiReplayClient(object):
             if subnet['id'] == subnet_id:
                 return subnet

-    def subnet_drop_ipv6_fields_if_v4(self, body):
-        """
-        Drops v6 fields on subnets that are v4 as server doesn't allow them.
-        """
-        v6_fields_to_remove = ['ipv6_address_mode', 'ipv6_ra_mode']
-        if body['ip_version'] != 4:
-            return
-
-        for field in v6_fields_to_remove:
-            if field in body:
-                body.pop(field)
-
     def get_ports_on_network(self, network_id, ports):
         """Returns all the ports on a given network_id."""
         ports_on_network = []
@@ -140,20 +124,6 @@ class ApiReplayClient(object):

         return False

-    def drop_fields(self, item, drop_fields):
-        body = {}
-        for k, v in item.items():
-            if k in drop_fields:
-                continue
-            body[k] = v
-        return body
-
-    def fix_description(self, body):
-        # neutron doesn't like description being None even though its
-        # what it returns to us.
-        if 'description' in body and body['description'] is None:
-            body['description'] = ''
-
     def migrate_qos_rule(self, dest_policy, source_rule):
         """Add the QoS rule from the source to the QoS policy

@@ -169,8 +139,7 @@ class ApiReplayClient(object):
             if dest_rule['type'] == rule_type:
                 return
         pol_id = dest_policy['id']
-        drop_qos_rule_fields = ['revision', 'type', 'qos_policy_id', 'id']
-        body = self.drop_fields(source_rule, drop_qos_rule_fields)
+        body = self.prepare_qos_rule(source_rule)
         try:
             if rule_type == 'bandwidth_limit':
                 rule = self.dest_neutron.create_bandwidth_limit_rule(
@@ -207,8 +176,6 @@ class ApiReplayClient(object):
             # QoS disabled on source
             return

-        drop_qos_policy_fields = ['revision']
-
         for pol in source_qos_pols:
             dest_pol = self.have_id(pol['id'], dest_qos_pols)
             # If the policy already exists on the dest_neutron
@@ -222,8 +189,7 @@ class ApiReplayClient(object):
             else:
                 qos_rules = pol.pop('rules')
                 try:
-                    body = self.drop_fields(pol, drop_qos_policy_fields)
-                    self.fix_description(body)
+                    body = self.prepare_qos_policy(pol)
                     new_pol = self.dest_neutron.create_qos_policy(
                         body={'policy': body})
                 except Exception as e:
@@ -246,8 +212,6 @@ class ApiReplayClient(object):
         source_sec_groups = source_sec_groups['security_groups']
         dest_sec_groups = dest_sec_groups['security_groups']

-        drop_sg_fields = self.basic_ignore_fields + ['policy']
-
         total_num = len(source_sec_groups)
         LOG.info("Migrating %s security groups", total_num)
         for count, sg in enumerate(source_sec_groups, 1):
@@ -261,8 +225,7 @@ class ApiReplayClient(object):
                                     dest_sec_group['security_group_rules'])
                        is False):
                         try:
-                            body = self.drop_fields(sg_rule, drop_sg_fields)
-                            self.fix_description(body)
+                            body = self.prepare_security_group_rule(sg_rule)
                             self.dest_neutron.create_security_group_rule(
                                 {'security_group_rule': body})
                         except n_exc.Conflict:
@@ -277,8 +240,7 @@ class ApiReplayClient(object):
             else:
                 sg_rules = sg.pop('security_group_rules')
                 try:
-                    body = self.drop_fields(sg, drop_sg_fields)
-                    self.fix_description(body)
+                    body = self.prepare_security_group(sg)
                     new_sg = self.dest_neutron.create_security_group(
                         {'security_group': body})
                     LOG.info("Created security-group %(count)s/%(total)s: "
@@ -294,8 +256,7 @@ class ApiReplayClient(object):
                     # be created on the destination with the default rules only
                     for sg_rule in sg_rules:
                         try:
-                            body = self.drop_fields(sg_rule, drop_sg_fields)
-                            self.fix_description(body)
+                            body = self.prepare_security_group_rule(sg_rule)
                             rule = self.dest_neutron.create_security_group_rule(
                                 {'security_group_rule': body})
                             LOG.debug("created security group rule %s", rule['id'])
@@ -325,16 +286,6 @@ class ApiReplayClient(object):
         update_routes = {}
         gw_info = {}

-        drop_router_fields = self.basic_ignore_fields + [
-            'status',
-            'routes',
-            'ha',
-            'external_gateway_info',
-            'router_type',
-            'availability_zone_hints',
-            'availability_zones',
-            'distributed',
-            'flavor_id']
         total_num = len(source_routers)
         LOG.info("Migrating %s routers", total_num)
         for count, router in enumerate(source_routers, 1):
@@ -346,8 +297,7 @@ class ApiReplayClient(object):

             dest_router = self.have_id(router['id'], dest_routers)
             if dest_router is False:
-                body = self.drop_fields(router, drop_router_fields)
-                self.fix_description(body)
+                body = self.prepare_router(router)
                 try:
                     new_router = (self.dest_neutron.create_router(
                         {'router': body}))
@@ -386,9 +336,6 @@ class ApiReplayClient(object):
             return subnetpools_map
         dest_subnetpools = self.dest_neutron.list_subnetpools()[
             'subnetpools']
-        drop_subnetpool_fields = self.basic_ignore_fields + [
-            'id',
-            'ip_version']

         for pool in source_subnetpools:
             # a default subnetpool (per ip-version) should be unique.
@@ -401,8 +348,7 @@ class ApiReplayClient(object):
                     break
             else:
                 old_id = pool['id']
-                body = self.drop_fields(pool, drop_subnetpool_fields)
-                self.fix_description(body)
+                body = self.prepare_subnetpool(pool)
                 if 'default_quota' in body and body['default_quota'] is None:
                     del body['default_quota']

@@ -418,59 +364,6 @@ class ApiReplayClient(object):
                         {'pool': pool, 'e': e})
         return subnetpools_map

-    def fix_port(self, body):
-        # remove allowed_address_pairs if empty:
-        if ('allowed_address_pairs' in body and
-                not body['allowed_address_pairs']):
-            del body['allowed_address_pairs']
-
-        # remove port security if mac learning is enabled
-        if (body.get('mac_learning_enabled') and
-            body.get('port_security_enabled')):
-            LOG.warning("Disabling port security of port %s: The plugin "
-                        "doesn't support mac learning with port security",
-                        body['id'])
-            body['port_security_enabled'] = False
-            body['security_groups'] = []
-
-    def fix_network(self, body, dest_default_public_net):
-        # neutron doesn't like some fields being None even though its
-        # what it returns to us.
-        for field in ['provider:physical_network',
-                      'provider:segmentation_id']:
-            if field in body and body[field] is None:
-                del body[field]
-
-        # vxlan network with segmentation id should be translated to a regular
-        # network in nsx-v3.
-        if (body.get('provider:network_type') == 'vxlan' and
-            body.get('provider:segmentation_id') is not None):
-            del body['provider:network_type']
-            del body['provider:segmentation_id']
-
-        # flat network should be translated to a regular network in nsx-v3.
-        if (body.get('provider:network_type') == 'flat'):
-            del body['provider:network_type']
-            if 'provider:physical_network' in body:
-                del body['provider:physical_network']
-
-        # external networks needs some special care
-        if body.get('router:external'):
-            fields_reset = False
-            for field in ['provider:network_type', 'provider:segmentation_id',
-                          'provider:physical_network']:
-                if field in body:
-                    if body[field] is not None:
-                        fields_reset = True
-                    del body[field]
-            if fields_reset:
-                LOG.warning("Ignoring provider network fields while migrating "
-                            "external network %s", body['id'])
-            if body.get('is_default') and dest_default_public_net:
-                body['is_default'] = False
-                LOG.warning("Public network %s was set to non default network",
-                            body['id'])
-
     def migrate_networks_subnets_ports(self, routers_gw_info):
         """Migrates networks/ports/router-uplinks from src to dest neutron."""
         source_ports = self.source_neutron.list_ports()['ports']
@@ -479,34 +372,9 @@ class ApiReplayClient(object):
         dest_networks = self.dest_neutron.list_networks()['networks']
         dest_ports = self.dest_neutron.list_ports()['ports']

-        # Remove some fields before creating the new object.
-        # Some fields are not supported for a new object, and some are not
-        # supported by the nsx-v3 plugin
-        drop_subnet_fields = self.basic_ignore_fields + [
-            'advanced_service_providers',
-            'id',
-            'service_types']
-
-        drop_port_fields = self.basic_ignore_fields + [
-            'status',
-            'binding:vif_details',
-            'binding:vif_type',
-            'binding:host_id',
-            'vnic_index',
-            'dns_assignment']
-
-        drop_network_fields = self.basic_ignore_fields + [
-            'status',
-            'subnets',
-            'availability_zones',
-            'availability_zone_hints',
-            'ipv4_address_scope',
-            'ipv6_address_scope',
-            'mtu']
-
+        remove_qos = False
         if not self.dest_qos_support:
-            drop_network_fields.append('qos_policy_id')
-            drop_port_fields.append('qos_policy_id')
+            remove_qos = True

         # Find out if the destination already has a default public network
         dest_default_public_net = False
@@ -523,9 +391,9 @@ class ApiReplayClient(object):
                   'ports': len(source_ports)})
         for count, network in enumerate(source_networks, 1):
             external_net = network.get('router:external')
-            body = self.drop_fields(network, drop_network_fields)
-            self.fix_description(body)
-            self.fix_network(body, dest_default_public_net)
+            body = self.prepare_network(
+                network, remove_qos=remove_qos,
+                dest_default_public_net=dest_default_public_net)

             # only create network if the dest server doesn't have it
             if self.have_id(network['id'], dest_networks):
@@ -549,12 +417,10 @@ class ApiReplayClient(object):
             count_dhcp_subnet = 0
             for subnet_id in network['subnets']:
                 subnet = self.find_subnet_by_id(subnet_id, source_subnets)
-                body = self.drop_fields(subnet, drop_subnet_fields)
+                body = self.prepare_subnet(subnet)

                 # specify the network_id that we just created above
                 body['network_id'] = network['id']
-                self.subnet_drop_ipv6_fields_if_v4(body)
-                self.fix_description(body)
                 # translate the old subnetpool id to the new one
                 if body.get('subnetpool_id'):
                     body['subnetpool_id'] = subnetpools_map.get(
@@ -602,9 +468,7 @@ class ApiReplayClient(object):
             ports = self.get_ports_on_network(network['id'], source_ports)
             for port in ports:

-                body = self.drop_fields(port, drop_port_fields)
-                self.fix_description(body)
-                self.fix_port(body)
+                body = self.prepare_port(port, remove_qos=remove_qos)

                 # specify the network_id that we just created above
                 port['network_id'] = network['id']
|
|||||||
# L3 might be disabled in the source
|
# L3 might be disabled in the source
|
||||||
source_fips = []
|
source_fips = []
|
||||||
|
|
||||||
drop_fip_fields = self.basic_ignore_fields + [
|
|
||||||
'status', 'router_id', 'id', 'revision']
|
|
||||||
total_num = len(source_fips)
|
total_num = len(source_fips)
|
||||||
for count, source_fip in enumerate(source_fips, 1):
|
for count, source_fip in enumerate(source_fips, 1):
|
||||||
body = self.drop_fields(source_fip, drop_fip_fields)
|
body = self.prepare_floatingip(source_fip)
|
||||||
try:
|
try:
|
||||||
fip = self.dest_neutron.create_floatingip({'floatingip': body})
|
fip = self.dest_neutron.create_floatingip({'floatingip': body})
|
||||||
LOG.info("Created floatingip %(count)s/%(total)s : %(fip)s",
|
LOG.info("Created floatingip %(count)s/%(total)s : %(fip)s",
|
||||||
|
@@ -12,20 +12,26 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.

+import logging
+
 from neutron_lib.api import attributes as lib_attrs
 from oslo_config import cfg
 from oslo_utils import uuidutils
 import webob.exc

+logging.basicConfig(level=logging.INFO)
+LOG = logging.getLogger(__name__)
+

 def _fixup_res_dict(context, attr_name, res_dict, check_allow_post=True):
     # This method is a replacement of _fixup_res_dict which is used in
     # neutron.plugin.common.utils. All this mock does is insert a uuid
     # for the id field if one is not found ONLY if running in api_replay_mode.
     if cfg.CONF.api_replay_mode and 'id' not in res_dict:
-        res_dict['id'] = uuidutils.generate_uuid()
+        # exclude gateway ports from this
+        if (attr_name != 'ports' or
+            res_dict.get('device_owner') != 'network:router_gateway'):
+            res_dict['id'] = uuidutils.generate_uuid()
     attr_info = lib_attrs.RESOURCES[attr_name]
     attr_ops = lib_attrs.AttributeInfo(attr_info)
     try:
@@ -40,3 +46,194 @@ def _fixup_res_dict(context, attr_name, res_dict, check_allow_post=True):
     attr_ops.fill_post_defaults(res_dict, check_allow_post=check_allow_post)
     attr_ops.convert_values(res_dict)
     return res_dict
+
+
+class PrepareObjectForMigration(object):
+    """Helper class to modify V objects before creating them in T"""
+    # Remove some fields before creating the new object.
+    # Some fields are not supported for a new object, and some are not
+    # supported by the nsx-v3 plugin
+    basic_ignore_fields = ['updated_at',
+                           'created_at',
+                           'tags',
+                           'revision',
+                           'revision_number']
+
+    drop_sg_rule_fields = basic_ignore_fields
+    drop_sg_fields = basic_ignore_fields + ['policy']
+    drop_router_fields = basic_ignore_fields + [
+        'status',
+        'routes',
+        'ha',
+        'external_gateway_info',
+        'router_type',
+        'availability_zone_hints',
+        'availability_zones',
+        'distributed',
+        'flavor_id']
+    drop_subnetpool_fields = basic_ignore_fields + [
+        'id',
+        'ip_version']
+
+    drop_subnet_fields = basic_ignore_fields + [
+        'advanced_service_providers',
+        'id',
+        'service_types']
+
+    drop_port_fields = basic_ignore_fields + [
+        'status',
+        'binding:vif_details',
+        'binding:vif_type',
+        'binding:host_id',
+        'vnic_index',
+        'dns_assignment']
+
+    drop_network_fields = basic_ignore_fields + [
+        'status',
+        'subnets',
+        'availability_zones',
+        'availability_zone_hints',
+        'ipv4_address_scope',
+        'ipv6_address_scope',
+        'mtu']
+
+    drop_fip_fields = basic_ignore_fields + [
+        'status', 'router_id', 'id', 'revision']
+
+    drop_qos_rule_fields = ['revision', 'type', 'qos_policy_id', 'id']
+    drop_qos_policy_fields = ['revision']
+
+    def drop_fields(self, item, drop_fields):
+        body = {}
+        for k, v in item.items():
+            if k in drop_fields:
+                continue
+            body[k] = v
+        return body
+
+    def fix_description(self, body):
+        # neutron doesn't like description being None even though its
+        # what it returns to us.
+        if 'description' in body and body['description'] is None:
+            body['description'] = ''
+
+    # direct_call arg means that the object is prepared for calling the plugin
+    # create method directly
+    def prepare_security_group_rule(self, sg_rule, direct_call=False):
+        self.fix_description(sg_rule)
+        return self.drop_fields(sg_rule, self.drop_sg_rule_fields)
+
+    def prepare_security_group(self, sg, direct_call=False):
+        self.fix_description(sg)
+        return self.drop_fields(sg, self.drop_sg_fields)
+
+    def prepare_router(self, rtr, direct_call=False):
+        self.fix_description(rtr)
+        body = self.drop_fields(rtr, self.drop_router_fields)
+        if direct_call:
+            body['availability_zone_hints'] = []
+        return body
+
+    def prepare_subnetpool(self, pool, direct_call=False):
+        self.fix_description(pool)
+        return self.drop_fields(pool, self.drop_subnetpool_fields)
+
+    def prepare_network(self, net, dest_default_public_net=True,
+                        remove_qos=False, direct_call=False):
+        self.fix_description(net)
+        body = self.drop_fields(net, self.drop_network_fields)
+
+        if remove_qos:
+            body = self.drop_fields(body, ['qos_policy_id'])
+
+        # neutron doesn't like some fields being None even though its
+        # what it returns to us.
+        for field in ['provider:physical_network',
+                      'provider:segmentation_id']:
+            if field in body and body[field] is None:
+                del body[field]
+
+        # vxlan network with segmentation id should be translated to a regular
+        # network in nsx-v3.
+        if (body.get('provider:network_type') == 'vxlan' and
+            body.get('provider:segmentation_id') is not None):
+            del body['provider:network_type']
+            del body['provider:segmentation_id']
+
+        # flat network should be translated to a regular network in nsx-v3.
+        if (body.get('provider:network_type') == 'flat'):
+            del body['provider:network_type']
+            if 'provider:physical_network' in body:
+                del body['provider:physical_network']
+
+        # external networks needs some special care
+        if body.get('router:external'):
+            fields_reset = False
+            for field in ['provider:network_type', 'provider:segmentation_id',
+                          'provider:physical_network']:
+                if field in body:
+                    if body[field] is not None:
+                        fields_reset = True
+                    del body[field]
+            if fields_reset:
+                LOG.warning("Ignoring provider network fields while migrating "
+                            "external network %s", body['id'])
+            if body.get('is_default') and dest_default_public_net:
+                body['is_default'] = False
+                LOG.warning("Public network %s was set to non default network",
+                            body['id'])
+        if direct_call:
+            body['availability_zone_hints'] = []
+        return body
+
+    def prepare_subnet(self, subnet, direct_call=False):
+        self.fix_description(subnet)
+        body = self.drop_fields(subnet, self.drop_subnet_fields)
+
+        # Drops v6 fields on subnets that are v4 as server doesn't allow them.
+        v6_fields_to_remove = ['ipv6_address_mode', 'ipv6_ra_mode']
+        if body['ip_version'] == 4:
+            for field in v6_fields_to_remove:
+                if field in body:
+                    body.pop(field)
+        return body
+
+    def prepare_port(self, port, remove_qos=False, direct_call=False):
+        self.fix_description(port)
+        body = self.drop_fields(port, self.drop_port_fields)
+        if remove_qos:
+            body = self.drop_fields(body, ['qos_policy_id'])
+
+        # remove allowed_address_pairs if empty:
+        if ('allowed_address_pairs' in body and
+                not body['allowed_address_pairs']):
+            del body['allowed_address_pairs']
+
+        # remove port security if mac learning is enabled
+        if (body.get('mac_learning_enabled') and
+            body.get('port_security_enabled')):
+            LOG.warning("Disabling port security of port %s: The plugin "
+                        "doesn't support mac learning with port security",
+                        body['id'])
+            body['port_security_enabled'] = False
+            body['security_groups'] = []
+
+        if direct_call:
+            if 'device_id' not in body:
+                body['device_id'] = ""
+            if 'device_owner' not in body:
+                body['device_owner'] = ""
+
+        return body
+
+    def prepare_floatingip(self, fip, direct_call=False):
+        self.fix_description(fip)
+        return self.drop_fields(fip, self.drop_fip_fields)
+
+    def prepare_qos_rule(self, rule, direct_call=False):
+        self.fix_description(rule)
+        return self.drop_fields(rule, self.drop_qos_rule_fields)
+
+    def prepare_qos_policy(self, policy, direct_call=False):
+        self.fix_description(policy)
+        return self.drop_fields(policy, self.drop_qos_policy_fields)
@@ -699,6 +699,13 @@ def get_project_plugin_mappings_by_plugin(session, plugin):
         plugin=plugin).all()


+def update_project_plugin_mapping(session, project, plugin):
+    with session.begin(subtransactions=True):
+        binding = (session.query(nsx_models.NsxProjectPluginMapping).
+                   filter_by(project=project).one())
+        binding.plugin = plugin
+
+
 def add_nsx_vpn_connection_mapping(session, neutron_id, session_id,
                                    dpd_profile_id, ike_profile_id,
                                    ipsec_profile_id, peer_ep_id):
@@ -754,7 +754,8 @@ class NsxVMetadataProxyHandler(object):
             try:
                 self.nsxv_plugin.delete_port(
                     ctx, ports[0]['id'],
-                    l3_port_check=False)
+                    l3_port_check=False,
+                    allow_delete_internal=True)
             except Exception as e:
                 LOG.error("Failed to delete md_proxy port %(port)s: "
                           "%(e)s", {'port': ports[0]['id'], 'e': e})
@@ -4511,3 +4511,6 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             nsx_router_id, ext_addr,
             source_net=subnet['cidr'],
             bypass_firewall=False)
+
+    def extend_port_portbinding(self, port_res, binding):
+        pass
@@ -79,6 +79,8 @@ class EdgeLoadbalancerDriverV2(object):

         nsx_router_id = nsx_db.get_nsx_router_id(kwargs['context'].session,
                                                  kwargs['router_id'])
+        if not nsx_router_id:
+            return
         nsxlib = self.loadbalancer.core_plugin.nsxlib
         service_client = nsxlib.load_balancer.service
         lb_service = service_client.get_router_lb_service(nsx_router_id)
@@ -12,25 +12,39 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import sys
+
+from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from neutron.extensions import securitygroup as ext_sg
 from neutron_lib.callbacks import registry
-from neutron_lib import context
+from neutron_lib import context as n_context
+from neutron_lib import exceptions
+
+from vmware_nsx.api_replay import utils as replay_utils
 from vmware_nsx.db import db
 from vmware_nsx.extensions import projectpluginmap
 from vmware_nsx.shell.admin.plugins.common import constants
 from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
+from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as v_utils
+from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils
 from vmware_nsx.shell import resources as shell

 LOG = logging.getLogger(__name__)
+# list of supported objects to migrate in order of deletion (creation will be
+# in the opposite order)
+migrated_resources = ["floatingip", "router", "port", "subnet",
+                      "network", "security_group"]
+#TODO(asarfaty): add other resources of different service plugins like
+#vpnaas, fwaas, lbaas, qos, subnetpool, etc


 @admin_utils.output_header
-def migrate_projects(resource, event, trigger, **kwargs):
+def import_projects(resource, event, trigger, **kwargs):
     """Import existing openstack projects to the current plugin"""
     # TODO(asarfaty): get the projects list from keystone

     # get the plugin name from the user
     if not kwargs.get('property'):
         LOG.error("Need to specify plugin and project parameters")
@@ -46,11 +60,393 @@ def migrate_projects(resource, event, trigger, **kwargs):
         LOG.error("The supported plugins are %s", projectpluginmap.VALID_TYPES)
         return

-    ctx = context.get_admin_context()
+    ctx = n_context.get_admin_context()
     if not db.get_project_plugin_mapping(ctx.session, project):
         db.add_project_plugin_mapping(ctx.session, project, plugin)


+def get_resource_file_name(project_id, resource):
+    return "%s_nsxv_%ss" % (project_id, resource)
+
+
+def read_v_resources_to_files(context, project_id):
+    """Read all relevant NSX-V resources from a specific project
+
+    and write them into a json file
+    """
+    results = {}
+    with v_utils.NsxVPluginWrapper() as plugin:
+        filters = {'project_id': [project_id]}
+        for resource in migrated_resources:
+            filename = get_resource_file_name(project_id, resource)
+            file = open(filename, 'w')
+            get_objects = getattr(plugin, "get_%ss" % resource)
+            objects = get_objects(context, filters=filters)
+
+            # also add router gateway ports of the relevant routers
+            # (don't have the project id)
+            if resource == 'port':
+                rtr_ids = [rtr['id'] for rtr in results['router']]
+                gw_filters = {'device_owner': ['network:router_gateway'],
+                              'device_id': rtr_ids}
+                gw_ports = plugin.get_ports(context, filters=gw_filters,
+                                            filter_project=False)
+                # ignore metadata gw ports
+                objects.extend([port for port in gw_ports
+                                if not port['tenant_id']])
+
+            file.write(jsonutils.dumps(objects, sort_keys=True, indent=4))
+            file.close()
+            results[resource] = objects
+
+    return results
+
+
+def read_v_resources_from_files(project_id):
+    """Read all relevant NSX-V resources from a json file"""
+    results = {}
+    for resource in migrated_resources:
+        filename = get_resource_file_name(project_id, resource)
+        file = open(filename, 'r')
+        results[resource] = jsonutils.loads(file.read())
+        file.close()
+    return results
+
+
+def delete_router_routes_and_interfaces(context, plugin, router):
+    if router.get('routes'):
+        plugin.update_router(context, router['id'],
+                             {'router': {'routes': []}})
+
+    interfaces = plugin._get_router_interfaces(context, router['id'])
+    for port in interfaces:
+        plugin.remove_router_interface(context, router['id'],
+                                       {'port_id': port['id']})
+
+
+def delete_v_resources(context, objects):
+    """Delete a list of objects from the V plugin"""
+    with v_utils.NsxVPluginWrapper() as plugin:
+        LOG.info(">>>>Deleting all NSX-V objects of the project.")
+        for resource in migrated_resources:
+            get_object = getattr(plugin, "get_%s" % resource)
+            del_object = getattr(plugin, "delete_%s" % resource)
+            for obj in objects[resource]:
+                # verify that this object still exists
+                try:
+                    get_object(context, obj['id'])
+                except exceptions.NotFound:
+                    # prevent logger from logging this exception
+                    sys.exc_clear()
+                    continue
+
+                try:
+                    # handle special cases before delete
+                    if resource == 'router':
+                        delete_router_routes_and_interfaces(
+                            context, plugin, obj)
+                    elif resource == 'port':
+                        if obj['device_owner'] == 'network:dhcp':
+                            continue
+                    # delete the objects from the NSX-V plugin
+                    del_object(context, obj['id'])
+                    LOG.info(">>Deleted %(resource)s %(name)s",
+                             {'resource': resource,
+                              'name': obj.get('name') or obj['id']})
+                except Exception as e:
+                    LOG.warning(">>Failed to delete %(resource)s %(name)s: "
+                                "%(e)s",
+                                {'resource': resource,
+                                 'name': obj.get('name') or obj['id'], 'e': e})
+    LOG.info(">>>>Done deleting all NSX-V objects.")
+
+
+def get_router_by_id(objects, router_id):
+    for rtr in objects.get('router', []):
+        if rtr['id'] == router_id:
+            return rtr
+
+
+def create_t_resources(context, objects, ext_net):
+    """Create a list of objects in the T plugin"""
+    LOG.info(">>>>Creating all the objects of the project in NSX-T.")
+    prepare = replay_utils.PrepareObjectForMigration()
+    with v3_utils.NsxV3PluginWrapper() as plugin:
+        # create the resource in the order opposite to the deletion
+        # (but start with routers)
+        ordered_resources = migrated_resources[::-1]
+        ordered_resources.remove('router')
+        ordered_resources = ['router'] + ordered_resources
+        dhcp_subnets = []
+        for resource in ordered_resources:
+            total_num = len(objects[resource])
+            LOG.info(">>>Creating %s %s%s.", total_num,
+                     resource, 's' if total_num > 1 else '')
+            get_object = getattr(plugin, "get_%s" % resource)
+            create_object = getattr(plugin, "create_%s" % resource)
+            # go over the objects of this resource
+            for count, obj in enumerate(objects[resource], 1):
+                # check if this object already exists
+                try:
+                    get_object(context, obj['id'])
+                except exceptions.NotFound:
+                    # prevent logger from logging this exception
+                    sys.exc_clear()
+                else:
+                    # already exists (this will happen if we rerun from files,
+                    # or if the deletion failed)
+                    LOG.info(">>Skipping %(resource)s %(name)s %(count)s/"
+                             "%(total)s as it was already created.",
+                             {'resource': resource,
+                              'name': obj.get('name') or obj['id'],
+                              'count': count,
+                              'total': total_num})
+                    continue
+
+                # fix object before creation using the api replay code
+                orig_id = obj['id']
+                prepare_object = getattr(prepare, "prepare_%s" % resource)
+                obj_data = prepare_object(obj, direct_call=True)
+                enable_dhcp = False
+                # special cases for different objects before create:
+                if resource == 'subnet':
+                    if obj_data['enable_dhcp']:
+                        enable_dhcp = True
+                        # disable dhcp for now, to avoid ip collisions
+                        obj_data['enable_dhcp'] = False
+                elif resource == 'security_group':
+                    # security group rules should be added separately
+                    sg_rules = obj_data.pop('security_group_rules')
+                elif resource == 'floatingip':
+                    # Create the floating IP on the T external network
+                    obj_data['floating_network_id'] = ext_net
+                    del obj_data['floating_ip_address']
+                elif resource == 'port':
+                    # remove the old subnet id field from ports fixed_ips dict
+                    # since the subnet ids are changed
+                    for fixed_ips in obj_data['fixed_ips']:
+                        del fixed_ips['subnet_id']
+
+                    if obj_data['device_owner'] == 'network:dhcp':
+                        continue
+                    if obj_data['device_owner'] == 'network:floatingip':
+                        continue
+                    if obj_data['device_owner'] == 'network:router_gateway':
+                        # add a gateway on the new ext network for this router
+                        router_id = obj_data['device_id']
+                        # keep the original enable-snat value
+                        router_data = get_router_by_id(objects, router_id)
+                        enable_snat = router_data['external_gateway_info'].get(
+                            'enable_snat', True)
+                        rtr_body = {
+                            "external_gateway_info":
+                                {"network_id": ext_net,
+                                 "enable_snat": enable_snat}}
+                        try:
+                            plugin.update_router(
+                                context, router_id, {'router': rtr_body})
+                            LOG.info(">>Uplinked router %(rtr)s to new "
+                                     "external network %(net)s",
+                                     {'rtr': router_id,
+                                      'net': ext_net})
+
+                        except Exception as e:
+                            LOG.error(">>Failed to add router %(rtr)s "
+                                      "gateway: %(e)s",
+                                      {'rtr': router_id, 'e': e})
+                        continue
+                    if obj_data['device_owner'] == 'network:router_interface':
+                        try:
+                            # uplink router_interface ports by creating the
+                            # port, and attaching it to the router
+                            router_id = obj_data['device_id']
+                            obj_data['device_owner'] = ""
+                            obj_data['device_id'] = ""
+                            created_port = plugin.create_port(
+                                context,
+                                {'port': obj_data})
+                            LOG.info(">>Created interface port %(port)s, ip "
+                                     "%(ip)s, mac %(mac)s)",
+                                     {'port': created_port['id'],
+                                      'ip': created_port['fixed_ips'][0][
+                                            'ip_address'],
+                                      'mac': created_port['mac_address']})
+                            plugin.add_router_interface(
+                                context,
+                                router_id,
+                                {'port_id': created_port['id']})
+                            LOG.info(">>Uplinked router %(rtr)s to network "
+                                     "%(net)s",
+                                     {'rtr': router_id,
+                                      'net': obj_data['network_id']})
+                        except Exception as e:
+                            LOG.error(">>Failed to add router %(rtr)s "
+                                      "interface port: %(e)s",
+                                      {'rtr': router_id, 'e': e})
+                        continue
+
+                # create the object on the NSX-T plugin
+                try:
+                    created_obj = create_object(context, {resource: obj_data})
+                    LOG.info(">>Created %(resource)s %(name)s %(count)s/"
+                             "%(total)s",
+                             {'resource': resource, 'count': count,
+                              'name': obj_data.get('name') or orig_id,
+                              'total': total_num})
+                except Exception as e:
+                    # TODO(asarfaty): subnets ids are changed, so recreating a
+                    # subnet will fail on overlapping ips.
+                    LOG.error(">>Failed to create %(resource)s %(name)s: "
+                              "%(e)s",
+                              {'resource': resource, 'e': e,
+                               'name': obj_data.get('name') or orig_id})
+                    continue
+
+                # special cases for different objects after create:
+                if resource == 'security_group':
+                    sg_id = obj_data.get('name') or obj_data['id']
+                    for rule in sg_rules:
+                        rule_data = prepare.prepare_security_group_rule(rule)
+                        try:
+                            plugin.create_security_group_rule(
+                                context, {'security_group_rule': rule_data})
+                        except ext_sg.SecurityGroupRuleExists:
+                            # default rules were already created.
+                            # prevent logger from logging this exception
+                            sys.exc_clear()
+                        except Exception as e:
+                            LOG.error(
+                                ">>Failed to create security group %(name)s "
+                                "rules: %(e)s",
+                                {'name': sg_id, 'e': e})
+                elif resource == 'subnet':
+                    if enable_dhcp:
+                        dhcp_subnets.append(created_obj['id'])
+
+        # Enable dhcp on all the relevant subnets (after creating all ports,
+        # to maintain original IPs):
+        if dhcp_subnets:
+            for subnet_id in dhcp_subnets:
+                try:
+                    plugin.update_subnet(
+                        context, subnet_id,
+                        {'subnet': {'enable_dhcp': True}})
+
+                except Exception as e:
+                    LOG.error("Failed to enable DHCP on subnet %(subnet)s:"
+                              " %(e)s",
+                              {'subnet': subnet_id, 'e': e})
+
+        # Add static routes (after all router interfaces and gateways are set)
+        for obj_data in objects['router']:
+            if 'routes' in obj_data:
+                try:
+                    plugin.update_router(
+                        context, obj_data['id'],
+                        {'router': {'routes': obj_data['routes']}})
+                except Exception as e:
+                    LOG.error("Failed to add routes to router %(rtr)s: "
+                              "%(e)s",
+                              {'rtr': obj_data['id'], 'e': e})
+
+    LOG.info(">>>Done Creating all objects in NSX-T.")
+
+
+@admin_utils.output_header
+def migrate_v_project_to_t(resource, event, trigger, **kwargs):
+    """Migrate 1 project from v to t with all its resources"""
+
+    # filter out the plugins INFO logging
+    # TODO(asarfaty): Consider this for all admin utils
+    LOG.logger.setLevel(logging.INFO)
+    logging.getLogger(None).logger.setLevel(logging.WARN)
+
+    # get the configuration: tenant + public network + from file flag
+    usage = ("Usage: nsxadmin -r projects -o %s --property project-id=<> "
+             "--property external-net=<NSX-T external network to be used> "
+             "<--property from-file=True>" %
+             shell.Operations.NSX_MIGRATE_V_V3.value)
+    if not kwargs.get('property'):
+        LOG.error("Missing parameters: %s", usage)
+        return
+    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
+    project = properties.get('project-id')
+    ext_net_id = properties.get('external-net')
+    from_file = properties.get('from-file', 'false').lower() == "true"
+    # TODO(asarfaty): get files path
+    if not project:
+        LOG.error("Missing project-id parameter: %s", usage)
+        return
+    if not ext_net_id:
+        LOG.error("Missing external-net parameter: %s", usage)
+        return
+
+    # check if files exist in the current directory
+    try:
+        filename = get_resource_file_name(project, 'network')
+        file = open(filename, 'r')
+        if file.read():
+            if not from_file:
+                from_file = admin_utils.query_yes_no(
+                    "Use existing resources files for this project?",
+                    default="yes")
+        file.close()
+    except Exception:
+        sys.exc_clear()
+        if from_file:
+            LOG.error("Cannot run from file: files not found")
+            return
+
+    # validate tenant id and public network
+    ctx = n_context.get_admin_context()
+    mapping = db.get_project_plugin_mapping(ctx.session, project)
+    current_plugin = mapping.plugin
+    if not mapping:
+        LOG.error("Project %s is unknown", project)
+        return
+    if not from_file and current_plugin != projectpluginmap.NsxPlugins.NSX_V:
+        LOG.error("Project %s belongs to plugin %s.", project, mapping.plugin)
+        return
+
+    with v3_utils.NsxV3PluginWrapper() as plugin:
+        try:
+            plugin.get_network(ctx, ext_net_id)
+        except exceptions.NetworkNotFound:
+            LOG.error("Network %s was not found", ext_net_id)
+            return
+        if not plugin._network_is_external(ctx, ext_net_id):
+            LOG.error("Network %s is not external", ext_net_id)
+            return
+
+    if from_file:
+        # read resources from files
+        objects = read_v_resources_from_files(project)
+    else:
+        # read all V resources and dump to a file
+        objects = read_v_resources_to_files(ctx, project)
+
+    # delete all the V resources (reading it from the files)
+    if current_plugin == projectpluginmap.NsxPlugins.NSX_V:
+        delete_v_resources(ctx, objects)
+
+    # change the mapping of this tenant to T
+    db.update_project_plugin_mapping(ctx.session, project,
+                                     projectpluginmap.NsxPlugins.NSX_T)
+
+    # use api replay flag to allow keeping the IDs
+    cfg.CONF.set_override('api_replay_mode', True)
+
+    # add resources 1 by one after adapting them to T (api-replay code)
+    create_t_resources(ctx, objects, ext_net_id)
+
+    # reset api replay flag to allow keeping the IDs
+    cfg.CONF.set_override('api_replay_mode', False)
+
+
+-registry.subscribe(migrate_projects,
+registry.subscribe(import_projects,
                    constants.PROJECTS,
                    shell.Operations.IMPORT.value)
+
+registry.subscribe(migrate_v_project_to_t,
+                   constants.PROJECTS,
+                   shell.Operations.NSX_MIGRATE_V_V3.value)
@@ -118,23 +118,31 @@ class NsxVPluginWrapper(plugin.NsxVPlugin):
         filters.update(requested_filters)
         return filters

-    def get_networks(self, context, filters=None, fields=None):
-        filters = self._update_filters(filters)
+    def get_networks(self, context, filters=None, fields=None,
+                     filter_project=True):
+        if filter_project:
+            filters = self._update_filters(filters)
         return super(NsxVPluginWrapper, self).get_networks(
             context, filters=filters, fields=fields)

-    def get_subnets(self, context, filters=None, fields=None):
-        filters = self._update_filters(filters)
+    def get_subnets(self, context, filters=None, fields=None,
+                    filter_project=True):
+        if filter_project:
+            filters = self._update_filters(filters)
         return super(NsxVPluginWrapper, self).get_subnets(
             context, filters=filters, fields=fields)

-    def get_ports(self, context, filters=None, fields=None):
-        filters = self._update_filters(filters)
+    def get_ports(self, context, filters=None, fields=None,
+                  filter_project=True):
+        if filter_project:
+            filters = self._update_filters(filters)
         return super(NsxVPluginWrapper, self).get_ports(
             self.context, filters=filters, fields=fields)

-    def get_routers(self, context, filters=None, fields=None):
-        filters = self._update_filters(filters)
+    def get_routers(self, context, filters=None, fields=None,
+                    filter_project=True):
+        if filter_project:
+            filters = self._update_filters(filters)
         return super(NsxVPluginWrapper, self).get_routers(
             self.context, filters=filters, fields=fields)

@@ -119,9 +119,9 @@ def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
                 continue
             router_id = ports[0]['device_id']
             interface = {'subnet_id': network['subnets'][0]}
-            plugin.remove_router_interface(router_id, interface)
+            plugin.remove_router_interface(None, router_id, interface)
             LOG.info("Removed metadata interface on router %s", router_id)
-            plugin.delete_network(network['id'])
+            plugin.delete_network(None, network['id'])
             LOG.info("Removed metadata network %s", network['id'])
         else:
             lswitch_id = neutron_client.net_id_to_lswitch_id(
@@ -226,14 +226,23 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
                   "section in the nsx.ini file: %s", e)
         return

-    # Go over all the compute ports from the plugin
+    port_filters = {}
+    if kwargs.get('property'):
+        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
+        project = properties.get('project-id')
+        if project:
+            port_filters['project_id'] = [project]
+
+    # Go over all the ports from the plugin
     admin_cxt = neutron_context.get_admin_context()
-    port_filters = v3_utils.get_plugin_filters(admin_cxt)
-    port_filters['device_owner'] = ['compute:None']
     with PortsPlugin() as plugin:
         neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters)

         for port in neutron_ports:
+            # skip non compute ports
+            if (not port.get('device_owner').startswith(
+                    const.DEVICE_OWNER_COMPUTE_PREFIX)):
+                continue
             device_id = port.get('device_id')

             # get the vm moref & spec from the DVS
@@ -249,7 +258,8 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
             if (prop.name == 'network' and
                     hasattr(prop.val, 'ManagedObjectReference')):
                 for net in prop.val.ManagedObjectReference:
-                    if net._type == 'DistributedVirtualPortgroup':
+                    if (net._type == 'DistributedVirtualPortgroup' or
+                            net._type == 'Network'):
                         update_spec = True

         if not update_spec:
@@ -171,9 +171,6 @@ class NsxV3PluginWrapper(plugin.NsxV3Plugin):
     def _process_security_group_logging(self):
         pass

-    def _init_port_security_profile(self):
-        return True
-
     def _extend_get_network_dict_provider(self, context, net):
         self._extend_network_dict_provider(context, net)
         # skip getting the Qos policy ID because get_object calls
@@ -184,10 +181,14 @@ class NsxV3PluginWrapper(plugin.NsxV3Plugin):
         # skip getting the Qos policy ID because get_object calls
         # plugin init again on admin-util environment

-    def delete_network(self, network_id):
+    def delete_network(self, context, network_id):
+        if not context:
+            context = self.context
         return super(NsxV3PluginWrapper, self).delete_network(
-            self.context, network_id)
+            context, network_id)

-    def remove_router_interface(self, router_id, interface):
+    def remove_router_interface(self, context, router_id, interface):
+        if not context:
+            context = self.context
         return super(NsxV3PluginWrapper, self).remove_router_interface(
-            self.context, router_id, interface)
+            context, router_id, interface)
@@ -213,10 +213,10 @@ nsxv_resources = {


 # Add supported NSX-TVD resources in this dictionary
-# TODO(asarfaty): add v+v3 resources here too
 nsxtvd_resources = {
     constants.PROJECTS: Resource(constants.PROJECTS,
-                                 [Operations.IMPORT.value]),
+                                 [Operations.IMPORT.value,
+                                  Operations.NSX_MIGRATE_V_V3.value]),
 }

 nsxv3_resources_names = list(nsxv3_resources.keys())