diff --git a/tricircle/common/constants.py b/tricircle/common/constants.py
index 0de56e7..1020a0f 100644
--- a/tricircle/common/constants.py
+++ b/tricircle/common/constants.py
@@ -13,6 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import datetime
+
+
 # service type
 ST_NOVA = 'nova'
 # only support cinder v2
@@ -29,6 +32,7 @@ RT_NETWORK = 'network'
 RT_SUBNET = 'subnet'
 RT_PORT = 'port'
 RT_ROUTER = 'router'
+RT_SG = 'security_group'
 
 # version list
 NOVA_VERSION_V21 = 'v2.1'
@@ -53,3 +57,4 @@ ns_bridge_subnet_name = 'ns_bridge_subnet_%s'  # project_id
 ns_bridge_port_name = 'ns_bridge_port_%s_%s_%s'
 
 MAX_INT = 0x7FFFFFFF
+expire_time = datetime.datetime(2000, 1, 1)
diff --git a/tricircle/common/lock_handle.py b/tricircle/common/lock_handle.py
index 40f0f1a..5140aab 100644
--- a/tricircle/common/lock_handle.py
+++ b/tricircle/common/lock_handle.py
@@ -22,6 +22,13 @@ from tricircle.db import core
 from tricircle.db import models
 
 
+ALL_DONE = 0  # both route and bottom resource exist
+RES_DONE = 1  # only bottom resource exists
+NONE_DONE = 2  # neither route nor bottom resource exists
+# The case where only the route exists is not considered; manual operations
+# on the bottom pod may leave the routing in that state.
+
+
 def get_or_create_route(t_ctx, q_ctx,
                         project_id, pod, _id, _type, list_ele_method):
     # use configuration option later
@@ -36,7 +43,7 @@ def get_or_create_route(t_ctx, q_ctx,
         if routes:
             route = routes[0]
             if route['bottom_id']:
-                return route, False
+                return route, ALL_DONE
             else:
                 route_time = route['updated_at'] or route['created_at']
                 current_time = datetime.datetime.utcnow()
@@ -52,7 +59,7 @@ def get_or_create_route(t_ctx, q_ctx,
                         core.update_resource(t_ctx,
                                              models.ResourceRouting,
                                              route['id'], route)
-                        return route, False
+                        return route, RES_DONE
                     try:
                         core.delete_resource(t_ctx,
                                              models.ResourceRouting,
@@ -70,10 +77,10 @@ def get_or_create_route(t_ctx, q_ctx,
                                      'project_id': project_id,
                                      'resource_type': _type})
         t_ctx.session.commit()
-        return route, True
+        return route, NONE_DONE
     except db_exc.DBDuplicateEntry:
         t_ctx.session.rollback()
-        return None, False
+        return None, NONE_DONE
     finally:
         t_ctx.session.close()
 
@@ -84,17 +91,15 @@ def get_or_create_element(t_ctx, q_ctx,
     # use configuration option later
     max_tries = 5
     for _ in xrange(max_tries):
-        route, is_new = get_or_create_route(
+        route, status = get_or_create_route(
             t_ctx, q_ctx, project_id, pod, ele['id'], _type, list_ele_method)
         if not route:
            eventlet.sleep(0)
            continue
-        if not is_new and not route['bottom_id']:
-            eventlet.sleep(0)
-            continue
-        if not is_new and route['bottom_id']:
+        if status == RES_DONE or status == ALL_DONE:
+            # in these cases, bottom_id must exist
             break
-        if is_new:
+        if status == NONE_DONE:
             try:
                 ele = create_ele_method(t_ctx, q_ctx, pod, body, _type)
             except Exception:
@@ -121,4 +126,13 @@ def get_or_create_element(t_ctx, q_ctx,
         raise Exception('Fail to create %s routing entry' % _type)
     if not route['bottom_id']:
         raise Exception('Fail to bind top and bottom %s' % _type)
-    return is_new, route['bottom_id']
+    # NOTE(zhiyuan) ALL_DONE means the routing entry was already complete
+    # when we retrieved the resource, so we return False to indicate that
+    # the resource can be used directly and safely. RES_DONE or NONE_DONE
+    # means the routing entry was not complete when we retrieved the
+    # resource but we managed to fill it in the end, so we return True to
+    # indicate that some follow-up work may still be needed.
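
[Reviewer note] Illustrative sketch only, not part of the patch: how a caller is expected to consume the (is_new, bottom_id) pair produced by the return statement in the next added lines. The helper names below are placeholders passed in as parameters; in the patch itself the wrapper is ServerController._prepare_neutron_element and the security-group follow-up is _handle_sg_rule_for_new_group.

    # hypothetical wrapper around lock_handle.get_or_create_element
    def ensure_bottom_security_group(t_ctx, pod, t_sg, sg_body,
                                     prepare_element, init_bottom_rules):
        is_new, b_sg_id = prepare_element(t_ctx, pod, t_sg,
                                          'security_group', sg_body)
        if is_new:
            # RES_DONE or NONE_DONE: the mapping was filled in during this
            # call, so per-group initialization may still be pending
            init_bottom_rules(t_ctx, pod, t_sg, b_sg_id)
        # ALL_DONE (is_new == False): the mapping already existed and the
        # bottom group can be used as-is
        return b_sg_id
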
+ if status == ALL_DONE: + return False, route['bottom_id'] + else: + return True, route['bottom_id'] diff --git a/tricircle/common/resource_handle.py b/tricircle/common/resource_handle.py index e7e4d02..cb3bf4f 100644 --- a/tricircle/common/resource_handle.py +++ b/tricircle/common/resource_handle.py @@ -120,8 +120,8 @@ class NeutronResourceHandle(ResourceHandle): 'subnet': LIST | CREATE | DELETE | GET, 'port': LIST | CREATE | DELETE | GET, 'router': LIST | CREATE | ACTION | UPDATE, - 'security_group': LIST, - 'security_group_rule': LIST, + 'security_group': LIST | CREATE | GET, + 'security_group_rule': LIST | CREATE | DELETE, 'floatingip': LIST | CREATE} def _get_client(self, cxt): @@ -146,8 +146,12 @@ class NeutronResourceHandle(ResourceHandle): def handle_create(self, cxt, resource, *args, **kwargs): try: client = self._get_client(cxt) - return getattr(client, 'create_%s' % resource)( - *args, **kwargs)[resource] + ret = getattr(client, 'create_%s' % resource)( + *args, **kwargs) + if resource in ret: + return ret[resource] + else: + return ret['%ss' % resource] except q_exceptions.ConnectionFailed: self.endpoint_url = None raise exceptions.EndpointNotAvailable( diff --git a/tricircle/nova_apigw/controllers/server.py b/tricircle/nova_apigw/controllers/server.py index c3fc36e..cdd544f 100644 --- a/tricircle/nova_apigw/controllers/server.py +++ b/tricircle/nova_apigw/controllers/server.py @@ -13,10 +13,13 @@ # License for the specific language governing permissions and limitations # under the License. +import netaddr import pecan from pecan import expose from pecan import rest +import neutronclient.common.exceptions as q_exceptions + from tricircle.common import az_ag import tricircle.common.client as t_client from tricircle.common import constants @@ -74,7 +77,8 @@ class ServerController(rest.RestController): } return body - def _get_create_port_body(self, port, subnet_map, bottom_net_id): + def _get_create_port_body(self, port, subnet_map, bottom_net_id, + security_group_ids=None): bottom_fixed_ips = [] for ip in port['fixed_ips']: bottom_ip = {'subnet_id': subnet_map[ip['subnet_id']], @@ -90,6 +94,8 @@ class ServerController(rest.RestController): 'fixed_ips': bottom_fixed_ips } } + if security_group_ids: + body['port']['security_groups'] = security_group_ids return body def _get_create_dhcp_port_body(self, port, bottom_subnet_id, @@ -123,28 +129,30 @@ class ServerController(rest.RestController): client = self._get_client(pod_['pod_name']) return client.create_resources(_type_, t_ctx, body_) - _, ele_id = t_lock.get_or_create_element( + return t_lock.get_or_create_element( context, None, # we don't need neutron context, so pass None self.project_id, pod, ele, _type, body, list_resources, create_resources) - return ele_id - def _handle_network(self, context, pod, net, subnets, port=None): + def _handle_network(self, context, pod, net, subnets, port=None, + top_sg_ids=None, bottom_sg_ids=None): # network net_body = self._get_create_network_body(net) - bottom_net_id = self._prepare_neutron_element(context, pod, net, - 'network', net_body) + _, bottom_net_id = self._prepare_neutron_element(context, pod, net, + 'network', net_body) # subnet subnet_map = {} for subnet in subnets: subnet_body = self._get_create_subnet_body(subnet, bottom_net_id) - bottom_subnet_id = self._prepare_neutron_element( + _, bottom_subnet_id = self._prepare_neutron_element( context, pod, subnet, 'subnet', subnet_body) subnet_map[subnet['id']] = bottom_subnet_id top_client = self._get_client() top_port_body = 
{'port': {'network_id': net['id'], 'admin_state_up': True}} + if top_sg_ids: + top_port_body['port']['security_groups'] = top_sg_ids # dhcp port client = self._get_client(pod['pod_name']) @@ -247,10 +255,14 @@ class ServerController(rest.RestController): # port if not port: port = top_client.create_ports(context, top_port_body) - port_body = self._get_create_port_body(port, subnet_map, bottom_net_id) - bottom_port_id = self._prepare_neutron_element(context, pod, port, - 'port', port_body) - return bottom_port_id + port_body = self._get_create_port_body( + port, subnet_map, bottom_net_id, bottom_sg_ids) + else: + port_body = self._get_create_port_body(port, subnet_map, + bottom_net_id) + _, bottom_port_id = self._prepare_neutron_element(context, pod, port, + 'port', port_body) + return port['id'], bottom_port_id def _handle_port(self, context, pod, port): top_client = self._get_client() @@ -266,7 +278,162 @@ class ServerController(rest.RestController): for fixed_ip in port['fixed_ips']: subnets.append(top_client.get_subnets(context, fixed_ip['subnet_id'])) - return self._handle_network(context, pod, net, subnets, port) + return self._handle_network(context, pod, net, subnets, port=port) + + @staticmethod + def _safe_create_security_group_rule(context, client, body): + try: + client.create_security_group_rules(context, body) + except q_exceptions.Conflict: + return + + @staticmethod + def _safe_delete_security_group_rule(context, client, _id): + try: + client.delete_security_group_rules(context, _id) + except q_exceptions.NotFound: + return + + def _handle_security_group(self, context, pod, top_sg_map, + security_groups): + t_sg_ids = [] + b_sg_ids = [] + is_news = [] + for sg_name in security_groups: + t_sg = top_sg_map[sg_name] + sg_body = { + 'security_group': { + 'name': t_sg['id'], + 'description': t_sg['description']}} + is_new, b_sg_id = self._prepare_neutron_element( + context, pod, t_sg, constants.RT_SG, sg_body) + t_sg_ids.append(t_sg['id']) + is_news.append(is_new) + b_sg_ids.append(b_sg_id) + + return t_sg_ids, b_sg_ids, is_news + + @staticmethod + def _construct_bottom_rule(rule, sg_id, ip=None): + ip = ip or rule['remote_ip_prefix'] + # if ip is passed, this is a extended rule for remote group + return {'remote_group_id': None, + 'direction': rule['direction'], + 'remote_ip_prefix': ip, + 'protocol': rule.get('protocol'), + 'ethertype': rule['ethertype'], + 'port_range_max': rule.get('port_range_max'), + 'port_range_min': rule.get('port_range_min'), + 'security_group_id': sg_id} + + @staticmethod + def _compare_rule(rule1, rule2): + for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype', + 'port_range_max', 'port_range_min'): + if rule1[key] != rule2[key]: + return False + return True + + def _handle_sg_rule_for_default_group(self, context, pod, default_sg, + project_id): + top_client = self._get_client() + new_b_rules = [] + for t_rule in default_sg['security_group_rules']: + if not t_rule['remote_group_id']: + # leave sg_id empty here + new_b_rules.append( + self._construct_bottom_rule(t_rule, '')) + continue + if t_rule['ethertype'] != 'IPv4': + continue + subnets = top_client.list_subnets( + context, [{'key': 'tenant_id', 'comparator': 'eq', + 'value': project_id}]) + bridge_ip_net = netaddr.IPNetwork('100.0.0.0/8') + for subnet in subnets: + ip_net = netaddr.IPNetwork(subnet['cidr']) + if ip_net in bridge_ip_net: + continue + # leave sg_id empty here + new_b_rules.append( + self._construct_bottom_rule(t_rule, '', + subnet['cidr'])) + + mappings = 
db_api.get_bottom_mappings_by_top_id( + context, default_sg['id'], constants.RT_SG) + for pod, b_sg_id in mappings: + client = self._get_client(pod['pod_name']) + b_sg = client.get_security_groups(context, b_sg_id) + add_rules = [] + del_rules = [] + match_index = set() + for b_rule in b_sg['security_group_rules']: + match = False + for i, rule in enumerate(new_b_rules): + if self._compare_rule(b_rule, rule): + match = True + match_index.add(i) + break + if not match: + del_rules.append(b_rule) + for i, rule in enumerate(new_b_rules): + if i not in match_index: + add_rules.append(rule) + + for del_rule in del_rules: + self._safe_delete_security_group_rule( + context, client, del_rule['id']) + if add_rules: + rule_body = {'security_group_rules': []} + for add_rule in add_rules: + add_rule['security_group_id'] = b_sg_id + rule_body['security_group_rules'].append(add_rule) + self._safe_create_security_group_rule(context, + client, rule_body) + + def _handle_sg_rule_for_new_group(self, context, pod, top_sgs, + bottom_sg_ids): + client = self._get_client(pod['pod_name']) + for i, t_sg in enumerate(top_sgs): + b_sg_id = bottom_sg_ids[i] + new_b_rules = [] + for t_rule in t_sg['security_group_rules']: + if t_rule['remote_group_id']: + # we do not handle remote group rule for non-default + # security group, actually tricircle plugin in neutron + # will reject such rule + # default security group is not passed with top_sgs so + # t_rule will not belong to default security group + continue + new_b_rules.append( + self._construct_bottom_rule(t_rule, b_sg_id)) + try: + b_sg = client.get_security_groups(context, b_sg_id) + for b_rule in b_sg['security_group_rules']: + self._safe_delete_security_group_rule( + context, client, b_rule['id']) + if new_b_rules: + rule_body = {'security_group_rules': new_b_rules} + self._safe_create_security_group_rule(context, client, + rule_body) + except Exception: + # if we fails when operating bottom security group rule, we + # update the security group mapping to set bottom_id to None + # and expire the mapping, so next time the security group rule + # operations can be redone + with context.session.begin(): + routes = core.query_resource( + context, models.ResourceRouting, + [{'key': 'top_id', 'comparator': 'eq', + 'value': t_sg['id']}, + {'key': 'bottom_id', 'comparator': 'eq', + 'value': b_sg_id}], []) + update_dict = {'bottom_id': None, + 'created_at': constants.expire_time, + 'updated_at': constants.expire_time} + core.update_resource(context, models.ResourceRouting, + routes[0]['id'], update_dict) + raise @staticmethod def _get_create_server_body(origin, bottom_az): @@ -367,6 +534,28 @@ class ServerController(rest.RestController): server_body = self._get_create_server_body(kw['server'], b_az) top_client = self._get_client() + + sg_filters = [{'key': 'tenant_id', 'comparator': 'eq', + 'value': self.project_id}] + top_sgs = top_client.list_security_groups(context, sg_filters) + top_sg_map = dict((sg['name'], sg) for sg in top_sgs) + + if 'security_groups' not in kw['server']: + security_groups = ['default'] + else: + security_groups = [] + for sg in kw['server']['security_groups']: + if 'name' not in sg: + pecan.abort(404, 'Security group name not specify') + return + if sg['name'] not in top_sg_map: + pecan.abort(404, + 'Security group %s not found' % sg['name']) + return + security_groups.append(sg['name']) + t_sg_ids, b_sg_ids, is_news = self._handle_security_group( + context, pod, top_sg_map, security_groups) + if 'networks' in kw['server']: 
server_body['networks'] = [] for net_info in kw['server']['networks']: @@ -390,24 +579,50 @@ class ServerController(rest.RestController): if not subnets: pecan.abort(400, 'Network not contain subnets') return - bottom_port_id = self._handle_network(context, pod, - network, subnets) + t_port_id, b_port_id = self._handle_network( + context, pod, network, subnets, + top_sg_ids=t_sg_ids, bottom_sg_ids=b_sg_ids) elif 'port' in net_info: port = top_client.get_ports(context, net_info['port']) if not port: pecan.abort(400, 'Port not found') return - bottom_port_id = self._handle_port(context, pod, port) - server_body['networks'].append({'port': bottom_port_id}) + t_port_id, b_port_id = self._handle_port( + context, pod, port) + server_body['networks'].append({'port': b_port_id}) + + # only for security group first created in a pod, we invoke + # _handle_sg_rule_for_new_group to initialize rules in that group, this + # method removes all the rules in the new group then add new rules + top_sg_id_map = dict((sg['id'], sg) for sg in top_sgs) + new_top_sgs = [] + new_bottom_sg_ids = [] + default_sg = None + for t_id, b_id, is_new in zip(t_sg_ids, b_sg_ids, is_news): + sg_name = top_sg_id_map[t_id]['name'] + if sg_name == 'default': + default_sg = top_sg_id_map[t_id] + continue + if not is_new: + continue + new_top_sgs.append(top_sg_id_map[t_id]) + new_bottom_sg_ids.append(b_id) + self._handle_sg_rule_for_new_group(context, pod, new_top_sgs, + new_bottom_sg_ids) + if default_sg: + self._handle_sg_rule_for_default_group( + context, pod, default_sg, self.project_id) client = self._get_client(pod['pod_name']) nics = [ {'port-id': _port['port']} for _port in server_body['networks']] + server = client.create_servers(context, name=server_body['name'], image=server_body['imageRef'], flavor=server_body['flavorRef'], - nics=nics) + nics=nics, + security_groups=b_sg_ids) with context.session.begin(): core.create_resource(context, models.ResourceRouting, {'top_id': server['id'], diff --git a/tricircle/tests/unit/nova_apigw/controllers/test_server.py b/tricircle/tests/unit/nova_apigw/controllers/test_server.py index c618ecc..42801ad 100644 --- a/tricircle/tests/unit/nova_apigw/controllers/test_server.py +++ b/tricircle/tests/unit/nova_apigw/controllers/test_server.py @@ -13,15 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. 
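
[Reviewer note] Before the tests, a short sketch (not part of the patch) of the remote-group expansion performed in _handle_sg_rule_for_default_group above: a bottom pod cannot reference a security group living in another pod, so the default group's IPv4 ingress rule whose remote_group_id points back at the group itself is rewritten into one remote_ip_prefix rule per tenant subnet, skipping bridge subnets inside 100.0.0.0/8. The CIDR values here are made up for illustration.

    import netaddr

    # hypothetical tenant subnets; 100.0.1.0/24 plays the role of a bridge subnet
    tenant_subnet_cidrs = ['10.0.1.0/24', '10.0.2.0/24', '100.0.1.0/24']
    bridge_net = netaddr.IPNetwork('100.0.0.0/8')

    expanded_prefixes = [cidr for cidr in tenant_subnet_cidrs
                         if netaddr.IPNetwork(cidr) not in bridge_net]
    # expanded_prefixes == ['10.0.1.0/24', '10.0.2.0/24']; each prefix becomes a
    # bottom rule with remote_group_id=None and remote_ip_prefix set to it,
    # which is what test_post and test_post_across_pods below assert
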
+import copy import datetime import mock from mock import patch import pecan import unittest +import neutronclient.common.exceptions as q_exceptions from oslo_utils import uuidutils from tricircle.common import context +from tricircle.common import lock_handle from tricircle.db import api from tricircle.db import core from tricircle.db import models @@ -31,11 +34,24 @@ from tricircle.nova_apigw.controllers import server TOP_NETS = [] TOP_SUBNETS = [] TOP_PORTS = [] -BOTTOM_NETS = [] -BOTTOM_SUBNETS = [] -BOTTOM_PORTS = [] -RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS, - BOTTOM_NETS, BOTTOM_SUBNETS, BOTTOM_PORTS] +TOP_SGS = [] +BOTTOM1_NETS = [] +BOTTOM1_SUBNETS = [] +BOTTOM1_PORTS = [] +BOTTOM1_SGS = [] +BOTTOM2_NETS = [] +BOTTOM2_SUBNETS = [] +BOTTOM2_PORTS = [] +BOTTOM2_SGS = [] + +BOTTOM_NETS = BOTTOM1_NETS +BOTTOM_SUBNETS = BOTTOM1_SUBNETS +BOTTOM_PORTS = BOTTOM1_PORTS +BOTTOM_SGS = BOTTOM1_SGS + +RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS, TOP_SGS, + BOTTOM1_NETS, BOTTOM1_SUBNETS, BOTTOM1_PORTS, BOTTOM1_SGS, + BOTTOM2_NETS, BOTTOM2_SUBNETS, BOTTOM2_PORTS, BOTTOM2_SGS] class FakeException(Exception): @@ -60,16 +76,28 @@ class FakeClient(object): _res_map = {'top': {'network': TOP_NETS, 'subnet': TOP_SUBNETS, - 'port': TOP_PORTS}, + 'port': TOP_PORTS, + 'security_group': TOP_SGS}, 'bottom': {'network': BOTTOM_NETS, 'subnet': BOTTOM_SUBNETS, - 'port': BOTTOM_PORTS}} + 'port': BOTTOM_PORTS, + 'security_group': BOTTOM_SGS}, + 'bottom2': {'network': BOTTOM2_NETS, + 'subnet': BOTTOM2_SUBNETS, + 'port': BOTTOM2_PORTS, + 'security_group': BOTTOM2_SGS}} def __init__(self, pod_name): self.pod_name = pod_name + self.ip_suffix_gen = self._get_ip_suffix() def _get_res_list(self, _type): - pod = 'top' if self.pod_name == 't_region' else 'bottom' + if self.pod_name == 'b_region_2': + pod = 'bottom2' + elif self.pod_name == 't_region': + pod = 'top' + else: + pod = 'bottom' return self._res_map[pod][_type] def _check_port_ip_conflict(self, subnet_id, ip): @@ -88,6 +116,25 @@ class FakeClient(object): ip_dict = body[_type]['fixed_ips'][0] self._check_port_ip_conflict(ip_dict['subnet_id'], ip_dict['ip_address']) + if _type == 'security_group': + body[_type]['security_group_rules'] = [ + {'remote_group_id': None, + 'direction': 'egress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4', + 'id': uuidutils.generate_uuid()}, + {'remote_group_id': None, + 'direction': 'egress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv6', + 'id': uuidutils.generate_uuid()}, + ] res_list = self._get_res_list(_type) res = dict(body[_type]) res_list.append(res) @@ -109,6 +156,16 @@ class FakeClient(object): ret_list.append(res) return ret_list + @staticmethod + def _get_ip_suffix(): + # three elements should be enough + suffix_list = ['3', '4', '5'] + index = 0 + while True: + yield suffix_list[index] + index += 1 + index %= 3 + def create_ports(self, ctx, body): if 'fixed_ips' in body['port']: return self.create_resources('port', ctx, body) @@ -124,8 +181,9 @@ class FakeClient(object): ip = ip_prefix + '2' body['port']['mac_address'] = mac_prefix + '2' else: - ip = ip_prefix + '3' - body['port']['mac_address'] = mac_prefix + '3' + suffix = self.ip_suffix_gen.next() + ip = ip_prefix + suffix + body['port']['mac_address'] = mac_prefix + suffix fixed_ip_list.append({'ip_address': ip, 'subnet_id': subnet['id']}) body['port']['fixed_ips'] = fixed_ip_list @@ -134,6 +192,9 @@ class 
FakeClient(object): def list_ports(self, ctx, filters): return self.list_resources('port', ctx, filters) + def list_security_groups(self, ctx, filters): + return self.list_resources('security_group', ctx, filters) + def delete_ports(self, ctx, port_id): port_list = self._get_res_list('port') for i, port in enumerate(port_list): @@ -158,6 +219,40 @@ class FakeClient(object): # do nothing here since it will be mocked pass + def get_security_groups(self, ctx, sg_id): + sg = self.list_resources( + 'security_group', ctx, + [{'key': 'id', 'comparator': 'eq', 'value': sg_id}])[0] + # need to do a deep copy because we will traverse the security group's + # 'security_group_rules' field and make change to the group + ret_sg = copy.deepcopy(sg) + return ret_sg + + def create_security_group_rules(self, ctx, body): + for _rule in body['security_group_rules']: + sg_id = _rule['security_group_id'] + sg = self.list_resources( + 'security_group', ctx, + [{'key': 'id', 'comparator': 'eq', 'value': sg_id}])[0] + new_rule = copy.copy(_rule) + match_found = False + for rule in sg['security_group_rules']: + old_rule = copy.copy(rule) + if new_rule == old_rule: + match_found = True + break + if match_found: + raise q_exceptions.Conflict() + sg['security_group_rules'].append(new_rule) + + def delete_security_group_rules(self, ctx, rule_id): + res_list = self._get_res_list('security_group') + for sg in res_list: + for rule in sg['security_group_rules']: + if rule['id'] == rule_id: + sg['security_group_rules'].remove(rule) + return + class ServerTest(unittest.TestCase): def setUp(self): @@ -167,14 +262,23 @@ class ServerTest(unittest.TestCase): self.project_id = 'test_project' self.controller = FakeServerController(self.project_id) - def _prepare_pod(self): + def _prepare_pod(self, bottom_pod_num=1): t_pod = {'pod_id': 't_pod_uuid', 'pod_name': 't_region', 'az_name': ''} - b_pod = {'pod_id': 'b_pod_uuid', 'pod_name': 'b_region', - 'az_name': 'b_az'} api.create_pod(self.context, t_pod) - api.create_pod(self.context, b_pod) - return t_pod, b_pod + if bottom_pod_num == 1: + b_pod = {'pod_id': 'b_pod_uuid', 'pod_name': 'b_region', + 'az_name': 'b_az'} + api.create_pod(self.context, b_pod) + return t_pod, b_pod + b_pods = [] + for i in xrange(1, bottom_pod_num + 1): + b_pod = {'pod_id': 'b_pod_%d_uuid' % i, + 'pod_name': 'b_region_%d' % i, + 'az_name': 'b_az_%d' % i} + api.create_pod(self.context, b_pod) + b_pods.append(b_pod) + return t_pod, b_pods def test_get_or_create_route(self): t_pod, b_pod = self._prepare_pod() @@ -190,9 +294,9 @@ class ServerTest(unittest.TestCase): t_pod, b_pod = self._prepare_pod() self.controller._get_or_create_route( self.context, b_pod, 'test_top_id', 'port') - route, is_own = self.controller._get_or_create_route( + route, status = self.controller._get_or_create_route( self.context, b_pod, 'test_top_id', 'port') - self.assertFalse(is_own) + self.assertEqual(lock_handle.NONE_DONE, status) self.assertIsNone(route) def test_get_or_create_route_conflict_expire(self): @@ -223,9 +327,9 @@ class ServerTest(unittest.TestCase): route['id'], {'updated_at': update_time}) # insert a fake bottom port BOTTOM_PORTS.append({'id': 'test_bottom_id', 'name': 'test_top_id'}) - new_route, is_own = self.controller._get_or_create_route( + new_route, status = self.controller._get_or_create_route( self.context, b_pod, 'test_top_id', 'port') - self.assertFalse(is_own) + self.assertEqual(lock_handle.RES_DONE, status) self.assertEqual('test_top_id', new_route['top_id']) self.assertEqual('test_bottom_id', 
new_route['bottom_id']) self.assertEqual('port', new_route['resource_type']) @@ -235,7 +339,7 @@ class ServerTest(unittest.TestCase): t_pod, b_pod = self._prepare_pod() port = {'id': 'top_port_id'} body = {'port': {'name': 'top_port_id'}} - bottom_port_id = self.controller._prepare_neutron_element( + _, bottom_port_id = self.controller._prepare_neutron_element( self.context, b_pod, port, 'port', body) mappings = api.get_bottom_mappings_by_top_id(self.context, 'top_port_id', 'port') @@ -382,59 +486,6 @@ class ServerTest(unittest.TestCase): def test_handle_network_dhcp_port_exist_diff_ip(self): self._test_handle_network_dhcp_port('10.0.0.4') - @patch.object(FakeClient, 'create_servers') - @patch.object(context, 'extract_context_from_environ') - def test_post(self, mock_ctx, mock_create): - t_pod, b_pod = self._prepare_pod() - top_net_id = 'top_net_id' - top_subnet_id = 'top_subnet_id' - t_net = {'id': top_net_id} - t_subnet = {'id': top_subnet_id, - 'network_id': top_net_id, - 'ip_version': 4, - 'cidr': '10.0.0.0/24', - 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, - 'enable_dhcp': True} - TOP_NETS.append(t_net) - TOP_SUBNETS.append(t_subnet) - - server_name = 'test_server' - image_id = 'image_id' - flavor_id = 1 - body = { - 'server': { - 'name': server_name, - 'imageRef': image_id, - 'flavorRef': flavor_id, - 'availability_zone': b_pod['az_name'], - 'networks': [{'uuid': top_net_id}] - } - } - mock_create.return_value = {'id': 'bottom_server_id'} - mock_ctx.return_value = self.context - - server_dict = self.controller.post(**body)['server'] - - bottom_port_id = '' - for port in BOTTOM_PORTS: - if 'device_id' not in port: - bottom_port_id = port['id'] - mock_create.assert_called_with(self.context, name=server_name, - image=image_id, flavor=flavor_id, - nics=[{'port-id': bottom_port_id}]) - with self.context.session.begin(): - routes = core.query_resource(self.context, models.ResourceRouting, - [{'key': 'resource_type', - 'comparator': 'eq', - 'value': 'server'}], []) - self.assertEqual(1, len(routes)) - self.assertEqual(server_dict['id'], routes[0]['top_id']) - self.assertEqual(server_dict['id'], routes[0]['bottom_id']) - self.assertEqual(b_pod['pod_id'], routes[0]['pod_id']) - self.assertEqual(self.project_id, routes[0]['project_id']) - @patch.object(pecan, 'abort') @patch.object(FakeClient, 'create_servers') @patch.object(context, 'extract_context_from_environ') @@ -442,6 +493,7 @@ class ServerTest(unittest.TestCase): t_pod, b_pod = self._prepare_pod() top_net_id = 'top_net_id' top_subnet_id = 'top_subnet_id' + top_sg_id = 'top_sg_id' t_net = {'id': top_net_id} t_subnet = {'id': top_subnet_id, 'network_id': top_net_id, @@ -451,8 +503,27 @@ class ServerTest(unittest.TestCase): 'allocation_pools': {'start': '10.0.0.2', 'end': '10.0.0.254'}, 'enable_dhcp': True} + t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', + 'tenant_id': self.project_id, + 'security_group_rules': [ + {'remote_group_id': top_sg_id, + 'direction': 'ingress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + {'remote_group_id': None, + 'direction': 'egress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + ]} TOP_NETS.append(t_net) TOP_SUBNETS.append(t_subnet) + TOP_SGS.append(t_sg) server_name = 'test_server' image_id = 'image_id' @@ -486,6 +557,308 @@ class ServerTest(unittest.TestCase): calls = 
[mock.call(400, msg), mock.call(400, msg)] mock_abort.assert_has_calls(calls) + @patch.object(FakeClient, 'create_servers') + @patch.object(context, 'extract_context_from_environ') + def test_post(self, mock_ctx, mock_create): + t_pod, b_pod = self._prepare_pod() + top_net_id = 'top_net_id' + top_subnet_id = 'top_subnet_id' + top_sg_id = 'top_sg_id' + + t_net = {'id': top_net_id} + t_subnet = {'id': top_subnet_id, + 'network_id': top_net_id, + 'ip_version': 4, + 'cidr': '10.0.0.0/24', + 'gateway_ip': '10.0.0.1', + 'allocation_pools': {'start': '10.0.0.2', + 'end': '10.0.0.254'}, + 'enable_dhcp': True} + t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', + 'tenant_id': self.project_id, + 'security_group_rules': [ + {'remote_group_id': top_sg_id, + 'direction': 'ingress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + {'remote_group_id': None, + 'direction': 'egress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + ]} + TOP_NETS.append(t_net) + TOP_SUBNETS.append(t_subnet) + TOP_SGS.append(t_sg) + + server_name = 'test_server' + image_id = 'image_id' + flavor_id = 1 + body = { + 'server': { + 'name': server_name, + 'imageRef': image_id, + 'flavorRef': flavor_id, + 'availability_zone': b_pod['az_name'], + 'networks': [{'uuid': top_net_id}] + } + } + mock_create.return_value = {'id': 'bottom_server_id'} + mock_ctx.return_value = self.context + + server_dict = self.controller.post(**body)['server'] + + for port in BOTTOM_PORTS: + if 'device_id' not in port: + bottom_port_id = port['id'] + for sg in BOTTOM_SGS: + if sg['name'] == top_sg_id: + bottom_sg = sg + + mock_create.assert_called_with(self.context, name=server_name, + image=image_id, flavor=flavor_id, + nics=[{'port-id': bottom_port_id}], + security_groups=[bottom_sg['id']]) + # make sure remote group is extended to ip addresses + for rule in bottom_sg['security_group_rules']: + if rule['ethertype'] == 'IPv4' and rule['direction'] == 'ingress': + self.assertIsNone(rule['remote_group_id']) + self.assertEqual('10.0.0.0/24', rule['remote_ip_prefix']) + with self.context.session.begin(): + routes = core.query_resource(self.context, models.ResourceRouting, + [{'key': 'resource_type', + 'comparator': 'eq', + 'value': 'server'}], []) + self.assertEqual(1, len(routes)) + self.assertEqual(server_dict['id'], routes[0]['top_id']) + self.assertEqual(server_dict['id'], routes[0]['bottom_id']) + self.assertEqual(b_pod['pod_id'], routes[0]['pod_id']) + self.assertEqual(self.project_id, routes[0]['project_id']) + + # make sure security group mapping is built + routes = core.query_resource(self.context, models.ResourceRouting, + [{'key': 'resource_type', + 'comparator': 'eq', + 'value': 'security_group'}], []) + self.assertEqual(1, len(routes)) + self.assertEqual(top_sg_id, routes[0]['top_id']) + self.assertEqual(bottom_sg['id'], routes[0]['bottom_id']) + self.assertEqual(b_pod['pod_id'], routes[0]['pod_id']) + self.assertEqual(self.project_id, routes[0]['project_id']) + + @patch.object(FakeClient, 'create_servers') + @patch.object(context, 'extract_context_from_environ') + def test_post_exception_retry(self, mock_ctx, mock_server): + t_pod, b_pod = self._prepare_pod() + top_net_id = 'top_net_id' + top_subnet_id = 'top_subnet_id' + top_sg_id = 'top_sg_id' + + t_net = {'id': top_net_id} + t_subnet = {'id': top_subnet_id, + 'network_id': top_net_id, + 'ip_version': 4, + 'cidr': 
'10.0.0.0/24', + 'gateway_ip': '10.0.0.1', + 'allocation_pools': {'start': '10.0.0.2', + 'end': '10.0.0.254'}, + 'enable_dhcp': True} + t_sg = {'id': top_sg_id, 'name': 'test_sg', 'description': '', + 'tenant_id': self.project_id, + 'security_group_rules': [ + {'remote_group_id': None, + 'direction': 'ingress', + 'remote_ip_prefix': '10.0.1.0/24', + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + {'remote_group_id': None, + 'direction': 'egress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + ]} + TOP_NETS.append(t_net) + TOP_SUBNETS.append(t_subnet) + TOP_SGS.append(t_sg) + + server_name = 'test_server' + image_id = 'image_id' + flavor_id = 1 + body = { + 'server': { + 'name': server_name, + 'imageRef': image_id, + 'flavorRef': flavor_id, + 'availability_zone': b_pod['az_name'], + 'networks': [{'uuid': top_net_id}], + 'security_groups': [{'name': 'test_sg'}] + } + } + mock_server.return_value = {'id': 'bottom_server_id'} + mock_ctx.return_value = self.context + + create_security_group_rules = FakeClient.create_security_group_rules + FakeClient.create_security_group_rules = mock.Mock() + FakeClient.create_security_group_rules.side_effect = \ + q_exceptions.ConnectionFailed + + self.assertRaises(q_exceptions.ConnectionFailed, self.controller.post, + **body) + with self.context.session.begin(): + routes = core.query_resource( + self.context, models.ResourceRouting, + [{'key': 'top_sg_id', 'comparator': 'eq', + 'value': t_sg['id']}, + {'key': 'pod_id', 'comparator': 'eq', + 'value': 'b_pod_uuid'}], []) + self.assertIsNone(routes[0]['bottom_id']) + + # test we can redo after exception + FakeClient.create_security_group_rules = create_security_group_rules + self.controller.post(**body) + + for port in BOTTOM_PORTS: + if 'device_id' not in port: + bottom_port_id = port['id'] + for sg in BOTTOM_SGS: + if sg['name'] == top_sg_id: + bottom_sg = sg + + mock_server.assert_called_with(self.context, name=server_name, + image=image_id, flavor=flavor_id, + nics=[{'port-id': bottom_port_id}], + security_groups=[bottom_sg['id']]) + + @patch.object(FakeClient, 'create_servers') + @patch.object(context, 'extract_context_from_environ') + def test_post_across_pods(self, mock_ctx, mock_create): + t_pod, b_pods = self._prepare_pod(2) + b_pod1, b_pod2 = b_pods + top_net1_id = 'top_net1_id' + top_subnet1_id = 'top_subnet1_id' + top_net2_id = 'top_net2_id' + top_subnet2_id = 'top_subnet2_id' + top_sg_id = 'top_sg_id' + + t_net1 = {'id': top_net1_id} + t_subnet1 = {'id': top_subnet1_id, + 'tenant_id': self.project_id, + 'network_id': top_net1_id, + 'ip_version': 4, + 'cidr': '10.0.1.0/24', + 'gateway_ip': '10.0.1.1', + 'allocation_pools': {'start': '10.0.1.2', + 'end': '10.0.1.254'}, + 'enable_dhcp': True} + t_net2 = {'id': top_net2_id} + t_subnet2 = {'id': top_subnet2_id, + 'tenant_id': self.project_id, + 'network_id': top_net2_id, + 'ip_version': 4, + 'cidr': '10.0.2.0/24', + 'gateway_ip': '10.0.2.1', + 'allocation_pools': {'start': '10.0.2.2', + 'end': '10.0.2.254'}, + 'enable_dhcp': True} + t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', + 'tenant_id': self.project_id, + 'security_group_rules': [ + {'remote_group_id': top_sg_id, + 'direction': 'ingress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + {'remote_group_id': None, + 'direction': 'egress', + 'remote_ip_prefix': None, + 'protocol': 
None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + ]} + TOP_NETS.append(t_net1) + TOP_SUBNETS.append(t_subnet1) + TOP_NETS.append(t_net2) + TOP_SUBNETS.append(t_subnet2) + TOP_SGS.append(t_sg) + + image_id = 'image_id' + flavor_id = 1 + mock_ctx.return_value = self.context + + body = { + 'server': { + 'name': 'test_server1', + 'imageRef': image_id, + 'flavorRef': flavor_id, + 'availability_zone': b_pod1['az_name'], + 'networks': [{'uuid': top_net1_id}] + } + } + mock_create.return_value = {'id': 'bottom_server1_id'} + self.controller.post(**body)['server'] + + body = { + 'server': { + 'name': 'test_server2', + 'imageRef': image_id, + 'flavorRef': flavor_id, + 'availability_zone': b_pod2['az_name'], + 'networks': [{'uuid': top_net2_id}] + } + } + mock_create.return_value = {'id': 'bottom_server2_id'} + self.controller.post(**body)['server'] + + for port in BOTTOM1_PORTS: + if 'device_id' not in port: + bottom_port1_id = port['id'] + for port in BOTTOM2_PORTS: + if 'device_id' not in port: + bottom_port2_id = port['id'] + for sg in BOTTOM1_SGS: + if sg['name'] == top_sg_id: + bottom_sg1 = sg + for sg in BOTTOM2_SGS: + if sg['name'] == top_sg_id: + bottom_sg2 = sg + + calls = [mock.call(self.context, name='test_server1', image=image_id, + flavor=flavor_id, + nics=[{'port-id': bottom_port1_id}], + security_groups=[bottom_sg1['id']]), + mock.call(self.context, name='test_server2', image=image_id, + flavor=flavor_id, + nics=[{'port-id': bottom_port2_id}], + security_groups=[bottom_sg2['id']])] + mock_create.assert_has_calls(calls) + + # make sure remote group is extended to ip addresses + expected_ips = ['10.0.1.0/24', '10.0.2.0/24'] + ips = [] + for rule in bottom_sg1['security_group_rules']: + if rule['ethertype'] == 'IPv4' and rule['direction'] == 'ingress': + self.assertIsNone(rule['remote_group_id']) + ips.append(rule['remote_ip_prefix']) + self.assertEqual(expected_ips, ips) + ips = [] + for rule in bottom_sg2['security_group_rules']: + if rule['ethertype'] == 'IPv4' and rule['direction'] == 'ingress': + self.assertIsNone(rule['remote_group_id']) + ips.append(rule['remote_ip_prefix']) + self.assertEqual(expected_ips, ips) + def tearDown(self): core.ModelBase.metadata.drop_all(core.get_engine()) for res in RES_LIST:
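
[Reviewer note] A final sketch (illustrative only) of the expire-and-redo behaviour exercised by test_post_exception_retry: when a bottom rule operation fails, _handle_sg_rule_for_new_group clears bottom_id and stamps the routing entry with constants.expire_time, so the next request treats the entry as stale, recreates it and rebuilds the rules. The threshold value below is an assumption; the real one lives inside get_or_create_route.

    import datetime

    EXPIRE_TIME = datetime.datetime(2000, 1, 1)  # mirrors constants.expire_time
    ROUTE_EXPIRE_THRESHOLD = 30  # seconds, assumed value

    def looks_stale(route):
        """Return True if a routing entry without bottom_id should be redone."""
        if route['bottom_id']:
            return False
        route_time = route['updated_at'] or route['created_at']
        age = datetime.datetime.utcnow() - route_time
        return age > datetime.timedelta(seconds=ROUTE_EXPIRE_THRESHOLD)

    # state written by the failure handler in _handle_sg_rule_for_new_group
    failed_route = {'bottom_id': None,
                    'created_at': EXPIRE_TIME,
                    'updated_at': EXPIRE_TIME}
    assert looks_stale(failed_route)  # so the retry rebuilds the mapping and rules
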