Orchestration of MaaS enlistment (#42)
* WIP - Initial API implementation with Falcon

* API service for /designs and /tasks endpoints

    REST API using the falcon library
    Middleware for authentication (stubbed until Keystone is available)
    Middleware for context and logging
    Request logging and initial error logging
    README updates

* Cleanup readme formatting

* Rename helm_drydock to drydock_provisioner

* Fixed missing except statement

* Reorganize API paths in a list

    Fix duplication of context init
    Add API version prefix to URLs

* DRYD-2 MVP - phase 1 - node enlistment

    Add node driver task for IdentifyNode
    Implement MaaS API interface for Machines and Interfaces
parent 2b5f575e13
commit 80b3a1e99d
@@ -1,6 +1,13 @@
-# helm_drydock
+# drydock_provisioner

 A python REST orchestrator to translate a YAML host topology to a provisioned set of hosts and provide a set of cloud-init post-provisioning instructions.

+To run:
+
+    $ virtualenv -p python3 /var/tmp/drydock
+    $ . /var/tmp/drydock/bin/activate
+    $ python setup.py install
+    $ uwsgi --http :9000 -w drydock_provisioner.drydock --callable drydock --enable-threads -L
+
 ## Modular service

 ### Design Consumer ###
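For illustration only (not part of this change): once the uwsgi process above is listening on port 9000, a quick smoke check of the new API can be made with the requests library. The token '42' is the temporary stub accepted by AuthMiddleware in this commit.

import requests

resp = requests.get('http://localhost:9000/api/v1.0/designs',
                    headers={'X-Auth-Token': '42'})  # stub token mapped to the 'scott'/'user' identity
print(resp.status_code, resp.json())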
@@ -21,13 +21,24 @@
 class DrydockConfig(object):

+    global_config = {
+        'log_level': 'DEBUG',
+    }
+
     node_driver = {
         'maasdriver': {
-            'api_key': 'KTMHgA42cNSMnfmJ82:cdg4yQUhp542aHsCTV:7Dc2KB9hQpWq3LfQAAAKAj6wdg22yWxZ',
-            'api_url': 'http://localhost:5240/MAAS/api/2.0/'
+            'api_key': 'UTBfxGL69XWjaffQek:NuKZSYGuBs6ZpYC6B9:byvXBgY8CsW5VQKxGdQjvJXtjXwr5G4U',
+            'api_url': 'http://10.23.19.16:30773/MAAS/api/2.0/',
         },
     }

     ingester_config = {
-        'plugins': ['helm_drydock.ingester.plugins.yaml']
+        'plugins': ['drydock_provisioner.ingester.plugins.yaml.YamlIngester'],
     }

+    orchestrator_config = {
+        'drivers': {
+            'oob': 'drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver',
+            'node': 'drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver',
+        }
+    }
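For reference, this is how the rest of the commit consumes DrydockConfig: the MaaS node driver reads its own section by driver key, and the middleware reads global_config.

import drydock_provisioner.config as config

# The maasdriver section configured above
maas_conf = config.DrydockConfig.node_driver['maasdriver']
maas_url = maas_conf['api_url']
maas_key = maas_conf['api_key']

# Global logging level consulted by ContextMiddleware
log_level = config.DrydockConfig.global_config.get('log_level', 'ERROR')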
drydock_provisioner/control/api.py (new file, 51 lines)
@@ -0,0 +1,51 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon

from .designs import *
from .tasks import *

from .base import DrydockRequest
from .middleware import AuthMiddleware, ContextMiddleware, LoggingMiddleware

def start_api(state_manager=None, ingester=None, orchestrator=None):
    """
    Start the Drydock API service

    :param state_manager: Instance of drydock_provisioner.statemgmt.manager.DesignState for accessing
                          state persistence
    :param ingester: Instance of drydock_provisioner.ingester.ingester.Ingester for handling design
                     part input
    """
    control_api = falcon.API(request_type=DrydockRequest,
                             middleware=[AuthMiddleware(), ContextMiddleware(), LoggingMiddleware()])

    # v1.0 of Drydock API
    v1_0_routes = [
        # API for managing orchestrator tasks
        ('/tasks', TasksResource(state_manager=state_manager, orchestrator=orchestrator)),
        ('/tasks/{task_id}', TaskResource(state_manager=state_manager)),

        # API for managing site design data
        ('/designs', DesignsResource(state_manager=state_manager)),
        ('/designs/{design_id}', DesignResource(state_manager=state_manager, orchestrator=orchestrator)),
        ('/designs/{design_id}/parts', DesignsPartsResource(state_manager=state_manager, ingester=ingester)),
        ('/designs/{design_id}/parts/{kind}', DesignsPartsKindsResource(state_manager=state_manager)),
        ('/designs/{design_id}/parts/{kind}/{name}', DesignsPartResource(state_manager=state_manager, orchestrator=orchestrator))
    ]

    for path, res in v1_0_routes:
        control_api.add_route('/api/v1.0' + path, res)

    return control_api
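A minimal sketch (not part of this commit) of exercising the WSGI app that start_api() returns in-process with falcon's test helpers; the state_manager, ingester and orchestrator arguments are whatever instances the caller normally wires in.

from falcon import testing

from drydock_provisioner.control.api import start_api

def smoke_test(state_manager, ingester, orchestrator):
    api = start_api(state_manager=state_manager, ingester=ingester,
                    orchestrator=orchestrator)
    client = testing.TestClient(api)

    # '42' maps to the stubbed 'scott'/'user' identity in AuthMiddleware
    resp = client.simulate_get('/api/v1.0/designs',
                               headers={'X-Auth-Token': '42'})
    return resp.status, resp.json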
drydock_provisioner/control/base.py (new file, 144 lines)
@@ -0,0 +1,144 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
import falcon.request as request
import uuid
import json
import logging

import drydock_provisioner.error as errors

class BaseResource(object):

    def __init__(self):
        self.logger = logging.getLogger('control')
        self.authorized_roles = []

    def on_options(self, req, resp):
        self_attrs = dir(self)
        methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'PATCH']
        allowed_methods = []

        for m in methods:
            if 'on_' + m.lower() in self_attrs:
                allowed_methods.append(m)

        resp.headers['Allow'] = ','.join(allowed_methods)
        resp.status = falcon.HTTP_200

    # For authorizing access at the Resource level. A Resource requiring
    # finer grained authorization at the method or instance level must
    # implement that in the request handlers
    def authorize_roles(self, role_list):
        authorized = set(self.authorized_roles)
        applied = set(role_list)

        if authorized.isdisjoint(applied):
            return False
        else:
            return True

    def req_json(self, req):
        if req.content_length is None or req.content_length == 0:
            return None

        if req.content_type is not None and req.content_type.lower() == 'application/json':
            raw_body = req.stream.read(req.content_length or 0)

            if raw_body is None:
                return None

            try:
                json_body = json.loads(raw_body.decode('utf-8'))
                return json_body
            except json.JSONDecodeError as jex:
                raise errors.InvalidFormat("%s: Invalid JSON in body: %s" % (req.path, jex))
        else:
            raise errors.InvalidFormat("Requires application/json payload")

    def return_error(self, resp, status_code, message="", retry=False):
        resp.body = json.dumps({'type': 'error', 'message': message, 'retry': retry})
        resp.status = status_code

    def log_error(self, ctx, level, msg):
        extra = {
            'user': 'N/A',
            'req_id': 'N/A',
            'external_ctx': 'N/A'
        }

        if ctx is not None:
            extra = {
                'user': ctx.user,
                'req_id': ctx.req_id,
                'external_ctx': ctx.external_marker,
            }

        self.logger.log(level, msg, extra=extra)

    def debug(self, ctx, msg):
        self.log_error(ctx, logging.DEBUG, msg)

    def info(self, ctx, msg):
        self.log_error(ctx, logging.INFO, msg)

    def warn(self, ctx, msg):
        self.log_error(ctx, logging.WARN, msg)

    def error(self, ctx, msg):
        self.log_error(ctx, logging.ERROR, msg)


class StatefulResource(BaseResource):

    def __init__(self, state_manager=None):
        super(StatefulResource, self).__init__()

        if state_manager is None:
            self.error(None, "StatefulResource:init - StatefulResources require a state manager be set")
            raise ValueError("StatefulResources require a state manager be set")

        self.state_manager = state_manager


class DrydockRequestContext(object):

    def __init__(self):
        self.log_level = 'ERROR'
        self.user = None
        self.roles = ['anyone']
        self.req_id = str(uuid.uuid4())
        self.external_marker = None

    def set_log_level(self, level):
        if level in ['error', 'info', 'debug']:
            self.log_level = level

    def set_user(self, user):
        self.user = user

    def add_role(self, role):
        self.roles.append(role)

    def add_roles(self, roles):
        self.roles.extend(roles)

    def remove_role(self, role):
        self.roles = [x for x in self.roles
                      if x != role]

    def set_external_marker(self, marker):
        self.external_marker = str(marker)[:20]

class DrydockRequest(request.Request):
    context_type = DrydockRequestContext
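A sketch (not in this commit) of how a concrete endpoint is expected to build on these base classes: declare the roles that may reach it and lean on the shared JSON and error helpers.

import falcon
import json

from drydock_provisioner.control.base import StatefulResource

class ExampleResource(StatefulResource):

    def __init__(self, **kwargs):
        super(ExampleResource, self).__init__(**kwargs)
        self.authorized_roles = ['user']   # checked by AuthMiddleware.process_resource

    def on_post(self, req, resp):
        doc = self.req_json(req)           # raises InvalidFormat on non-JSON payloads
        if doc is None:
            self.return_error(resp, falcon.HTTP_400, message="Empty body", retry=False)
            return
        resp.body = json.dumps({'received': list(doc.keys())})
        resp.status = falcon.HTTP_200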
drydock_provisioner/control/designs.py (new file, 164 lines)
@@ -0,0 +1,164 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
import json
import uuid
import logging

import drydock_provisioner.objects as hd_objects
import drydock_provisioner.error as errors

from .base import StatefulResource

class DesignsResource(StatefulResource):

    def __init__(self, **kwargs):
        super(DesignsResource, self).__init__(**kwargs)
        self.authorized_roles = ['user']

    def on_get(self, req, resp):
        state = self.state_manager

        designs = list(state.designs.keys())

        resp.body = json.dumps(designs)
        resp.status = falcon.HTTP_200

    def on_post(self, req, resp):
        try:
            json_data = self.req_json(req)
            design = None
            if json_data is not None:
                base_design = json_data.get('base_design_id', None)

                if base_design is not None:
                    base_design = uuid.UUID(base_design)
                    design = hd_objects.SiteDesign(base_design_id=base_design)
            else:
                design = hd_objects.SiteDesign()
            design.assign_id()
            design.create(req.context, self.state_manager)

            resp.body = json.dumps(design.obj_to_simple())
            resp.status = falcon.HTTP_201
        except errors.StateError as stex:
            self.error(req.context, "Error updating persistence")
            self.return_error(resp, falcon.HTTP_500, message="Error updating persistence", retry=True)
        except errors.InvalidFormat as fex:
            self.error(req.context, str(fex))
            self.return_error(resp, falcon.HTTP_400, message=str(fex), retry=False)


class DesignResource(StatefulResource):

    def __init__(self, orchestrator=None, **kwargs):
        super(DesignResource, self).__init__(**kwargs)
        self.authorized_roles = ['user']
        self.orchestrator = orchestrator

    def on_get(self, req, resp, design_id):
        source = req.params.get('source', 'designed')

        try:
            design = None
            if source == 'compiled':
                design = self.orchestrator.get_effective_site(design_id)
            elif source == 'designed':
                design = self.orchestrator.get_described_site(design_id)

            resp.body = json.dumps(design.obj_to_simple())
        except errors.DesignError:
            self.error(req.context, "Design %s not found" % design_id)
            self.return_error(resp, falcon.HTTP_404, message="Design %s not found" % design_id, retry=False)

class DesignsPartsResource(StatefulResource):

    def __init__(self, ingester=None, **kwargs):
        super(DesignsPartsResource, self).__init__(**kwargs)
        self.ingester = ingester
        self.authorized_roles = ['user']

        if ingester is None:
            self.error(None, "DesignsPartsResource requires a configured Ingester instance")
            raise ValueError("DesignsPartsResource requires a configured Ingester instance")

    def on_post(self, req, resp, design_id):
        ingester_name = req.params.get('ingester', None)

        if ingester_name is None:
            self.error(None, "DesignsPartsResource POST requires parameter 'ingester'")
            self.return_error(resp, falcon.HTTP_400, message="POST requires parameter 'ingester'", retry=False)
        else:
            try:
                raw_body = req.stream.read(req.content_length or 0)
                if raw_body is not None and len(raw_body) > 0:
                    parsed_items = self.ingester.ingest_data(plugin_name=ingester_name, design_state=self.state_manager,
                                                             content=raw_body, design_id=design_id, context=req.context)
                    resp.status = falcon.HTTP_201
                    resp.body = json.dumps([x.obj_to_simple() for x in parsed_items])
                else:
                    self.return_error(resp, falcon.HTTP_400, message="Empty body not supported", retry=False)
            except ValueError:
                self.return_error(resp, falcon.HTTP_500, message="Error processing input", retry=False)
            except LookupError:
                self.return_error(resp, falcon.HTTP_400, message="Ingester %s not registered" % ingester_name, retry=False)


class DesignsPartsKindsResource(StatefulResource):
    def __init__(self, **kwargs):
        super(DesignsPartsKindsResource, self).__init__(**kwargs)
        self.authorized_roles = ['user']

    def on_get(self, req, resp, design_id, kind):
        pass

class DesignsPartResource(StatefulResource):

    def __init__(self, orchestrator=None, **kwargs):
        super(DesignsPartResource, self).__init__(**kwargs)
        self.authorized_roles = ['user']
        self.orchestrator = orchestrator

    def on_get(self, req, resp, design_id, kind, name):
        source = req.params.get('source', 'designed')

        try:
            design = None
            if source == 'compiled':
                design = self.orchestrator.get_effective_site(design_id)
            elif source == 'designed':
                design = self.orchestrator.get_described_site(design_id)

            part = None
            if kind == 'Site':
                part = design.get_site()
            elif kind == 'Network':
                part = design.get_network(name)
            elif kind == 'NetworkLink':
                part = design.get_network_link(name)
            elif kind == 'HardwareProfile':
                part = design.get_hardware_profile(name)
            elif kind == 'HostProfile':
                part = design.get_host_profile(name)
            elif kind == 'BaremetalNode':
                part = design.get_baremetal_node(name)
            else:
                self.error(req.context, "Kind %s unknown" % kind)
                self.return_error(resp, falcon.HTTP_404, message="Kind %s unknown" % kind, retry=False)
                return

            resp.body = json.dumps(part.obj_to_simple())
        except errors.DesignError as dex:
            self.error(req.context, str(dex))
            self.return_error(resp, falcon.HTTP_404, message=str(dex), retry=False)
drydock_provisioner/control/middleware.py (new file, 92 lines)
@@ -0,0 +1,92 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import falcon
import logging
import uuid

import drydock_provisioner.config as config

class AuthMiddleware(object):

    # Authentication
    def process_request(self, req, resp):
        ctx = req.context
        token = req.get_header('X-Auth-Token')

        user = self.validate_token(token)

        if user is not None:
            ctx.set_user(user)
            user_roles = self.role_list(user)
            ctx.add_roles(user_roles)
        else:
            ctx.add_role('anyone')

    # Authorization
    def process_resource(self, req, resp, resource, params):
        ctx = req.context

        if not resource.authorize_roles(ctx.roles):
            raise falcon.HTTPUnauthorized('Authentication required',
                                          ('This resource requires an authorized role.'))

    # Return the username associated with an authenticated token or None
    def validate_token(self, token):
        if token == '42':
            return 'scott'
        elif token == 'bigboss':
            return 'admin'
        else:
            return None

    # Return the list of roles assigned to the username
    # Roles need to be an enum
    def role_list(self, username):
        if username == 'scott':
            return ['user']
        elif username == 'admin':
            return ['user', 'admin']

class ContextMiddleware(object):

    def process_request(self, req, resp):
        ctx = req.context

        requested_logging = req.get_header('X-Log-Level')

        if (config.DrydockConfig.global_config.get('log_level', '') == 'DEBUG' or
            (requested_logging == 'DEBUG' and 'admin' in ctx.roles)):
            ctx.set_log_level('DEBUG')
        elif requested_logging == 'INFO':
            ctx.set_log_level('INFO')

        ext_marker = req.get_header('X-Context-Marker')

        ctx.set_external_marker(ext_marker if ext_marker is not None else '')

class LoggingMiddleware(object):

    def __init__(self):
        self.logger = logging.getLogger('drydock.control')

    def process_response(self, req, resp, resource, req_succeeded):
        ctx = req.context
        extra = {
            'user': ctx.user,
            'req_id': ctx.req_id,
            'external_ctx': ctx.external_marker,
        }
        resp.append_header('X-Drydock-Req', ctx.req_id)
        self.logger.info("%s - %s" % (req.uri, resp.status), extra=extra)
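Taken together, the three middleware classes let a caller shape authentication, logging level, and tracing per request purely through headers. An illustrative request (the token values are the temporary stubs above):

import requests

resp = requests.get('http://localhost:9000/api/v1.0/tasks',
                    headers={
                        'X-Auth-Token': 'bigboss',         # stub 'admin' identity
                        'X-Log-Level': 'DEBUG',            # honored for admin, or when global log_level is DEBUG
                        'X-Context-Marker': 'enlist-demo'  # stored on the context, truncated to 20 chars
                    })
print(resp.headers.get('X-Drydock-Req'))                   # request id echoed back by LoggingMiddleware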
drydock_provisioner/control/readme.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# Control #

This is the external facing API service to control the rest
of Drydock and query Drydock-managed data.

## v1.0 Endpoints ##

### /api/v1.0/tasks ###

POST - Create a new orchestration task and submit it for execution
GET - Get status of a task
DELETE - Cancel execution of a task if permitted

### /api/v1.0/designs ###

POST - Create a new site design so design parts can be added

### /api/v1.0/designs/{id} ###

GET - Get a current design if available. Param 'source=compiled' to calculate the inheritance chain and compile the effective design.

### /api/v1.0/designs/{id}/parts ###

POST - Submit a new design part to be ingested and added to this design
GET - View a currently defined design part
PUT - Replace an existing design part *Not Implemented*

### /api/v1.0/designs/{id}/parts/{kind}/{name} ###

GET - View a single design part. Param 'source=compiled' to calculate the inheritance chain and compile the effective configuration for the design part.
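A hypothetical end-to-end session against these endpoints, for illustration only. The response field name 'id', the ingester name 'yaml', and the action string are assumptions based on the resource code, and the token is the development stub.

import requests

base = 'http://localhost:9000/api/v1.0'
hdrs = {'X-Auth-Token': '42'}

# 1. Create an empty design (no body, so a bare SiteDesign is created)
design = requests.post(base + '/designs', headers=hdrs).json()
design_id = design.get('id')   # field name assumed from SiteDesign.obj_to_simple()

# 2. Ingest a YAML design part into the design
with open('site_topology.yaml', 'rb') as f:
    requests.post(base + '/designs/%s/parts' % design_id,
                  params={'ingester': 'yaml'},   # plugin name assumed; must match a registered ingester
                  headers=hdrs,
                  data=f.read())

# 3. Submit an orchestration task against the design
task = requests.post(base + '/tasks', headers=hdrs,
                     json={'sitename': 'demo_site',
                           'design_id': design_id,
                           'action': 'identify_node'}).json()   # action value is a placeholder for an OrchestratorAction
print(task)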
drydock_provisioner/control/tasks.py (new file, 79 lines)
@@ -0,0 +1,79 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
import json
import threading
import traceback

import drydock_provisioner.objects.task as obj_task
from .base import StatefulResource

class TasksResource(StatefulResource):

    def __init__(self, orchestrator=None, **kwargs):
        super(TasksResource, self).__init__(**kwargs)
        self.authorized_roles = ['user']
        self.orchestrator = orchestrator

    def on_get(self, req, resp):
        task_id_list = [str(x.get_id()) for x in self.state_manager.tasks]
        resp.body = json.dumps(task_id_list)

    def on_post(self, req, resp):
        try:
            json_data = self.req_json(req)

            sitename = json_data.get('sitename', None)
            design_id = json_data.get('design_id', None)
            action = json_data.get('action', None)

            if sitename is None or design_id is None or action is None:
                self.info(req.context, "Task creation requires fields sitename, design_id, action")
                self.return_error(resp, falcon.HTTP_400, message="Task creation requires fields sitename, design_id, action", retry=False)
                return

            task = self.orchestrator.create_task(obj_task.OrchestratorTask, site=sitename,
                                                 design_id=design_id, action=action)

            task_thread = threading.Thread(target=self.orchestrator.execute_task, args=[task.get_id()])
            task_thread.start()

            resp.body = json.dumps(task.to_dict())
            resp.status = falcon.HTTP_201
        except Exception as ex:
            self.error(req.context, "Unknown error: %s\n%s" % (str(ex), traceback.format_exc()))
            self.return_error(resp, falcon.HTTP_500, message="Unknown error", retry=False)


class TaskResource(StatefulResource):

    def __init__(self, orchestrator=None, **kwargs):
        super(TaskResource, self).__init__(**kwargs)
        self.authorized_roles = ['user']
        self.orchestrator = orchestrator

    def on_get(self, req, resp, task_id):
        try:
            task = self.state_manager.get_task(task_id)

            if task is None:
                self.info(req.context, "Task %s does not exist" % task_id)
                self.return_error(resp, falcon.HTTP_404, message="Task %s does not exist" % task_id, retry=False)
                return

            resp.body = json.dumps(task.to_dict())
            resp.status = falcon.HTTP_200
        except Exception as ex:
            self.error(req.context, "Unknown error: %s" % (str(ex)))
            self.return_error(resp, falcon.HTTP_500, message="Unknown error", retry=False)
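Because on_post only starts the execution thread and returns 201 immediately, callers are expected to poll the task until it finishes. A hedged polling sketch; the 'status' field name and the 'complete' terminal value are assumptions based on task.to_dict(), not confirmed by this diff.

import time
import requests

def wait_for_task(task_id, timeout=600):
    url = 'http://localhost:9000/api/v1.0/tasks/%s' % task_id
    deadline = time.time() + timeout
    while time.time() < deadline:
        task = requests.get(url, headers={'X-Auth-Token': '42'}).json()
        if str(task.get('status', '')).lower() == 'complete':   # field/value assumed
            return task
        time.sleep(5)
    raise TimeoutError('Task %s did not complete in time' % task_id)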
@@ -15,10 +15,10 @@ from threading import Thread, Lock
 import uuid
 import time

-import helm_drydock.objects.fields as hd_fields
-import helm_drydock.statemgmt as statemgmt
-import helm_drydock.objects.task as tasks
-import helm_drydock.error as errors
+import drydock_provisioner.objects.fields as hd_fields
+import drydock_provisioner.statemgmt as statemgmt
+import drydock_provisioner.objects.task as tasks
+import drydock_provisioner.error as errors

 # This is the interface for the orchestrator to access a driver
 # TODO Need to have each driver spin up a seperate thread to manage
@@ -13,10 +13,10 @@
 # limitations under the License.
 #

-import helm_drydock.objects.fields as hd_fields
-import helm_drydock.error as errors
+import drydock_provisioner.objects.fields as hd_fields
+import drydock_provisioner.error as errors

-from helm_drydock.drivers import ProviderDriver
+from drydock_provisioner.drivers import ProviderDriver

 class NodeDriver(ProviderDriver):

@@ -28,6 +28,7 @@ class NodeDriver(ProviderDriver):
                         hd_fields.OrchestratorAction.CreateStorageTemplate,
                         hd_fields.OrchestratorAction.CreateBootMedia,
                         hd_fields.OrchestratorAction.PrepareHardwareConfig,
+                        hd_fields.OrchestratorAction.IdentifyNode,
                         hd_fields.OrchestratorAction.ConfigureHardware,
                         hd_fields.OrchestratorAction.InterrogateNode,
                         hd_fields.OrchestratorAction.ApplyNodeNetworking,
drydock_provisioner/drivers/node/maasdriver/driver.py (new file, 447 lines)
@@ -0,0 +1,447 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import logging

import drydock_provisioner.error as errors
import drydock_provisioner.config as config
import drydock_provisioner.drivers as drivers
import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.objects.task as task_model

from drydock_provisioner.drivers.node import NodeDriver
from .api_client import MaasRequestFactory
import drydock_provisioner.drivers.node.maasdriver.models.fabric as maas_fabric
import drydock_provisioner.drivers.node.maasdriver.models.vlan as maas_vlan
import drydock_provisioner.drivers.node.maasdriver.models.subnet as maas_subnet
import drydock_provisioner.drivers.node.maasdriver.models.machine as maas_machine

class MaasNodeDriver(NodeDriver):

    def __init__(self, **kwargs):
        super(MaasNodeDriver, self).__init__(**kwargs)

        self.driver_name = "maasdriver"
        self.driver_key = "maasdriver"
        self.driver_desc = "MaaS Node Provisioning Driver"

        self.config = config.DrydockConfig.node_driver[self.driver_key]

        self.logger = logging.getLogger('drydock.nodedriver.maasdriver')

    def execute_task(self, task_id):
        task = self.state_manager.get_task(task_id)

        if task is None:
            raise errors.DriverError("Invalid task %s" % (task_id))

        if task.action not in self.supported_actions:
            raise errors.DriverError("Driver %s doesn't support task action %s"
                                     % (self.driver_desc, task.action))

        if task.action == hd_fields.OrchestratorAction.ValidateNodeServices:
            self.orchestrator.task_field_update(task.get_id(),
                                                status=hd_fields.TaskStatus.Running)
            maas_client = MaasRequestFactory(self.config['api_url'], self.config['api_key'])

            try:
                if maas_client.test_connectivity():
                    if maas_client.test_authentication():
                        self.orchestrator.task_field_update(task.get_id(),
                                                            status=hd_fields.TaskStatus.Complete,
                                                            result=hd_fields.ActionResult.Success)
                        return
            except errors.TransientDriverError as ex:
                result = {
                    'retry': True,
                    'detail': str(ex),
                }
                self.orchestrator.task_field_update(task.get_id(),
                                                    status=hd_fields.TaskStatus.Complete,
                                                    result=hd_fields.ActionResult.Failure,
                                                    result_details=result)
                return
            except errors.PersistentDriverError as ex:
                result = {
                    'retry': False,
                    'detail': str(ex),
                }
                self.orchestrator.task_field_update(task.get_id(),
                                                    status=hd_fields.TaskStatus.Complete,
                                                    result=hd_fields.ActionResult.Failure,
                                                    result_details=result)
                return
            except Exception as ex:
                result = {
                    'retry': False,
                    'detail': str(ex),
                }
                self.orchestrator.task_field_update(task.get_id(),
                                                    status=hd_fields.TaskStatus.Complete,
                                                    result=hd_fields.ActionResult.Failure,
                                                    result_details=result)
                return

        design_id = getattr(task, 'design_id', None)

        if design_id is None:
            raise errors.DriverError("No design ID specified in task %s" %
                                     (task_id))

        if task.site_name is None:
            raise errors.DriverError("No site specified for task %s." %
                                     (task_id))

        self.orchestrator.task_field_update(task.get_id(),
                                            status=hd_fields.TaskStatus.Running)

        site_design = self.orchestrator.get_effective_site(design_id)

        if task.action == hd_fields.OrchestratorAction.CreateNetworkTemplate:
            self.orchestrator.task_field_update(task.get_id(), status=hd_fields.TaskStatus.Running)

            subtask = self.orchestrator.create_task(task_model.DriverTask,
                            parent_task_id=task.get_id(), design_id=design_id,
                            action=task.action, site_name=task.site_name,
                            task_scope={'site': task.site_name})
            runner = MaasTaskRunner(state_manager=self.state_manager,
                                    orchestrator=self.orchestrator,
                                    task_id=subtask.get_id(), config=self.config)

            self.logger.info("Starting thread for task %s to create network templates" % (subtask.get_id()))

            runner.start()

            # TODO Figure out coherent system for putting all the timeouts in
            # the config
            runner.join(timeout=120)

            if runner.is_alive():
                result = {
                    'retry': False,
                    'detail': 'MaaS Network creation timed-out'
                }
                self.logger.warn("Thread for task %s timed out after 120s" % (subtask.get_id()))
                self.orchestrator.task_field_update(task.get_id(),
                                                    status=hd_fields.TaskStatus.Complete,
                                                    result=hd_fields.ActionResult.Failure,
                                                    result_detail=result)
            else:
                subtask = self.state_manager.get_task(subtask.get_id())
                self.logger.info("Thread for task %s completed - result %s" % (subtask.get_id(), subtask.get_result()))
                self.orchestrator.task_field_update(task.get_id(),
                                                    status=hd_fields.TaskStatus.Complete,
                                                    result=subtask.get_result())

            return
        elif task.action == hd_fields.OrchestratorAction.IdentifyNode:
            self.orchestrator.task_field_update(task.get_id(),
                                                status=hd_fields.TaskStatus.Running)

            subtasks = []

            result_detail = {
                'detail': []
            }

            for n in task.node_list:
                subtask = self.orchestrator.create_task(task_model.DriverTask,
                                parent_task_id=task.get_id(), design_id=design_id,
                                action=hd_fields.OrchestratorAction.IdentifyNode,
                                site_name=task.site_name,
                                task_scope={'site': task.site_name, 'node_names': [n]})
                runner = MaasTaskRunner(state_manager=self.state_manager,
                                        orchestrator=self.orchestrator,
                                        task_id=subtask.get_id(), config=self.config)

                self.logger.info("Starting thread for task %s to identify node %s" % (subtask.get_id(), n))

                runner.start()
                subtasks.append(subtask.get_id())

            running_subtasks = len(subtasks)
            attempts = 0
            worked = failed = False

            # TODO Add timeout to config
            while running_subtasks > 0 and attempts < 3:
                for t in subtasks:
                    subtask = self.state_manager.get_task(t)

                    if subtask.status == hd_fields.TaskStatus.Complete:
                        self.logger.info("Task %s to identify node %s complete - status %s" %
                                         (subtask.get_id(), n, subtask.get_result()))

                        result_detail['detail'].extend(subtask.result_detail['detail'])
                        running_subtasks = running_subtasks - 1

                        if subtask.result in [hd_fields.ActionResult.Success,
                                              hd_fields.ActionResult.PartialSuccess]:
                            worked = True
                        elif subtask.result in [hd_fields.ActionResult.Failure,
                                                hd_fields.ActionResult.PartialSuccess]:
                            failed = True

                time.sleep(1 * 60)
                attempts = attempts + 1

            if running_subtasks > 0:
                self.logger.warn("Time out for task %s before all subtask threads complete" % (task.get_id()))
                result = hd_fields.ActionResult.DependentFailure
                result_detail['detail'].append('Some subtasks did not complete before the timeout threshold')
            if worked and failed:
                result = hd_fields.ActionResult.PartialSuccess
            elif worked:
                result = hd_fields.ActionResult.Success
            else:
                result = hd_fields.ActionResult.Failure

            self.orchestrator.task_field_update(task.get_id(),
                                                status=hd_fields.TaskStatus.Complete,
                                                result=result,
                                                result_detail=result_detail)

class MaasTaskRunner(drivers.DriverTaskRunner):

    def __init__(self, config=None, **kwargs):
        super(MaasTaskRunner, self).__init__(**kwargs)

        self.driver_config = config
        self.logger = logging.getLogger('drydock.nodedriver.maasdriver')

    def execute_task(self):
        task_action = self.task.action

        self.orchestrator.task_field_update(self.task.get_id(),
                                            status=hd_fields.TaskStatus.Running,
                                            result=hd_fields.ActionResult.Incomplete)

        self.maas_client = MaasRequestFactory(self.driver_config['api_url'],
                                              self.driver_config['api_key'])

        site_design = self.orchestrator.get_effective_site(self.task.design_id)

        if task_action == hd_fields.OrchestratorAction.CreateNetworkTemplate:
            # Try to true up MaaS definitions of fabrics/vlans/subnets
            # with the networks defined in Drydock
            design_networks = site_design.networks

            subnets = maas_subnet.Subnets(self.maas_client)
            subnets.refresh()

            result_detail = {
                'detail': []
            }

            for n in design_networks:
                try:
                    subnet = subnets.singleton({'cidr': n.cidr})

                    if subnet is not None:
                        subnet.name = n.name
                        subnet.dns_servers = n.dns_servers

                        vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=subnet.fabric)
                        vlan_list.refresh()

                        vlan = vlan_list.select(subnet.vlan)

                        if vlan is not None:
                            if ((n.vlan_id is None and vlan.vid != 0) or
                                (n.vlan_id is not None and vlan.vid != n.vlan_id)):

                                # if the VLAN name matches, assume this is the correct resource
                                # and it needs to be updated
                                if vlan.name == n.name:
                                    vlan.set_vid(n.vlan_id)
                                    vlan.mtu = n.mtu
                                    vlan.update()
                                    result_detail['detail'].append("VLAN %s found for network %s, updated attributes"
                                                                   % (vlan.resource_id, n.name))
                                else:
                                    # Found a VLAN with the correct VLAN tag, update subnet to use it
                                    target_vlan = vlan_list.singleton({'vid': n.vlan_id if n.vlan_id is not None else 0})
                                    if target_vlan is not None:
                                        subnet.vlan = target_vlan.resource_id
                                    else:
                                        # This is a flag that after creating a fabric and
                                        # VLAN below, update the subnet
                                        subnet.vlan = None
                        else:
                            subnet.vlan = None

                        # Check if the routes have a default route
                        subnet.gateway_ip = n.get_default_gateway()

                        result_detail['detail'].append("Subnet %s found for network %s, updated attributes"
                                                       % (subnet.resource_id, n.name))

                    # Need to find or create a Fabric/Vlan for this subnet
                    if (subnet is None or (subnet is not None and subnet.vlan is None)):
                        fabric_list = maas_fabric.Fabrics(self.maas_client)
                        fabric_list.refresh()
                        fabric = fabric_list.singleton({'name': n.name})

                        vlan = None

                        if fabric is not None:
                            vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=fabric.resource_id)
                            vlan_list.refresh()

                            vlan = vlan_list.singleton({'vid': n.vlan_id if n.vlan_id is not None else 0})

                            if vlan is not None:
                                vlan.name = n.name
                                if getattr(n, 'mtu', None) is not None:
                                    vlan.mtu = n.mtu

                                if subnet is not None:
                                    subnet.vlan = vlan.resource_id
                                    subnet.update()

                                vlan.update()
                                result_detail['detail'].append("VLAN %s found for network %s, updated attributes"
                                                               % (vlan.resource_id, n.name))
                            else:
                                # Create a new VLAN in this fabric and assign subnet to it
                                vlan = maas_vlan.Vlan(self.maas_client, name=n.name,
                                                      vid=n.vlan_id if n.vlan_id is not None else 0,
                                                      mtu=getattr(n, 'mtu', None), fabric_id=fabric.resource_id)
                                vlan = vlan_list.add(vlan)

                                result_detail['detail'].append("VLAN %s created for network %s"
                                                               % (vlan.resource_id, n.name))
                                if subnet is not None:
                                    subnet.vlan = vlan.resource_id
                                    subnet.update()

                        else:
                            # Create new fabric and VLAN
                            fabric = maas_fabric.Fabric(self.maas_client, name=n.name)
                            fabric = fabric_list.add(fabric)
                            fabric_list.refresh()

                            result_detail['detail'].append("Fabric %s created for network %s"
                                                           % (fabric.resource_id, n.name))

                            vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=fabric.resource_id)
                            vlan_list.refresh()

                            # A new fabric comes with a single default VLAN. Retrieve it and update attributes
                            vlan = vlan_list.single()

                            vlan.name = n.name
                            vlan.vid = n.vlan_id if n.vlan_id is not None else 0
                            if getattr(n, 'mtu', None) is not None:
                                vlan.mtu = n.mtu

                            vlan.update()
                            result_detail['detail'].append("VLAN %s updated for network %s"
                                                           % (vlan.resource_id, n.name))

                            if subnet is not None:
                                # If subnet was found above, but needed attached to a new fabric/vlan then
                                # attach it
                                subnet.vlan = vlan.resource_id
                                subnet.update()

                        if subnet is None:
                            # If subnet did not exist, create it here and attach it to the fabric/VLAN
                            subnet = maas_subnet.Subnet(self.maas_client, name=n.name, cidr=n.cidr, fabric=fabric.resource_id,
                                                        vlan=vlan.resource_id, gateway_ip=n.get_default_gateway())

                            subnet_list = maas_subnet.Subnets(self.maas_client)
                            subnet = subnet_list.add(subnet)
                except ValueError as vex:
                    raise errors.DriverError("Inconsistent data from MaaS")

            subnet_list = maas_subnet.Subnets(self.maas_client)
            subnet_list.refresh()

            action_result = hd_fields.ActionResult.Incomplete

            success_rate = 0

            for n in design_networks:
                exists = subnet_list.query({'cidr': n.cidr})
                if len(exists) > 0:
                    subnet = exists[0]
                    if subnet.name == n.name:
                        success_rate = success_rate + 1
                    else:
                        success_rate = success_rate + 1
                else:
                    success_rate = success_rate + 1

            if success_rate == len(design_networks):
                action_result = hd_fields.ActionResult.Success
            elif success_rate == - (len(design_networks)):
                action_result = hd_fields.ActionResult.Failure
            else:
                action_result = hd_fields.ActionResult.PartialSuccess

            self.orchestrator.task_field_update(self.task.get_id(),
                                                status=hd_fields.TaskStatus.Complete,
                                                result=action_result,
                                                result_detail=result_detail)
        elif task_action == hd_fields.OrchestratorAction.IdentifyNode:
            try:
                machine_list = maas_machine.Machines(self.maas_client)
                machine_list.refresh()
            except:
                self.orchestrator.task_field_update(self.task.get_id(),
                                                    status=hd_fields.TaskStatus.Complete,
                                                    result=hd_fields.ActionResult.Failure,
                                                    result_detail={'detail': 'Error accessing MaaS Machines API', 'retry': True})
                return

            nodes = self.task.node_list

            result_detail = {'detail': []}

            worked = failed = False

            for n in nodes:
                try:
                    node = site_design.get_baremetal_node(n)
                    machine = machine_list.identify_baremetal_node(node)
                    if machine is not None:
                        worked = True
                        result_detail['detail'].append("Node %s identified in MaaS" % n)
                    else:
                        failed = True
                        result_detail['detail'].append("Node %s not found in MaaS" % n)
                except Exception as ex:
                    failed = True
                    result_detail['detail'].append("Error identifying node %s: %s" % (n, str(ex)))

            result = None
            if worked and failed:
                result = hd_fields.ActionResult.PartialSuccess
            elif worked:
                result = hd_fields.ActionResult.Success
            elif failed:
                result = hd_fields.ActionResult.Failure

            self.orchestrator.task_field_update(self.task.get_id(),
                                                status=hd_fields.TaskStatus.Complete,
                                                result=result,
                                                result_detail=result_detail)
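A rough sketch (not in this diff) of the expected hand-off for node enlistment: the orchestrator builds a DriverTask naming the nodes to enlist and then calls the node driver selected in DrydockConfig.orchestrator_config['drivers']['node']. How the orchestrator actually instantiates and holds the driver is an assumption here; the create_task call mirrors its use in driver.py above.

import drydock_provisioner.objects.fields as hd_fields
import drydock_provisioner.objects.task as task_model

def run_identify(orchestrator, state_manager, node_driver, design_id, site, nodes):
    # create_task signature mirrors its use in tasks.py and driver.py above
    task = orchestrator.create_task(task_model.DriverTask,
                                    design_id=design_id, site_name=site,
                                    action=hd_fields.OrchestratorAction.IdentifyNode,
                                    task_scope={'site': site, 'node_names': nodes})
    node_driver.execute_task(task.get_id())   # fans out one MaasTaskRunner thread per node
    return state_manager.get_task(task.get_id())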
@@ -13,8 +13,9 @@
 # limitations under the License.
 import json
 import re
+import logging

-import helm_drydock.error as errors
+import drydock_provisioner.error as errors
 """
 A representation of a MaaS REST resource. Should be subclassed
 for different resources and augmented with operations specific

@@ -28,6 +29,7 @@ class ResourceBase(object):

     def __init__(self, api_client, **kwargs):
         self.api_client = api_client
+        self.logger = logging.getLogger('drydock.drivers.maasdriver')

         for f in self.fields:
             if f in kwargs.keys():

@@ -143,13 +145,16 @@ class ResourceBase(object):
         return i

-"""
-A collection of MaaS resources.
-
-Rather than a simple list, we will key the collection on resource
-ID for more efficient access.
-"""
 class ResourceCollectionBase(object):
+    """
+    A collection of MaaS resources.
+
+    Rather than a simple list, we will key the collection on resource
+    ID for more efficient access.
+
+    :param api_client: An instance of api_client.MaasRequestFactory
+    """

     collection_url = ''
     collection_resource = ResourceBase

@@ -157,12 +162,13 @@ class ResourceCollectionBase(object):
     def __init__(self, api_client):
         self.api_client = api_client
         self.resources = {}
+        self.logger = logging.getLogger('drydock.drivers.maasdriver')

+    def interpolate_url(self):
         """
         Parse URL for placeholders and replace them with current
         instance values
         """
-    def interpolate_url(self):
         pattern = '\{([a-z_]+)\}'
         regex = re.compile(pattern)
         start = 0

@@ -250,8 +256,23 @@ class ResourceCollectionBase(object):
         return result

+    def singleton(self, query):
         """
-        If the collection has a single item, return it
+        A query that requires a single item response
+
+        :param query: A dict of k:v pairs defining the query parameters
+        """
+        result = self.query(query)
+
+        if len(result) > 1:
+            raise ValueError("Multiple results found")
+        elif len(result) == 1:
+            return result[0]
+
+        return None
+
+        """
+        If the collection contains a single item, return it
         """
     def single(self):
         if self.len() == 1:
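The new singleton() helper is what the rest of the driver leans on for "find exactly one" lookups. A small usage sketch, mirroring how driver.py resolves a subnet by CIDR:

import drydock_provisioner.drivers.node.maasdriver.models.subnet as maas_subnet

def find_subnet(maas_client, cidr):
    subnets = maas_subnet.Subnets(maas_client)
    subnets.refresh()
    # Returns the single match, None when nothing matches, and raises
    # ValueError if more than one resource matches the query.
    return subnets.singleton({'cidr': cidr})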
@@ -13,8 +13,8 @@
 # limitations under the License.
 import json

-import helm_drydock.drivers.node.maasdriver.models.base as model_base
-import helm_drydock.drivers.node.maasdriver.models.vlan as model_vlan
+import drydock_provisioner.drivers.node.maasdriver.models.base as model_base
+import drydock_provisioner.drivers.node.maasdriver.models.vlan as model_vlan

 class Fabric(model_base.ResourceBase):
@@ -0,0 +1,34 @@ (new file)
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import drydock_provisioner.drivers.node.maasdriver.models.base as model_base

class Interface(model_base.ResourceBase):

    resource_url = 'nodes/{system_id}/interfaces/{resource_id}/'
    fields = ['resource_id', 'system_id', 'name', 'type', 'mac_address', 'vlan',
              'links', 'effective_mtu']
    json_fields = ['name', 'type', 'mac_address', 'vlan', 'links', 'effective_mtu']

    def __init__(self, api_client, **kwargs):
        super(Interface, self).__init__(api_client, **kwargs)

class Interfaces(model_base.ResourceCollectionBase):

    collection_url = 'nodes/{system_id}/interfaces/'
    collection_resource = Interface

    def __init__(self, api_client, **kwargs):
        super(Interfaces, self).__init__(api_client)
        self.system_id = kwargs.get('system_id', None)
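Interfaces is keyed by the owning machine's system_id, which is how the Machine model below wires it up. A small usage sketch based on that pattern:

import drydock_provisioner.drivers.node.maasdriver.models.interface as maas_interface

def list_interface_macs(maas_client, system_id):
    # Mirrors Machine.__init__: one Interfaces collection per MaaS machine
    ifaces = maas_interface.Interfaces(maas_client, system_id=system_id)
    ifaces.refresh()
    return [getattr(i, 'mac_address', None) for i in ifaces.resources.values()]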
drydock_provisioner/drivers/node/maasdriver/models/machine.py (new file, 185 lines)
@@ -0,0 +1,185 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import drydock_provisioner.drivers.node.maasdriver.models.base as model_base
import drydock_provisioner.drivers.node.maasdriver.models.interface as maas_interface
import bson
import yaml

class Machine(model_base.ResourceBase):

    resource_url = 'machines/{resource_id}/'
    fields = ['resource_id', 'hostname', 'power_type', 'power_state', 'power_parameters', 'interfaces',
              'boot_interface', 'memory', 'cpu_count', 'tag_names']
    json_fields = ['hostname', 'power_type']

    def __init__(self, api_client, **kwargs):
        super(Machine, self).__init__(api_client, **kwargs)

        # Replace generic dicts with interface collection model
        if getattr(self, 'resource_id', None) is not None:
            self.interfaces = maas_interface.Interfaces(api_client, system_id=self.resource_id)
            self.interfaces.refresh()

    def get_power_params(self):
        url = self.interpolate_url()

        resp = self.api_client.get(url, op='power_parameters')

        if resp.status_code == 200:
            self.power_parameters = resp.json()

    def commission(self, debug=False):
        url = self.interpolate_url()

        # If we want to debug this node commissioning, enable SSH
        # after commissioning and leave the node powered up

        options = {'enable_ssh': '1' if debug else '0'}

        resp = self.api_client.post(url, op='commission', files=options)

        # Need to sort out how to handle exceptions
        if not resp.ok:
            raise Exception()

    def get_details(self):
        url = self.interpolate_url()

        resp = self.api_client.get(url, op='details')

        if resp.status_code == 200:
            detail_config = bson.loads(resp.text)
            return detail_config

    def to_dict(self):
        """
        Serialize this resource instance into a dict matching the
        MAAS representation of the resource
        """
        data_dict = {}

        for f in self.json_fields:
            if getattr(self, f, None) is not None:
                if f == 'resource_id':
                    data_dict['system_id'] = getattr(self, f)
                else:
                    data_dict[f] = getattr(self, f)

        return data_dict

    @classmethod
    def from_dict(cls, api_client, obj_dict):
        """
        Create an instance of this resource class based on a dict
        of MaaS type attributes

        Customized for Machine due to use of system_id instead of id
        as resource key

        :param api_client: Instance of api_client.MaasRequestFactory for accessing MaaS API
        :param obj_dict: Python dict as parsed from MaaS API JSON representing this resource type
        """

        refined_dict = {k: obj_dict.get(k, None) for k in cls.fields}

        if 'system_id' in obj_dict.keys():
            refined_dict['resource_id'] = obj_dict.get('system_id')

        i = cls(api_client, **refined_dict)
        return i

class Machines(model_base.ResourceCollectionBase):

    collection_url = 'machines/'
    collection_resource = Machine

    def __init__(self, api_client, **kwargs):
        super(Machines, self).__init__(api_client)

    # Add the OOB power parameters to each machine instance
    def collect_power_params(self):
        for k, v in self.resources.items():
            v.get_power_params()

    def identify_baremetal_node(self, node_model, update_name=True):
        """
        Search all the defined MaaS Machines and attempt to match
        one against the provided Drydock BaremetalNode model. Update
        the MaaS instance with the correct hostname

        :param node_model: Instance of objects.node.BaremetalNode to search MaaS for matching resource
        :param update_name: Whether Drydock should update the MaaS resource name to match the Drydock design
        """
        node_oob_network = node_model.oob_network
        node_oob_ip = node_model.get_network_address(node_oob_network)

        if node_oob_ip is None:
            self.logger.warn("Node model missing OOB IP address")
            raise ValueError('Node model missing OOB IP address')

        try:
            self.collect_power_params()

            maas_node = self.singleton({'power_params.power_address': node_oob_ip})

            self.logger.debug("Found MaaS resource %s matching Node %s" % (maas_node.resource_id, node_model.get_id()))

            if maas_node.hostname != node_model.name and update_name:
                maas_node.hostname = node_model.name
                maas_node.update()
                self.logger.debug("Updated MaaS resource %s hostname to %s" % (maas_node.resource_id, node_model.name))
            return maas_node

        except ValueError as ve:
            self.logger.warn("Error locating matching MaaS resource for OOB IP %s" % (node_oob_ip))
            return None

    def query(self, query):
        """
        Custom query method to deal with complex fields
        """
        result = list(self.resources.values())
        for (k, v) in query.items():
            if k.startswith('power_params.'):
                field = k[13:]
                result = [i for i in result
                          if str(getattr(i, 'power_parameters', {}).get(field, None)) == str(v)]
            else:
                result = [i for i in result
                          if str(getattr(i, k, None)) == str(v)]

        return result

    def add(self, res):
        """
        Create a new resource in this collection in MaaS
|
||||||
|
|
||||||
|
Customize as Machine resources use 'system_id' instead of 'id'
|
||||||
|
"""
|
||||||
|
data_dict = res.to_dict()
|
||||||
|
url = self.interpolate_url()
|
||||||
|
|
||||||
|
resp = self.api_client.post(url, files=data_dict)
|
||||||
|
|
||||||
|
if resp.status_code == 200:
|
||||||
|
resp_json = resp.json()
|
||||||
|
res.set_resource_id(resp_json.get('system_id'))
|
||||||
|
return res
|
||||||
|
|
||||||
|
raise errors.DriverError("Failed updating MAAS url %s - return code %s"
|
||||||
|
% (url, resp.status_code))
|
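Illustrative usage sketch (not part of the commit above): how the Machines collection could drive enlistment, assuming an api_client built from MaasRequestFactory, a populated Drydock BaremetalNode model named node_model, and a refresh() method on the collection base class like the one the Interfaces collection uses:

    import drydock_provisioner.drivers.node.maasdriver.models.machine as maas_machine

    machines = maas_machine.Machines(api_client)
    machines.refresh()

    # Match the MaaS machine to the Drydock node by its OOB IP address and
    # sync the MaaS hostname back to the design name
    maas_node = machines.identify_baremetal_node(node_model, update_name=True)
    if maas_node is not None:
        # Optionally commission the matched machine; debug=True leaves SSH enabled
        maas_node.commission(debug=False)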
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import helm_drydock.drivers.node.maasdriver.models.base as model_base
+import drydock_provisioner.drivers.node.maasdriver.models.base as model_base

class Subnet(model_base.ResourceBase):

@@ -13,8 +13,8 @@
# limitations under the License.
import json

-import helm_drydock.error as errors
+import drydock_provisioner.error as errors
-import helm_drydock.drivers.node.maasdriver.models.base as model_base
+import drydock_provisioner.drivers.node.maasdriver.models.base as model_base

class Vlan(model_base.ResourceBase):

@@ -12,17 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields
-import helm_drydock.error as errors
+import drydock_provisioner.error as errors

-from helm_drydock.drivers import ProviderDriver
+from drydock_provisioner.drivers import ProviderDriver

class OobDriver(ProviderDriver):

    def __init__(self, **kwargs):
        super(OobDriver, self).__init__(**kwargs)

-        self.supported_actions = [hd_fields.OrchestrationAction.ValidateOobServices,
+        self.supported_actions = [hd_fields.OrchestratorAction.ValidateOobServices,
                                  hd_fields.OrchestratorAction.ConfigNodePxe,
                                  hd_fields.OrchestratorAction.SetNodeBoot,
                                  hd_fields.OrchestratorAction.PowerOffNode,
@@ -12,17 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import time
+import logging

from pyghmi.ipmi.command import Command

-import helm_drydock.error as errors
+import drydock_provisioner.error as errors
-import helm_drydock.config as config
+import drydock_provisioner.config as config

-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields
-import helm_drydock.objects.task as task_model
+import drydock_provisioner.objects.task as task_model

-import helm_drydock.drivers.oob as oob
+import drydock_provisioner.drivers.oob as oob
-import helm_drydock.drivers as drivers
+import drydock_provisioner.drivers as drivers


class PyghmiDriver(oob.OobDriver):
@@ -34,15 +35,19 @@ class PyghmiDriver(oob.OobDriver):
        self.driver_key = "pyghmi_driver"
        self.driver_desc = "Pyghmi OOB Driver"

-        self.config = config.DrydockConfig.node_driver[self.driver_key]
+        self.logger = logging.getLogger('drydock.oobdriver.pyghmi')
+        self.config = config.DrydockConfig.node_driver.get(self.driver_key, {})

    def execute_task(self, task_id):
        task = self.state_manager.get_task(task_id)

        if task is None:
+            self.logger.error("Invalid task %s" % (task_id))
            raise errors.DriverError("Invalid task %s" % (task_id))

        if task.action not in self.supported_actions:
+            self.logger.error("Driver %s doesn't support task action %s"
+                              % (self.driver_desc, task.action))
            raise errors.DriverError("Driver %s doesn't support task action %s"
                                     % (self.driver_desc, task.action))

@@ -66,7 +71,7 @@ class PyghmiDriver(oob.OobDriver):
                                   result=hd_fields.ActionResult.Success)
            return

-        site_design = self.orchestrator.get_effective_site(design_id, task.site_name)
+        site_design = self.orchestrator.get_effective_site(design_id)

        target_nodes = []

@@ -118,13 +123,6 @@ class PyghmiDriver(oob.OobDriver):
                              if x.get_result() in [hd_fields.ActionResult.PartialSuccess,
                                                    hd_fields.ActionResult.Failure]]

-        print("Task %s successful subtasks: %s" %
-              (task.get_id(), len(success_subtasks)))
-        print("Task %s unsuccessful subtasks: %s" %
-              (task.get_id(), len(nosuccess_subtasks)))
-        print("Task %s total subtasks: %s" %
-              (task.get_id(), len(task.get_subtasks())))
-
        task_result = None
        if len(success_subtasks) > 0 and len(nosuccess_subtasks) > 0:
            task_result = hd_fields.ActionResult.PartialSuccess
@@ -145,9 +143,11 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
    def __init__(self, node=None, **kwargs):
        super(PyghmiTaskRunner, self).__init__(**kwargs)

+        self.logger = logging.getLogger('drydock.oobdriver.pyghmi')
        # We cheat here by providing the Node model instead
        # of making the runner source it from statemgmt
        if node is None:
+            self.logger.error("Did not specify target node")
            raise errors.DriverError("Did not specify target node")

        self.node = node
@@ -172,7 +172,7 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
                                 "task node scope")


-        ipmi_network = self.node.applied.get('oob_network')
+        ipmi_network = self.node.oob_network
        ipmi_address = self.node.get_network_address(ipmi_network)

        if ipmi_address is None:
@@ -184,8 +184,8 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):

        self.orchestrator.task_field_update(self.task.get_id(),
                                            status=hd_fields.TaskStatus.Running)
-        ipmi_account = self.node.applied.get('oob_account', '')
+        ipmi_account = self.node.oob_account
-        ipmi_credential = self.node.applied.get('oob_credential', '')
+        ipmi_credential = self.node.oob_credential

        ipmi_session = Command(bmc=ipmi_address, userid=ipmi_account,
                               password=ipmi_credential)
@@ -32,6 +32,7 @@ and storage.
* CreateStorageTemplate - Configure site-wide storage information in bootstrapper
* CreateBootMedia - Ensure all needed boot media is available to the bootstrapper including external repositories
* PrepareHardwareConfig - Prepare the bootstrapper to handle all hardware configuration actions (firmware updates, RAID configuration, driver installation)
+* IdentifyNode - Correlate a node definition in the Drydock internal model with a node detected by the downstream node bootstrapper.
* ConfigureHardware - Update and validate all hardware configurations on a node prior to deploying the OS on it
* InterrogateNode - Interrogate the bootstrapper about node information. Depending on the current state of the node, this interrogation will produce different information.
* ApplyNodeNetworking - Configure networking for a node
55
drydock_provisioner/drydock.py
Normal file
@@ -0,0 +1,55 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

import drydock_provisioner.config as config
import drydock_provisioner.objects as objects
import drydock_provisioner.ingester as ingester
import drydock_provisioner.statemgmt as statemgmt
import drydock_provisioner.orchestrator as orch
import drydock_provisioner.control.api as api

def start_drydock():
    objects.register_all()

    # Setup root logger
    logger = logging.getLogger('drydock')

    logger.setLevel(config.DrydockConfig.global_config.get('log_level'))
    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    # Specalized format for API logging
    logger = logging.getLogger('drydock.control')
    logger.propagate = False
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(user)s - %(req_id)s - %(external_ctx)s - %(message)s')

    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    state = statemgmt.DesignState()

    orchestrator = orch.Orchestrator(config.DrydockConfig.orchestrator_config.get('drivers', {}),
                                     state_manager=state)
    input_ingester = ingester.Ingester()
    input_ingester.enable_plugins(config.DrydockConfig.ingester_config.get('plugins', []))

    return api.start_api(state_manager=state, ingester=input_ingester,
                         orchestrator=orchestrator)

drydock = start_drydock()
@@ -35,3 +35,9 @@ class TransientDriverError(DriverError):

class PersistentDriverError(DriverError):
    pass
+
+class ApiError(Exception):
+    pass
+
+class InvalidFormat(ApiError):
+    pass
120
drydock_provisioner/ingester/__init__.py
Normal file
@@ -0,0 +1,120 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ingester - Ingest host topologies to define site design and
# persist design to helm-drydock's statemgmt service

import logging
import yaml
import uuid
import importlib

import drydock_provisioner.objects as objects
import drydock_provisioner.objects.site as site
import drydock_provisioner.objects.network as network
import drydock_provisioner.objects.hwprofile as hwprofile
import drydock_provisioner.objects.node as node
import drydock_provisioner.objects.hostprofile as hostprofile

from drydock_provisioner.statemgmt import DesignState

class Ingester(object):

    def __init__(self):
        self.logger = logging.getLogger("drydock.ingester")
        self.registered_plugins = {}

    def enable_plugins(self, plugins=[]):
        """
        enable_plugins

        :params plugins: - A list of strings naming class objects denoting the ingester plugins to be enabled

        Enable plugins that can be used for ingest_data calls. Each plugin should use
        drydock_provisioner.ingester.plugins.IngesterPlugin as its base class. As long as one
        enabled plugin successfully initializes, the call is considered successful. Otherwise
        it will throw an exception
        """
        if len(plugins) == 0:
            self.log.error("Cannot have an empty plugin list.")

        for plugin in plugins:
            try:
                (module, x, classname) = plugin.rpartition('.')

                if module == '':
                    raise Exception()
                mod = importlib.import_module(module)
                klass = getattr(mod, classname)
                new_plugin = klass()
                plugin_name = new_plugin.get_name()
                self.registered_plugins[plugin_name] = new_plugin
            except Exception as ex:
                self.logger.error("Could not enable plugin %s - %s" % (plugin, str(ex)))

        if len(self.registered_plugins) == 0:
            self.logger.error("Could not enable at least one plugin")
            raise Exception("Could not enable at least one plugin")

    def ingest_data(self, plugin_name='', design_state=None, design_id=None, context=None, **kwargs):
        if design_state is None:
            self.logger.error("Ingester:ingest_data called without valid DesignState handler")
            raise ValueError("Invalid design_state handler")

        # If no design_id is specified, instantiate a new one
        if 'design_id' is None:
            self.logger.error("Ingester:ingest_data required kwarg 'design_id' missing")
            raise ValueError("Ingester:ingest_data required kwarg 'design_id' missing")

        design_data = design_state.get_design(design_id)

        self.logger.debug("Ingester:ingest_data ingesting design parts for design %s" % design_id)

        if plugin_name in self.registered_plugins:
            try:
                design_items = self.registered_plugins[plugin_name].ingest_data(**kwargs)
            except ValueError as vex:
                self.logger.warn("Ingester:ingest_data - Error process data - %s" % (str(vex)))
                return None
            self.logger.debug("Ingester:ingest_data parsed %s design parts" % str(len(design_items)))
            for m in design_items:
                if context is not None:
                    m.set_create_fields(context)
                if type(m) is site.Site:
                    design_data.set_site(m)
                elif type(m) is network.Network:
                    design_data.add_network(m)
                elif type(m) is network.NetworkLink:
                    design_data.add_network_link(m)
                elif type(m) is hostprofile.HostProfile:
                    design_data.add_host_profile(m)
                elif type(m) is hwprofile.HardwareProfile:
                    design_data.add_hardware_profile(m)
                elif type(m) is node.BaremetalNode:
                    design_data.add_baremetal_node(m)
            design_state.put_design(design_data)
            return design_items
        else:
            self.logger.error("Could not find plugin %s to ingest data." % (plugin_name))
            raise LookupError("Could not find plugin %s" % plugin_name)
    """
    ingest_data

    params: plugin_name - Which plugin should be used for ingestion
    params: params - A map of parameters that will be passed to the plugin's ingest_data method

    Execute a data ingestion using the named plugin (assuming it is enabled)
    """
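A minimal sketch of how the Ingester above might be driven, assuming a design_id that already exists in the DesignState and a YAML design file at the given path (both are illustrative placeholders):

    import drydock_provisioner.statemgmt as statemgmt
    from drydock_provisioner.ingester import Ingester

    state = statemgmt.DesignState()

    ingester = Ingester()
    ingester.enable_plugins(['drydock_provisioner.ingester.plugins.yaml.YamlIngester'])

    # 'yaml' is the name returned by YamlIngester.get_name(); filenames is the
    # kwarg the YAML plugin documents for file-based ingestion
    parts = ingester.ingest_data(plugin_name='yaml', design_state=state,
                                 design_id=design_id,
                                 filenames=['/tmp/site_design.yaml'])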
@@ -19,15 +19,16 @@
import yaml
import logging

-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields

-from helm_drydock import objects
+from drydock_provisioner import objects
-from helm_drydock.ingester.plugins import IngesterPlugin
+from drydock_provisioner.ingester.plugins import IngesterPlugin

class YamlIngester(IngesterPlugin):

    def __init__(self):
        super(YamlIngester, self).__init__()
+        self.logger = logging.getLogger('drydock.ingester.yaml')

    def get_name(self):
        return "yaml"
@@ -37,7 +38,7 @@ class YamlIngester(IngesterPlugin):

    filenames - Array of absolute path to the YAML files to ingest

-    returns an array of objects from helm_drydock.model
+    returns an array of objects from drydock_provisioner.model

    """
    def ingest_data(self, **kwargs):
@@ -52,12 +53,10 @@ class YamlIngester(IngesterPlugin):
                    file.close()
                    models.extend(self.parse_docs(contents))
                except OSError as err:
-                    self.log.error(
+                    self.logger.error(
                        "Error opening input file %s for ingestion: %s"
                        % (filename, err))
                    continue
-
-
        elif 'content' in kwargs:
            models.extend(self.parse_docs(kwargs.get('content')))
        else:
@@ -71,6 +70,8 @@ class YamlIngester(IngesterPlugin):
    def parse_docs(self, yaml_string):
        models = []

+        self.logger.debug("yamlingester:parse_docs - Parsing YAML string \n%s" % (yaml_string))
+
        try:
            parsed_data = yaml.load_all(yaml_string)
        except yaml.YAMLError as err:
@@ -6,7 +6,7 @@ different sources.

Each ingester plugin should be able source data
based on user-provided parameters and parse that data
-into the Drydock internal model (helm_drydock.model).
+into the Drydock internal model (drydock_provisioner.model).

Each plugin does not need to support every type of design
data as a single site design may be federated from multiple
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# Models for helm_drydock
+# Models for drydock_provisioner
#
import logging

@@ -23,11 +23,11 @@ def register_all():
    # NOTE(sh8121att) - Import all versioned objects so
    # they are available via RPC. Any new object definitions
    # need to be added here.
-    __import__('helm_drydock.objects.network')
+    __import__('drydock_provisioner.objects.network')
-    __import__('helm_drydock.objects.node')
+    __import__('drydock_provisioner.objects.node')
-    __import__('helm_drydock.objects.hostprofile')
+    __import__('drydock_provisioner.objects.hostprofile')
-    __import__('helm_drydock.objects.hwprofile')
+    __import__('drydock_provisioner.objects.hwprofile')
-    __import__('helm_drydock.objects.site')
+    __import__('drydock_provisioner.objects.site')

# Utility class for calculating inheritance

@@ -11,16 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import datetime

from oslo_versionedobjects import base
from oslo_versionedobjects import fields as obj_fields

-import helm_drydock.objects as objects
+import drydock_provisioner.objects as objects

class DrydockObjectRegistry(base.VersionedObjectRegistry):

    # Steal this from Cinder to bring all registered objects
-    # into the helm_drydock.objects namespace
+    # into the drydock_provisioner.objects namespace

    def registration_hook(self, cls, index):
        setattr(objects, cls.obj_name(), cls)
@@ -29,7 +30,7 @@ class DrydockObject(base.VersionedObject):

    VERSION = '1.0'

-    OBJ_PROJECT_NAMESPACE = 'helm_drydock.objects'
+    OBJ_PROJECT_NAMESPACE = 'drydock_provisioner.objects'

    # Return None for undefined attributes
    def obj_load_attr(self, attrname):
@@ -38,6 +39,32 @@ class DrydockObject(base.VersionedObject):
        else:
            raise ValueError("Unknown field %s" % (attrname))

+    def obj_to_simple(self):
+        """
+        Create a simple primitive representation of this object excluding
+        all the versioning stuff. Used to serialize an object for public
+        consumption, not intended to be deserialized by OVO
+        """
+
+        primitive = dict()
+
+        primitive['model_type'] = self.__class__.__name__
+        primitive['model_version'] = self.VERSION
+
+        for name, field in self.fields.items():
+            if self.obj_attr_is_set(name):
+                value = getattr(self, name)
+                if (hasattr(value, 'obj_to_simple') and
+                    callable(value.obj_to_simple)):
+                    primitive[name] = value.obj_to_simple()
+                else:
+                    value = field.to_primitive(self, name, value)
+                    if value is not None:
+                        primitive[name] = value
+
+        return primitive
+
+
class DrydockPersistentObject(base.VersionedObject):

    fields = {
@@ -47,6 +74,15 @@ class DrydockPersistentObject(base.VersionedObject):
        'updated_by': obj_fields.StringField(nullable=True),
    }

+    def set_create_fields(self, context):
+        self.created_at = datetime.datetime.now()
+        self.created_by = context.user
+
+    def set_update_fields(self, context):
+        self.updated_at = datetime.datetime.now()
+        self.updated_by = context.user
+
+
class DrydockObjectListBase(base.ObjectListBase):

    def __init__(self, **kwargs):
@@ -73,3 +109,11 @@ class DrydockObjectListBase(base.ObjectListBase):
            model_list.append(o)

        return model_list
+
+    def obj_to_simple(self):
+        primitive_list = list()
+
+        for o in self.objects:
+            primitive_list.append(o.obj_to_simple())
+
+        return primitive_list
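As a hedged illustration of the new obj_to_simple() helper, assuming node is a populated BaremetalNode versioned object already registered with the object registry:

    simple = node.obj_to_simple()
    simple['model_type']     # 'BaremetalNode'
    simple['model_version']  # '1.0'
    # the remaining keys are whichever fields are set, converted to primitives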
@@ -44,6 +44,7 @@ class OrchestratorAction(BaseDrydockEnum):
    CreateStorageTemplate = 'create_storage_template'
    CreateBootMedia = 'create_boot_media'
    PrepareHardwareConfig = 'prepare_hardware_config'
+    IdentifyNode = 'identify_node'
    ConfigureHardware = 'configure_hardware'
    InterrogateNode = 'interrogate_node'
    ApplyNodeNetworking = 'apply_node_networking'
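A small sketch of how the new enum value is consumed, mirroring the driver supported_actions lists elsewhere in this commit (the assert only restates the string value shown above):

    import drydock_provisioner.objects.fields as hd_fields

    # Drivers advertise support for the new action by listing it
    supported_actions = [hd_fields.OrchestratorAction.IdentifyNode]
    assert hd_fields.OrchestratorAction.IdentifyNode == 'identify_node'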
@@ -16,9 +16,9 @@ from copy import deepcopy

import oslo_versionedobjects.fields as obj_fields

-import helm_drydock.objects as objects
+import drydock_provisioner.objects as objects
-import helm_drydock.objects.base as base
+import drydock_provisioner.objects.base as base
-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields


@base.DrydockObjectRegistry.register
@@ -16,9 +16,9 @@ from copy import deepcopy

from oslo_versionedobjects import fields as ovo_fields

-import helm_drydock.objects as objects
+import drydock_provisioner.objects as objects
-import helm_drydock.objects.base as base
+import drydock_provisioner.objects.base as base
-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields


@base.DrydockObjectRegistry.register
class HardwareProfile(base.DrydockPersistentObject, base.DrydockObject):
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# Models for helm_drydock
+# Models for drydock_provisioner
#
import logging

@@ -20,9 +20,9 @@ from copy import deepcopy

from oslo_versionedobjects import fields as ovo_fields

-import helm_drydock.objects as objects
+import drydock_provisioner.objects as objects
-import helm_drydock.objects.base as base
+import drydock_provisioner.objects.base as base
-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields

@base.DrydockObjectRegistry.register
class NetworkLink(base.DrydockPersistentObject, base.DrydockObject):
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# Models for helm_drydock
+# Models for drydock_provisioner
#
import logging

@@ -20,13 +20,13 @@ from copy import deepcopy

from oslo_versionedobjects import fields as ovo_fields

-import helm_drydock.objects as objects
+import drydock_provisioner.objects as objects
-import helm_drydock.objects.hostprofile
+import drydock_provisioner.objects.hostprofile
-import helm_drydock.objects.base as base
+import drydock_provisioner.objects.base as base
-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields

@base.DrydockObjectRegistry.register
-class BaremetalNode(helm_drydock.objects.hostprofile.HostProfile):
+class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):

    VERSION = '1.0'

@@ -12,16 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# Models for helm_drydock
+# Models for drydock_provisioner
#
from copy import deepcopy
import uuid
+import datetime

import oslo_versionedobjects.fields as ovo_fields

-import helm_drydock.objects as objects
+import drydock_provisioner.objects as objects
-import helm_drydock.objects.base as base
+import drydock_provisioner.objects.base as base
-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields


@base.DrydockObjectRegistry.register
@@ -126,8 +127,6 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
    def __init__(self, **kwargs):
        super(SiteDesign, self).__init__(**kwargs)

-
-
    # Assign UUID id
    def assign_id(self):
        self.id = uuid.uuid4()
@@ -228,6 +227,18 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
            raise DesignError("BaremetalNode %s not found in design state"
                              % node_key)

+    def create(self, ctx, state_manager):
+        self.created_at = datetime.datetime.now()
+        self.created_by = ctx.user
+
+        state_manager.post_design(self)
+
+    def save(self, ctx, state_manager):
+        self.updated_at = datetime.datetime.now()
+        self.updated_by = ctx.user
+
+        state_manager.put_design(self)
+
    """
    Support filtering on rack name, node name or node tag
    for now. Each filter can be a comma-delimited list of
@@ -15,9 +15,9 @@ import uuid

from threading import Lock

-import helm_drydock.error as errors
+import drydock_provisioner.error as errors

-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields

class Task(object):

@@ -66,18 +66,29 @@ class Task(object):
    def get_subtasks(self):
        return self.subtasks

+    def to_dict(self):
+        return {
+            'task_id': str(self.task_id),
+            'action': self.action,
+            'parent_task': str(self.parent_task_id),
+            'status': self.status,
+            'result': self.result,
+            'result_detail': self.result_detail,
+            'subtasks': [str(x) for x in self.subtasks],
+        }
+
class OrchestratorTask(Task):

-    def __init__(self, **kwargs):
+    def __init__(self, site=None, design_id=None, **kwargs):
        super(OrchestratorTask, self).__init__(**kwargs)

        # Validate parameters based on action
-        self.site = kwargs.get('site', '')
+        self.site = site

-        if self.site == '':
+        if self.site is None:
            raise ValueError("Orchestration Task requires 'site' parameter")

-        self.design_id = kwargs.get('design_id', 0)
+        self.design_id = design_id

        if self.action in [hd_fields.OrchestratorAction.VerifyNode,
                           hd_fields.OrchestratorAction.PrepareNode,
@@ -85,6 +96,14 @@ class OrchestratorTask(Task):
                           hd_fields.OrchestratorAction.DestroyNode]:
            self.node_filter = kwargs.get('node_filter', None)

+    def to_dict(self):
+        _dict = super(OrchestratorTask, self).to_dict()
+
+        _dict['site'] = self.site
+        _dict['design_id'] = self.design_id
+        _dict['node_filter'] = getattr(self, 'node_filter', None)
+
+        return _dict
+
class DriverTask(Task):
    def __init__(self, task_scope={}, **kwargs):
@@ -95,3 +114,12 @@ class DriverTask(Task):
        self.site_name = task_scope.get('site', None)

        self.node_list = task_scope.get('node_names', [])
+
+    def to_dict(self):
+        _dict = super(DriverTask, self).to_dict()
+
+        _dict['site_name'] = self.site_name
+        _dict['design_id'] = self.design_id
+        _dict['node_list'] = self.node_list
+
+        return _dict
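For illustration only, an OrchestratorTask.to_dict() primitive as the /tasks API might return it; all values below are hypothetical placeholders, not output from this commit:

    {
        'task_id': '8d2a6e9c-...',
        'action': 'prepare_node',
        'parent_task': 'None',
        'status': 'complete',
        'result': 'partial_success',
        'result_detail': None,
        'subtasks': ['c01dfae2-...'],
        'site': 'mysite',
        'design_id': 'f0ba3c21-...',
        'node_filter': None,
    }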
@@ -1,3 +1,4 @@
+
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,13 +16,14 @@
import time
import threading
import importlib
+import logging

from copy import deepcopy

-import helm_drydock.drivers as drivers
+import drydock_provisioner.drivers as drivers
-import helm_drydock.objects.task as tasks
+import drydock_provisioner.objects.task as tasks
-import helm_drydock.error as errors
+import drydock_provisioner.error as errors
-import helm_drydock.objects.fields as hd_fields
+import drydock_provisioner.objects.fields as hd_fields

class Orchestrator(object):

@@ -32,6 +34,8 @@ class Orchestrator(object):

        self.state_manager = state_manager

+        self.logger = logging.getLogger('drydock.orchestrator')
+
        if enabled_drivers is not None:
            oob_driver_name = enabled_drivers.get('oob', None)
            if oob_driver_name is not None:
@@ -106,8 +110,7 @@
            self.task_field_update(task_id,
                                   status=hd_fields.TaskStatus.Running)
            try:
-                site_design = self.get_effective_site(task_site,
-                                                      change_id=design_id)
+                site_design = self.get_effective_site(design_id)
                self.task_field_update(task_id,
                                       result=hd_fields.ActionResult.Success)
            except:
@@ -155,10 +158,14 @@
                                           task_scope=task_scope,
                                           action=hd_fields.OrchestratorAction.CreateNetworkTemplate)

+            self.logger.info("Starting node driver task %s to create network templates" % (driver_task.get_id()))
+
            driver.execute_task(driver_task.get_id())

            driver_task = self.state_manager.get_task(driver_task.get_id())

+            self.logger.info("Node driver task %s complete" % (driver_task.get_id()))
+
            self.task_field_update(task_id,
                                   status=hd_fields.TaskStatus.Complete,
                                   result=driver_task.get_result())
@@ -167,15 +174,16 @@
            self.task_field_update(task_id,
                                   status=hd_fields.TaskStatus.Running)

-            driver = self.enabled_drivers['oob']
+            oob_driver = self.enabled_drivers['oob']

-            if driver is None:
+            if oob_driver is None:
                self.task_field_update(task_id,
                                       status=hd_fields.TaskStatus.Errored,
-                                       result=hd_fields.ActionResult.Failure)
+                                       result=hd_fields.ActionResult.Failure,
+                                       result_detail={'detail': 'Error: No oob driver configured', 'retry': False})
                return

-            site_design = self.get_effective_site(design_id, task_site)
+            site_design = self.get_effective_site(design_id)

            node_filter = task.node_filter

@@ -186,34 +194,45 @@
            task_scope = {'site' : task_site,
                          'node_names' : target_names}

-            driver_task = self.create_task(tasks.DriverTask,
+            oob_driver_task = self.create_task(tasks.DriverTask,
                                           parent_task_id=task.get_id(),
                                           design_id=design_id,
-                                           action=hd_fields.OrchestratorAction.InterrogateNode,
+                                           action=hd_fields.OrchestratorAction.InterrogateOob,
                                           task_scope=task_scope)

-            driver.execute_task(driver_task.get_id())
+            oob_driver.execute_task(oob_driver_task.get_id())

-            driver_task = self.state_manager.get_task(driver_task.get_id())
+            oob_driver_task = self.state_manager.get_task(oob_driver_task.get_id())

            self.task_field_update(task_id,
                                   status=hd_fields.TaskStatus.Complete,
-                                   result=driver_task.get_result())
+                                   result=oob_driver_task.get_result())
            return
        elif task.action == hd_fields.OrchestratorAction.PrepareNode:
+            failed = worked = False
+
            self.task_field_update(task_id,
                                   status=hd_fields.TaskStatus.Running)

-            driver = self.enabled_drivers['oob']
+            oob_driver = self.enabled_drivers['oob']

-            if driver is None:
+            if oob_driver is None:
                self.task_field_update(task_id,
                                       status=hd_fields.TaskStatus.Errored,
-                                       result=hd_fields.ActionResult.Failure)
+                                       result=hd_fields.ActionResult.Failure,
+                                       result_detail={'detail': 'Error: No oob driver configured', 'retry': False})
                return

-            site_design = self.get_effective_site(task_site,
-                                                  change_id=design_id)
+            node_driver = self.enabled_drivers['node']
+
+            if node_driver is None:
+                self.task_field_update(task_id,
+                                       status=hd_fields.TaskStatus.Errored,
+                                       result=hd_fields.ActionResult.Failure,
+                                       result_detail={'detail': 'Error: No node driver configured', 'retry': False})
+                return
+
+            site_design = self.get_effective_site(design_id)

            node_filter = task.node_filter

@@ -230,33 +249,88 @@
                                             action=hd_fields.OrchestratorAction.SetNodeBoot,
                                             task_scope=task_scope)

-            driver.execute_task(setboot_task.get_id())
+            self.logger.info("Starting OOB driver task %s to set PXE boot" % (setboot_task.get_id()))
+
+            oob_driver.execute_task(setboot_task.get_id())
+
+            self.logger.info("OOB driver task %s complete" % (setboot_task.get_id()))

            setboot_task = self.state_manager.get_task(setboot_task.get_id())

+            if setboot_task.get_result() == hd_fields.ActionResult.Success:
+                worked = True
+            elif setboot_task.get_result() == hd_fields.ActionResult.PartialSuccess:
+                worked = failed = True
+            elif setboot_task.get_result() == hd_fields.ActionResult.Failure:
+                failed = True
+
            cycle_task = self.create_task(tasks.DriverTask,
                                          parent_task_id=task.get_id(),
                                          design_id=design_id,
                                          action=hd_fields.OrchestratorAction.PowerCycleNode,
                                          task_scope=task_scope)
-            driver.execute_task(cycle_task.get_id())
+
+            self.logger.info("Starting OOB driver task %s to power cycle nodes" % (cycle_task.get_id()))
+
+            oob_driver.execute_task(cycle_task.get_id())
+
+            self.logger.info("OOB driver task %s complete" % (cycle_task.get_id()))

            cycle_task = self.state_manager.get_task(cycle_task.get_id())

-            if (setboot_task.get_result() == hd_fields.ActionResult.Success and
-                cycle_task.get_result() == hd_fields.ActionResult.Success):
-                self.task_field_update(task_id,
-                                       status=hd_fields.TaskStatus.Complete,
-                                       result=hd_fields.ActionResult.Success)
-            elif (setboot_task.get_result() == hd_fields.ActionResult.Success or
-                  cycle_task.get_result() == hd_fields.ActionResult.Success):
-                self.task_field_update(task_id,
-                                       status=hd_fields.TaskStatus.Complete,
-                                       result=hd_fields.ActionResult.PartialSuccess)
+            if cycle_task.get_result() == hd_fields.ActionResult.Success:
+                worked = True
+            elif cycle_task.get_result() == hd_fields.ActionResult.PartialSuccess:
+                worked = failed = True
+            elif cycle_task.get_result() == hd_fields.ActionResult.Failure:
+                failed = True
+
+            # IdentifyNode success will take some time after PowerCycleNode finishes
+            # Retry the operation a few times if it fails before considering it a final failure
+            # Each attempt is a new task which might make the final task tree a bit confusing
+
+            node_identify_attempts = 0
+
+            while True:
+
+                node_identify_task = self.create_task(tasks.DriverTask,
+                                                      parent_task_id=task.get_id(),
+                                                      design_id=design_id,
+                                                      action=hd_fields.OrchestratorAction.IdentifyNode,
+                                                      task_scope=task_scope)
+
+                self.logger.info("Starting node driver task %s to identify node - attempt %s" %
+                                 (node_identify_task.get_id(), node_identify_attempts+1))
+
+                node_driver.execute_task(node_identify_task.get_id())
+                node_identify_attempts = node_identify_attempts + 1
+
+                node_identify_task = self.state_manager.get_task(node_identify_task.get_id())
+
+                if node_identify_task.get_result() == hd_fields.ActionResult.Success:
+                    worked = True
+                    break
+                elif node_identify_task.get_result() in [hd_fields.ActionResult.PartialSuccess,
+                                                         hd_fields.ActionResult.Failure]:
+                    # TODO This threshold should be a configurable default and tunable by task API
+                    if node_identify_attempts > 2:
+                        failed = True
+                        break
+
+                    time.sleep(5 * 60)
+
+            final_result = None
+            if worked and failed:
+                final_result = hd_fields.ActionResult.PartialSuccess
+            elif worked:
+                final_result = hd_fields.ActionResult.Success
            else:
+                final_result = hd_fields.ActionResult.Failure
+
            self.task_field_update(task_id,
                                   status=hd_fields.TaskStatus.Complete,
-                                   result=hd_fields.ActionResult.Failure)
+                                   result=final_result)

            return
        else:
@@ -331,7 +405,7 @@
        # the baremetal nodes which recursively resolves it for host profiles
        # assigned to those nodes

-        for n in site_design.baremetal_nodes:
+        for n in getattr(site_design, 'baremetal_nodes', []):
            n.compile_applied_model(site_design)

        return
@@ -342,18 +416,13 @@
    return a Site model reflecting the effective design for the site
    """

-    def get_described_site(self, design_id, site_name):
+    def get_described_site(self, design_id):
-        site_design = None
-
-        if site_name is None:
-            raise errors.OrchestratorError("Cannot source design for site None")
-
        site_design = self.state_manager.get_design(design_id)

        return site_design

-    def get_effective_site(self, design_id, site_name):
+    def get_effective_site(self, design_id):
-        site_design = self.get_described_site(design_id, site_name)
+        site_design = self.get_described_site(design_id)

        self.compute_model_inheritance(site_design)

@@ -18,10 +18,10 @@ from threading import Lock

import uuid

-import helm_drydock.objects as objects
+import drydock_provisioner.objects as objects
-import helm_drydock.objects.task as tasks
+import drydock_provisioner.objects.task as tasks

-from helm_drydock.error import DesignError, StateError
+from drydock_provisioner.error import DesignError, StateError

class DesignState(object):

@@ -41,6 +41,7 @@ class DesignState(object):
    # has started
    def get_design(self, design_id):
        if design_id not in self.designs.keys():
+
            raise DesignError("Design ID %s not found" % (design_id))

        return objects.SiteDesign.obj_from_primitive(self.designs[design_id])
@@ -133,7 +134,7 @@ class DesignState(object):

    def get_task(self, task_id):
        for t in self.tasks:
-            if t.get_id() == task_id:
+            if t.get_id() == task_id or str(t.get_id()) == task_id:
                return deepcopy(t)
        return None

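A quick sketch of what the relaxed comparison in get_task() allows, assuming state is a DesignState already holding the task:

    t1 = state.get_task(task.get_id())       # uuid.UUID, as used internally
    t2 = state.get_task(str(task.get_id()))  # string form, e.g. from an API path
    # both calls return a deepcopy of the same task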
@@ -153,7 +153,7 @@ metadata:
  description: Describe server configuration attributes. Not a specific server, but profile adopted by a server definition
spec:
  # The HostProfile this profile adopts initial state from. No default.
-  # See helm_drydock/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works
+  # See drydock_provisioner/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works
  host_profile: 'defaults'
  # The HardwareProfile describing the node hardware. No default.
  hardware_profile: 'DellR720v1'
@@ -242,7 +242,7 @@ metadata:
  description: Specify a physical server.
spec:
  # The HostProfile this server adopts initial state from. No default.
-  # See helm_drydock/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works
+  # See drydock_provisioner/objects/readme.md for information on how HostProfile and BaremetalNode inheritance works
  host_profile: 'defaults'
  # The HardwareProfile describing the node hardware. No default.
  hardware_profile: 'DellR720v1'
@@ -1,14 +0,0 @@
-# Control #
-
-This is the external facing API service to control the rest
-of Drydock and query Drydock-managed data.
-
-Anticipate basing this service on the falcon Python library
-
-## Endpoints ##
-
-### /tasks ###
-
-POST - Create a new orchestration task and submit it for execution
-GET - Get status of a task
-DELETE - Cancel execution of a task if permitted
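The endpoint summary above is removed here as part of the package reorganization, but the /tasks contract it describes is what the relocated control service exposes. A hedged sketch of exercising it with requests follows; the host, port, URL prefix, and payload field names are all assumptions for illustration, since the exact request schema is not shown in this diff:

    import requests

    base_url = 'http://localhost:9000/api/1.0'  # host, port, and version prefix are assumed

    # POST /tasks - create a new orchestration task (field names are illustrative)
    created = requests.post(base_url + '/tasks',
                            json={'action': 'ValidateNodeServices',
                                  'site': 'sitename'}).json()

    # GET /tasks/<id> - check the status of the submitted task
    status = requests.get(base_url + '/tasks/' + created['task_id']).json()

    # DELETE /tasks/<id> - request cancellation, if the task is still cancellable
    requests.delete(base_url + '/tasks/' + created['task_id'])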
@@ -1,306 +0,0 @@
-# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import helm_drydock.error as errors
-import helm_drydock.config as config
-import helm_drydock.drivers as drivers
-import helm_drydock.objects.fields as hd_fields
-import helm_drydock.objects.task as task_model
-
-from helm_drydock.drivers.node import NodeDriver
-from .api_client import MaasRequestFactory
-import helm_drydock.drivers.node.maasdriver.models.fabric as maas_fabric
-import helm_drydock.drivers.node.maasdriver.models.vlan as maas_vlan
-import helm_drydock.drivers.node.maasdriver.models.subnet as maas_subnet
-
-class MaasNodeDriver(NodeDriver):
-
-    def __init__(self, **kwargs):
-        super(MaasNodeDriver, self).__init__(**kwargs)
-
-        self.driver_name = "maasdriver"
-        self.driver_key = "maasdriver"
-        self.driver_desc = "MaaS Node Provisioning Driver"
-
-        self.config = config.DrydockConfig.node_driver[self.driver_key]
-
-    def execute_task(self, task_id):
-        task = self.state_manager.get_task(task_id)
-
-        if task is None:
-            raise errors.DriverError("Invalid task %s" % (task_id))
-
-        if task.action not in self.supported_actions:
-            raise errors.DriverError("Driver %s doesn't support task action %s"
-                % (self.driver_desc, task.action))
-
-        if task.action == hd_fields.OrchestratorAction.ValidateNodeServices:
-            self.orchestrator.task_field_update(task.get_id(),
-                status=hd_fields.TaskStatus.Running)
-            maas_client = MaasRequestFactory(self.config['api_url'], self.config['api_key'])
-
-            try:
-                if maas_client.test_connectivity():
-                    if maas_client.test_authentication():
-                        self.orchestrator.task_field_update(task.get_id(),
-                            status=hd_fields.TaskStatus.Complete,
-                            result=hd_fields.ActionResult.Success)
-                        return
-            except errors.TransientDriverError(ex):
-                result = {
-                    'retry': True,
-                    'detail': str(ex),
-                }
-                self.orchestrator.task_field_update(task.get_id(),
-                    status=hd_fields.TaskStatus.Complete,
-                    result=hd_fields.ActionResult.Failure,
-                    result_details=result)
-                return
-            except errors.PersistentDriverError(ex):
-                result = {
-                    'retry': False,
-                    'detail': str(ex),
-                }
-                self.orchestrator.task_field_update(task.get_id(),
-                    status=hd_fields.TaskStatus.Complete,
-                    result=hd_fields.ActionResult.Failure,
-                    result_details=result)
-                return
-            except Exception(ex):
-                result = {
-                    'retry': False,
-                    'detail': str(ex),
-                }
-                self.orchestrator.task_field_update(task.get_id(),
-                    status=hd_fields.TaskStatus.Complete,
-                    result=hd_fields.ActionResult.Failure,
-                    result_details=result)
-                return
-
-        design_id = getattr(task, 'design_id', None)
-
-        if design_id is None:
-            raise errors.DriverError("No design ID specified in task %s" %
-                                     (task_id))
-
-        if task.site_name is None:
-            raise errors.DriverError("No site specified for task %s." %
-                                     (task_id))
-
-        self.orchestrator.task_field_update(task.get_id(),
-            status=hd_fields.TaskStatus.Running)
-
-        site_design = self.orchestrator.get_effective_site(design_id, task.site_name)
-
-        if task.action == hd_fields.OrchestratorAction.CreateNetworkTemplate:
-            subtask = self.orchestrator.create_task(task_model.DriverTask,
-                        parent_task_id=task.get_id(), design_id=design_id,
-                        action=task.action, site_name=task.site_name,
-                        task_scope={'site': task.site_name})
-            runner = MaasTaskRunner(state_manager=self.state_manager,
-                        orchestrator=self.orchestrator,
-                        task_id=subtask.get_id(),config=self.config)
-            runner.start()
-
-            runner.join(timeout=120)
-
-            if runner.is_alive():
-                result = {
-                    'retry': False,
-                    'detail': 'MaaS Network creation timed-out'
-                }
-                self.orchestrator.task_field_update(task.get_id(),
-                    status=hd_fields.TaskStatus.Complete,
-                    result=hd_fields.ActionResult.Failure,
-                    result_detail=result)
-            else:
-                subtask = self.state_manager.get_task(subtask.get_id())
-                self.orchestrator.task_field_update(task.get_id(),
-                    status=hd_fields.TaskStatus.Complete,
-                    result=subtask.get_result())
-
-            return
-
-class MaasTaskRunner(drivers.DriverTaskRunner):
-
-    def __init__(self, config=None, **kwargs):
-        super(MaasTaskRunner, self).__init__(**kwargs)
-
-        self.driver_config = config
-
-    def execute_task(self):
-        task_action = self.task.action
-
-        self.orchestrator.task_field_update(self.task.get_id(),
-                                            status=hd_fields.TaskStatus.Running,
-                                            result=hd_fields.ActionResult.Incomplete)
-
-        self.maas_client = MaasRequestFactory(self.driver_config['api_url'],
-                                              self.driver_config['api_key'])
-
-        site_design = self.orchestrator.get_effective_site(self.task.design_id,
-                                self.task.site_name)
-
-        if task_action == hd_fields.OrchestratorAction.CreateNetworkTemplate:
-            # Try to true up MaaS definitions of fabrics/vlans/subnets
-            # with the networks defined in Drydock
-            design_networks = site_design.networks
-
-            subnets = maas_subnet.Subnets(self.maas_client)
-            subnets.refresh()
-
-            result_detail = {
-                'detail': []
-            }
-
-            for n in design_networks:
-                exists = subnets.query({'cidr': n.cidr})
-
-                subnet = None
-
-                if len(exists) > 0:
-                    subnet = exists[0]
-
-                    subnet.name = n.name
-                    subnet.dns_servers = n.dns_servers
-
-                    vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=subnet.fabric)
-                    vlan_list.refresh()
-
-                    vlan = vlan_list.select(subnet.vlan)
-
-                    if vlan is not None:
-                        if ((n.vlan_id is None and vlan.vid != 0) or
-                            (n.vlan_id is not None and vlan.vid != n.vlan_id)):
-
-                            # if the VLAN name matches, assume this is the correct resource
-                            # and it needs to be updated
-                            if vlan.name == n.name:
-                                vlan.set_vid(n.vlan_id)
-                                vlan.mtu = n.mtu
-                                vlan.update()
-                            else:
-                                vlan_id = n.vlan_id if n.vlan_id is not None else 0
-                                target_vlan = vlan_list.query({'vid': vlan_id})
-                                if len(target_vlan) > 0:
-                                    subnet.vlan = target_vlan[0].resource_id
-                                else:
-                                    # This is a flag that after creating a fabric and
-                                    # VLAN below, update the subnet
-                                    subnet.vlan = None
-                    else:
-                        subnet.vlan = None
-
-                    # Check if the routes have a default route
-                    subnet.gateway_ip = n.get_default_gateway()
-
-                    result_detail['detail'].append("Subnet %s found for network %s, updated attributes"
-                                % (exists[0].resource_id, n.name))
-
-                # Need to create a Fabric/Vlan for this network
-                if (subnet is None or (subnet is not None and subnet.vlan is None)):
-                    fabric_list = maas_fabric.Fabrics(self.maas_client)
-                    fabric_list.refresh()
-                    matching_fabrics = fabric_list.query({'name': n.name})
-
-                    fabric = None
-                    vlan = None
-
-                    if len(matching_fabrics) > 0:
-                        # Fabric exists, update VLAN
-                        fabric = matching_fabrics[0]
-
-                        vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=fabric.resource_id)
-                        vlan_list.refresh()
-                        vlan_id = n.vlan_id if n.vlan_id is not None else 0
-                        matching_vlans = vlan_list.query({'vid': vlan_id})
-
-                        if len(matching_vlans) > 0:
-                            vlan = matching_vlans[0]
-
-                            vlan.name = n.name
-                            if getattr(n, 'mtu', None) is not None:
-                                vlan.mtu = n.mtu
-
-                            if subnet is not None:
-                                subnet.vlan = vlan.resource_id
-                                subnet.update()
-                            vlan.update()
-                        else:
-                            vlan = maas_vlan.Vlan(self.maas_client, name=n.name, vid=vlan_id,
-                                        mtu=getattr(n, 'mtu', None),fabric_id=fabric.resource_id)
-                            vlan = vlan_list.add(vlan)
-
-                            if subnet is not None:
-                                subnet.vlan = vlan.resource_id
-                                subnet.update()
-
-                    else:
-                        new_fabric = maas_fabric.Fabric(self.maas_client, name=n.name)
-                        new_fabric = fabric_list.add(new_fabric)
-                        new_fabric.refresh()
-                        fabric = new_fabric
-
-                        vlan_list = maas_vlan.Vlans(self.maas_client, fabric_id=new_fabric.resource_id)
-                        vlan_list.refresh()
-                        vlan = vlan_list.single()
-
-                        vlan.name = n.name
-                        vlan.vid = n.vlan_id if n.vlan_id is not None else 0
-                        if getattr(n, 'mtu', None) is not None:
-                            vlan.mtu = n.mtu
-
-                        vlan.update()
-
-                        if subnet is not None:
-                            subnet.vlan = vlan.resource_id
-                            subnet.update()
-
-                    if subnet is None:
-                        subnet = maas_subnet.Subnet(self.maas_client, name=n.name, cidr=n.cidr, fabric=fabric.resource_id,
-                                    vlan=vlan.resource_id, gateway_ip=n.get_default_gateway())
-
-                        subnet_list = maas_subnet.Subnets(self.maas_client)
-                        subnet = subnet_list.add(subnet)
-
-            subnet_list = maas_subnet.Subnets(self.maas_client)
-            subnet_list.refresh()
-
-            action_result = hd_fields.ActionResult.Incomplete
-
-            success_rate = 0
-
-            for n in design_networks:
-                exists = subnet_list.query({'cidr': n.cidr})
-                if len(exists) > 0:
-                    subnet = exists[0]
-                    if subnet.name == n.name:
-                        success_rate = success_rate + 1
-                    else:
-                        success_rate = success_rate + 1
-                else:
-                    success_rate = success_rate + 1
-
-            if success_rate == len(design_networks):
-                action_result = hd_fields.ActionResult.Success
-            elif success_rate == - (len(design_networks)):
-                action_result = hd_fields.ActionResult.Failure
-            else:
-                action_result = hd_fields.ActionResult.PartialSuccess
-
-            self.orchestrator.task_field_update(self.task.get_id(),
-                status=hd_fields.TaskStatus.Complete,
-                result=action_result,
-                result_detail=result_detail)
@@ -1,107 +0,0 @@
-# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# ingester - Ingest host topologies to define site design and
-# persist design to helm-drydock's statemgmt service
-
-import logging
-import yaml
-import uuid
-
-import helm_drydock.objects as objects
-import helm_drydock.objects.site as site
-import helm_drydock.objects.network as network
-import helm_drydock.objects.hwprofile as hwprofile
-import helm_drydock.objects.node as node
-import helm_drydock.objects.hostprofile as hostprofile
-
-from helm_drydock.statemgmt import DesignState
-
-class Ingester(object):
-
-    def __init__(self):
-        logging.basicConfig(format="%(asctime)-15s [%(levelname)] %(module)s %(process)d %(message)s")
-        self.log = logging.Logger("ingester")
-        self.registered_plugins = {}
-
-    def enable_plugins(self, plugins=[]):
-        if len(plugins) == 0:
-            self.log.error("Cannot have an empty plugin list.")
-
-        for plugin in plugins:
-            try:
-                new_plugin = plugin()
-                plugin_name = new_plugin.get_name()
-                self.registered_plugins[plugin_name] = new_plugin
-            except:
-                self.log.error("Could not enable plugin %s" % (plugin.__name__))
-
-        if len(self.registered_plugins) == 0:
-            self.log.error("Could not enable at least one plugin")
-            raise Exception("Could not enable at least one plugin")
-    """
-    enable_plugins
-
-    params: plugins - A list of class objects denoting the ingester plugins to be enabled
-
-    Enable plugins that can be used for ingest_data calls. Each plugin should use
-    helm_drydock.ingester.plugins.IngesterPlugin as its base class. As long as one
-    enabled plugin successfully initializes, the call is considered successful. Otherwise
-    it will throw an exception
-    """
-
-    def ingest_data(self, plugin_name='', design_state=None, **kwargs):
-        if design_state is None:
-            self.log.error("ingest_data called without valid DesignState handler")
-            raise Exception("Invalid design_state handler")
-
-        design_data = None
-
-        # If no design_id is specified, instantiate a new one
-        if 'design_id' not in kwargs.keys():
-            design_id = str(uuid.uuid4())
-            design_data = objects.SiteDesign(id=design_id)
-            design_state.post_design(design_data)
-        else:
-            design_id = kwargs.get('design_id')
-            design_data = design_state.get_design(design_id)
-
-        if plugin_name in self.registered_plugins:
-            design_items = self.registered_plugins[plugin_name].ingest_data(**kwargs)
-            for m in design_items:
-                if type(m) is site.Site:
-                    design_data.set_site(m)
-                elif type(m) is network.Network:
-                    design_data.add_network(m)
-                elif type(m) is network.NetworkLink:
-                    design_data.add_network_link(m)
-                elif type(m) is hostprofile.HostProfile:
-                    design_data.add_host_profile(m)
-                elif type(m) is hwprofile.HardwareProfile:
-                    design_data.add_hardware_profile(m)
-                elif type(m) is node.BaremetalNode:
-                    design_data.add_baremetal_node(m)
-            design_state.put_design(design_data)
-        else:
-            self.log.error("Could not find plugin %s to ingest data." % (plugin_name))
-            raise LookupError("Could not find plugin %s" % plugin_name)
-    """
-    ingest_data
-
-    params: plugin_name - Which plugin should be used for ingestion
-    params: params - A map of parameters that will be passed to the plugin's ingest_data method
-
-    Execute a data ingestion using the named plugin (assuming it is enabled)
-    """
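The docstrings above describe the plugin workflow this file implemented before the rename; the tests later in this diff exercise it roughly as in the sketch below (the input filename is hypothetical):

    import drydock_provisioner.ingester.plugins.yaml
    import drydock_provisioner.statemgmt as statemgmt
    from drydock_provisioner.ingester import Ingester

    design_state = statemgmt.DesignState()

    ingester = Ingester()
    ingester.enable_plugins([drydock_provisioner.ingester.plugins.yaml.YamlIngester])

    # Without a design_id, ingest_data creates a new SiteDesign, posts it to the
    # state manager, then adds the parsed site/network/profile/node items to it.
    ingester.ingest_data(plugin_name='yaml', design_state=design_state,
                         filenames=['fullsite.yaml'])  # hypothetical input file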
setup.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# helm_drydock - A tool to consume a host topology and orchestrate
+# drydock_provisioner - A tool to consume a host topology and orchestrate
 # and monitor the provisioning of those hosts and execution of bootstrap
 # scripts
 #
@@ -32,26 +32,27 @@

 from setuptools import setup

-setup(name='helm_drydock',
+setup(name='drydock_provisioner',
       version='0.1a1',
       description='Bootstrapper for Kubernetes infrastructure',
       url='http://github.com/att-comdev/drydock',
       author='Scott Hussey - AT&T',
       author_email='sh8121@att.com',
       license='Apache 2.0',
-      packages=['helm_drydock',
-                'helm_drydock.objects',
-                'helm_drydock.ingester',
-                'helm_drydock.ingester.plugins',
-                'helm_drydock.statemgmt',
-                'helm_drydock.orchestrator',
-                'helm_drydock.control',
-                'helm_drydock.drivers',
-                'helm_drydock.drivers.oob',
-                'helm_drydock.drivers.oob.pyghmi_driver',
-                'helm_drydock.drivers.node',
-                'helm_drydock.drivers.node.maasdriver',
-                'helm_drydock.drivers.node.maasdriver.models'],
+      packages=['drydock_provisioner',
+                'drydock_provisioner.objects',
+                'drydock_provisioner.ingester',
+                'drydock_provisioner.ingester.plugins',
+                'drydock_provisioner.statemgmt',
+                'drydock_provisioner.orchestrator',
+                'drydock_provisioner.control',
+                'drydock_provisioner.drivers',
+                'drydock_provisioner.drivers.oob',
+                'drydock_provisioner.drivers.oob.pyghmi_driver',
+                'drydock_provisioner.drivers.node',
+                'drydock_provisioner.drivers.node.maasdriver',
+                'drydock_provisioner.drivers.node.maasdriver.models',
+                'drydock_provisioner.control'],
       install_requires=[
         'PyYAML',
         'pyghmi>=1.0.18',
@@ -60,6 +61,8 @@ setup(name='helm_drydock',
         'oslo.versionedobjects>=1.23.0',
         'requests',
         'oauthlib',
+        'uwsgi>1.4',
+        'bson===0.4.7'
       ]
 )
@@ -13,8 +13,8 @@
 # limitations under the License.
 import json

-import helm_drydock.config as config
-import helm_drydock.drivers.node.maasdriver.api_client as client
+import drydock_provisioner.config as config
+import drydock_provisioner.drivers.node.maasdriver.api_client as client

 class TestClass(object):

@@ -14,10 +14,10 @@
 import json
 import uuid

-import helm_drydock.config as config
-import helm_drydock.drivers.node.maasdriver.api_client as client
-import helm_drydock.drivers.node.maasdriver.models.fabric as maas_fabric
-import helm_drydock.drivers.node.maasdriver.models.subnet as maas_subnet
+import drydock_provisioner.config as config
+import drydock_provisioner.drivers.node.maasdriver.api_client as client
+import drydock_provisioner.drivers.node.maasdriver.models.fabric as maas_fabric
+import drydock_provisioner.drivers.node.maasdriver.models.subnet as maas_subnet

 class TestClass(object):

@@ -17,23 +17,23 @@ import shutil
 import os
 import uuid

-import helm_drydock.config as config
-import helm_drydock.drivers.node.maasdriver.api_client as client
-import helm_drydock.ingester.plugins.yaml
-import helm_drydock.statemgmt as statemgmt
-import helm_drydock.objects as objects
-import helm_drydock.orchestrator as orch
-import helm_drydock.objects.fields as hd_fields
-import helm_drydock.objects.task as task
-import helm_drydock.drivers as drivers
-from helm_drydock.ingester import Ingester
+import drydock_provisioner.config as config
+import drydock_provisioner.drivers.node.maasdriver.api_client as client
+import drydock_provisioner.ingester.plugins.yaml
+import drydock_provisioner.statemgmt as statemgmt
+import drydock_provisioner.objects as objects
+import drydock_provisioner.orchestrator as orch
+import drydock_provisioner.objects.fields as hd_fields
+import drydock_provisioner.objects.task as task
+import drydock_provisioner.drivers as drivers
+from drydock_provisioner.ingester import Ingester

 class TestClass(object):

     def test_client_verify(self):
         design_state = statemgmt.DesignState()
         orchestrator = orch.Orchestrator(state_manager=design_state,
-                                         enabled_drivers={'node': 'helm_drydock.drivers.node.maasdriver.driver.MaasNodeDriver'})
+                                         enabled_drivers={'node': 'drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver'})

         orch_task = orchestrator.create_task(task.OrchestratorTask,
                                              site='sitename',
@@ -57,14 +57,14 @@ class TestClass(object):
         design_state.post_design(design_data)

         ingester = Ingester()
-        ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
+        ingester.enable_plugins([drydock_provisioner.ingester.plugins.yaml.YamlIngester])
         ingester.ingest_data(plugin_name='yaml', design_state=design_state,
                              filenames=[str(input_file)], design_id=design_id)

         design_data = design_state.get_design(design_id)

         orchestrator = orch.Orchestrator(state_manager=design_state,
-                                         enabled_drivers={'node': 'helm_drydock.drivers.node.maasdriver.driver.MaasNodeDriver'})
+                                         enabled_drivers={'node': 'drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver'})

         orch_task = orchestrator.create_task(task.OrchestratorTask,
                                              site='sitename',
@@ -12,16 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from helm_drydock.ingester import Ingester
-from helm_drydock.statemgmt import DesignState
-from helm_drydock.orchestrator import Orchestrator
+from drydock_provisioner.ingester import Ingester
+from drydock_provisioner.statemgmt import DesignState
+from drydock_provisioner.orchestrator import Orchestrator

 from copy import deepcopy

 import pytest
 import shutil
 import os
-import helm_drydock.ingester.plugins.yaml
+import drydock_provisioner.ingester.plugins.yaml
 import yaml

 class TestClass(object):
@@ -32,7 +32,7 @@ class TestClass(object):

     def test_design_inheritance(self, loaded_design):
         orchestrator = Orchestrator(state_manager=loaded_design,
-                                    enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'})
+                                    enabled_drivers={'oob': 'drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver'})

         design_data = orchestrator.load_design_data("sitename")

@@ -63,7 +63,7 @@ class TestClass(object):
         design_state.post_design_base(design_data)

         ingester = Ingester()
-        ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
+        ingester.enable_plugins([drydock_provisioner.ingester.plugins.yaml.YamlIngester])
         ingester.ingest_data(plugin_name='yaml', design_state=design_state, filenames=[str(input_file)])

         return design_state
@@ -12,14 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from helm_drydock.ingester import Ingester
-from helm_drydock.statemgmt import DesignState
-import helm_drydock.objects as objects
+from drydock_provisioner.ingester import Ingester
+from drydock_provisioner.statemgmt import DesignState
+import drydock_provisioner.objects as objects

 import pytest
 import shutil
 import os
-import helm_drydock.ingester.plugins.yaml
+import drydock_provisioner.ingester.plugins.yaml

 class TestClass(object):

@@ -37,7 +37,7 @@ class TestClass(object):
         design_state.post_design(design_data)

         ingester = Ingester()
-        ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
+        ingester.enable_plugins([drydock_provisioner.ingester.plugins.yaml.YamlIngester])
         ingester.ingest_data(plugin_name='yaml', design_state=design_state,
                              filenames=[str(input_file)], design_id=design_id)

@@ -59,7 +59,7 @@ class TestClass(object):
         design_state.post_design(design_data)

         ingester = Ingester()
-        ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
+        ingester.enable_plugins([drydock_provisioner.ingester.plugins.yaml.YamlIngester])
         ingester.ingest_data(plugin_name='yaml', design_state=design_state, design_id=design_id,
                              filenames=[str(profiles_file), str(networks_file), str(nodes_file)])

@@ -16,7 +16,7 @@ import shutil
 import os
 import uuid

-from helm_drydock.ingester.plugins.yaml import YamlIngester
+from drydock_provisioner.ingester.plugins.yaml import YamlIngester

 class TestClass(object):

@@ -14,8 +14,8 @@

 import pytest

-import helm_drydock.objects as objects
-from helm_drydock.objects import fields
+import drydock_provisioner.objects as objects
+from drydock_provisioner.objects import fields

 class TestClass(object):

@@ -23,7 +23,7 @@ class TestClass(object):
         objects.register_all()

         model_attr = {
-            'versioned_object.namespace': 'helm_drydock.objects',
+            'versioned_object.namespace': 'drydock_provisioner.objects',
             'versioned_object.name': 'HardwareProfile',
             'versioned_object.version': '1.0',
             'versioned_object.data': {
@@ -38,13 +38,13 @@ class TestClass(object):
                 'bootstrap_protocol': 'pxe',
                 'pxe_interface': '0',
                 'devices': {
-                    'versioned_object.namespace': 'helm_drydock.objects',
+                    'versioned_object.namespace': 'drydock_provisioner.objects',
                     'versioned_object.name': 'HardwareDeviceAliasList',
                     'versioned_object.version': '1.0',
                     'versioned_object.data': {
                         'objects': [
                             {
-                                'versioned_object.namespace': 'helm_drydock.objects',
+                                'versioned_object.namespace': 'drydock_provisioner.objects',
                                 'versioned_object.name': 'HardwareDeviceAlias',
                                 'versioned_object.version': '1.0',
                                 'versioned_object.data': {
@@ -56,7 +56,7 @@ class TestClass(object):
                                 }
                             },
                             {
-                                'versioned_object.namespace': 'helm_drydock.objects',
+                                'versioned_object.namespace': 'drydock_provisioner.objects',
                                 'versioned_object.name': 'HardwareDeviceAlias',
                                 'versioned_object.version': '1.0',
                                 'versioned_object.data': {
@@ -18,11 +18,11 @@
 import threading
 import time

-import helm_drydock.orchestrator as orch
-import helm_drydock.objects.fields as hd_fields
-import helm_drydock.statemgmt as statemgmt
-import helm_drydock.objects.task as task
-import helm_drydock.drivers as drivers
+import drydock_provisioner.orchestrator as orch
+import drydock_provisioner.objects.fields as hd_fields
+import drydock_provisioner.statemgmt as statemgmt
+import drydock_provisioner.objects.task as task
+import drydock_provisioner.drivers as drivers


 class TestClass(object):
@@ -23,15 +23,15 @@ import os
 import shutil
 import uuid

-from helm_drydock.ingester import Ingester
+from drydock_provisioner.ingester import Ingester

-import helm_drydock.orchestrator as orch
-import helm_drydock.objects.fields as hd_fields
-import helm_drydock.statemgmt as statemgmt
-import helm_drydock.objects as objects
-import helm_drydock.objects.task as task
-import helm_drydock.drivers as drivers
-import helm_drydock.ingester.plugins.yaml as yaml_ingester
+import drydock_provisioner.orchestrator as orch
+import drydock_provisioner.objects.fields as hd_fields
+import drydock_provisioner.statemgmt as statemgmt
+import drydock_provisioner.objects as objects
+import drydock_provisioner.objects.task as task
+import drydock_provisioner.drivers as drivers
+import drydock_provisioner.ingester.plugins.yaml as yaml_ingester

 class TestClass(object):

@@ -44,7 +44,7 @@ class TestClass(object):
         #mocker.patch.object('pyghmi.ipmi.command.Command','get_asset_tag')

         orchestrator = orch.Orchestrator(state_manager=loaded_design,
-                                         enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'})
+                                         enabled_drivers={'oob': 'drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver'})

         orch_task = orchestrator.create_task(task.OrchestratorTask,
                                              site='sitename',
@@ -63,7 +63,7 @@ class TestClass(object):
         #mocker.patch.object('pyghmi.ipmi.command.Command','set_bootdev')

         orchestrator = orch.Orchestrator(state_manager=loaded_design,
-                                         enabled_drivers={'oob': 'helm_drydock.drivers.oob.pyghmi_driver.PyghmiDriver'})
+                                         enabled_drivers={'oob': 'drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver'})

         orch_task = orchestrator.create_task(task.OrchestratorTask,
                                              site='sitename',
|
@ -15,8 +15,8 @@ import pytest
|
|||||||
import shutil
|
import shutil
|
||||||
|
|
||||||
|
|
||||||
import helm_drydock.objects as objects
|
import drydock_provisioner.objects as objects
|
||||||
import helm_drydock.statemgmt as statemgmt
|
import drydock_provisioner.statemgmt as statemgmt
|
||||||
|
|
||||||
class TestClass(object):
|
class TestClass(object):
|
||||||
|
|
||||||
|