Merge "Enforce flake8 E123/6/7/8 in ironic"
commit 309707bab0
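The pycodestyle checks named in the title all police continuation-line indentation: E123 (closing bracket does not match the indentation of the opening line's bracket), E126 (continuation line over-indented for hanging indent), E127 (continuation line over-indented for visual indent), and E128 (continuation line under-indented for visual indent). As a minimal sketch of the pattern repeated in the hunks below (illustrative only, not a line from this commit):

    # Flagged by E128: the continuation is not aligned with the opening paren.
    ret = pecan.request.rpcapi.get_driver_properties(pecan.request.context,
            driver_name, topic=topic)

    # Compliant alternative: nothing after the open paren, 4-space hanging indent.
    ret = pecan.request.rpcapi.get_driver_properties(
        pecan.request.context, driver_name, topic=topic)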
@@ -27,7 +27,7 @@ API_SERVICE_OPTS = [
                default=1000,
                help='The maximum number of items returned in a single '
                     'response from a collection resource.'),
-    ]
+]

 CONF = cfg.CONF
 opt_group = cfg.OptGroup(name='api',
@@ -24,7 +24,8 @@ from ironic.api import hooks
 from ironic.api import middleware

 api_opts = [
-    cfg.StrOpt('auth_strategy',
+    cfg.StrOpt(
+        'auth_strategy',
         default='keystone',
         help='Authentication strategy used by ironic-api: one of "keystone" '
              'or "noauth". "noauth" should not be used in a production '

@@ -33,7 +34,7 @@ api_opts = [
         default=False,
         help=('Enable pecan debug mode. WARNING: this is insecure '
               'and should not be used in a production environment.')),
-    ]
+]

 CONF = cfg.CONF
 CONF.register_opts(api_opts)
@@ -70,8 +70,8 @@ class Version(object):
         :param latest_version: version to use if latest is requested
         :raises: webob.HTTPNotAcceptable
         """
-        (self.major, self.minor) = Version.parse_headers(headers,
-                default_version, latest_version)
+        (self.major, self.minor) = Version.parse_headers(
+            headers, default_version, latest_version)

     def __repr__(self):
         return '%s.%s' % (self.major, self.minor)
@@ -122,9 +122,9 @@ class V1(base.APIBase):
         v1.chassis = [link.Link.make_link('self', pecan.request.host_url,
                                           'chassis', ''),
                       link.Link.make_link('bookmark',
-                                         pecan.request.host_url,
-                                         'chassis', '',
-                                         bookmark=True)
+                                          pecan.request.host_url,
+                                          'chassis', '',
+                                          bookmark=True)
                       ]
         v1.nodes = [link.Link.make_link('self', pecan.request.host_url,
                                         'nodes', ''),
@@ -173,15 +173,17 @@ class Controller(rest.RestController):
             raise exc.HTTPNotAcceptable(_(
                 "Mutually exclusive versions requested. Version %(ver)s "
                 "requested but not supported by this service. The supported "
-                "version range is: [%(min)s, %(max)s].") % {'ver': version,
-                'min': MIN_VER_STR, 'max': MAX_VER_STR}, headers=headers)
+                "version range is: [%(min)s, %(max)s].") %
+                {'ver': version, 'min': MIN_VER_STR, 'max': MAX_VER_STR},
+                headers=headers)
         # ensure the minor version is within the supported range
         if version < MIN_VER or version > MAX_VER:
             raise exc.HTTPNotAcceptable(_(
                 "Version %(ver)s was requested but the minor version is not "
                 "supported by this service. The supported version range is: "
-                "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR,
-                'max': MAX_VER_STR}, headers=headers)
+                "[%(min)s, %(max)s].") %
+                {'ver': version, 'min': MIN_VER_STR, 'max': MAX_VER_STR},
+                headers=headers)

     @pecan.expose()
     def _route(self, args):
@@ -160,9 +160,9 @@ class ChassisController(rest.RestController):
                                                           marker)

         if sort_key in self.invalid_sort_key_list:
-            raise exception.InvalidParameterValue(_(
-                "The sort_key value %(key)s is an invalid field for sorting")
-                % {'key': sort_key})
+            raise exception.InvalidParameterValue(
+                _("The sort_key value %(key)s is an invalid field for sorting")
+                % {'key': sort_key})

         chassis = objects.Chassis.list(pecan.request.context, limit,
                                        marker_obj, sort_key=sort_key,

@@ -174,7 +174,7 @@ class ChassisController(rest.RestController):
                                        sort_dir=sort_dir)

     @expose.expose(ChassisCollection, types.uuid,
-            int, wtypes.text, wtypes.text)
+                   int, wtypes.text, wtypes.text)
     def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
         """Retrieve a list of chassis.

@@ -186,7 +186,7 @@ class ChassisController(rest.RestController):
         return self._get_chassis_collection(marker, limit, sort_key, sort_dir)

     @expose.expose(ChassisCollection, types.uuid, int,
-            wtypes.text, wtypes.text)
+                   wtypes.text, wtypes.text)
     def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
         """Retrieve a list of chassis with detail.
@@ -239,8 +239,9 @@ class ChassisController(rest.RestController):
         rpc_chassis = objects.Chassis.get_by_uuid(pecan.request.context,
                                                   chassis_uuid)
         try:
-            chassis = Chassis(**api_utils.apply_jsonpatch(
-                rpc_chassis.as_dict(), patch))
+            chassis = Chassis(
+                **api_utils.apply_jsonpatch(rpc_chassis.as_dict(), patch))
+
         except api_utils.JSONPATCH_EXCEPTIONS as e:
             raise exception.PatchError(patch=patch, reason=e)
@@ -41,8 +41,8 @@ class Collection(base.APIBase):
         resource_url = url or self._type
         q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
         next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
-                'args': q_args, 'limit': limit,
-                'marker': self.collection[-1].uuid}
+            'args': q_args, 'limit': limit,
+            'marker': self.collection[-1].uuid}

         return link.Link.make_link('next', pecan.request.host_url,
                                    resource_url, next_args).href
@@ -68,9 +68,9 @@ class Driver(base.APIBase):
                                         pecan.request.host_url,
                                         'drivers', name),
                     link.Link.make_link('bookmark',
-                                       pecan.request.host_url,
-                                       'drivers', name,
-                                       bookmark=True)
+                                        pecan.request.host_url,
+                                        'drivers', name,
+                                        bookmark=True)
                     ]
         return driver
@@ -127,13 +127,13 @@ class DriverPassthruController(rest.RestController):
         if driver_name not in _VENDOR_METHODS:
             topic = pecan.request.rpcapi.get_topic_for_driver(driver_name)
             ret = pecan.request.rpcapi.get_driver_vendor_passthru_methods(
-                      pecan.request.context, driver_name, topic=topic)
+                pecan.request.context, driver_name, topic=topic)
             _VENDOR_METHODS[driver_name] = ret

         return _VENDOR_METHODS[driver_name]

     @expose.expose(wtypes.text, wtypes.text, wtypes.text,
-                  body=wtypes.text)
+                   body=wtypes.text)
     def _default(self, driver_name, method, data=None):
         """Call a driver API extension.

@@ -151,8 +151,8 @@ class DriverPassthruController(rest.RestController):
         http_method = pecan.request.method.upper()
         topic = pecan.request.rpcapi.get_topic_for_driver(driver_name)
         ret, is_async = pecan.request.rpcapi.driver_vendor_passthru(
-                            pecan.request.context, driver_name, method,
-                            http_method, data, topic=topic)
+            pecan.request.context, driver_name, method,
+            http_method, data, topic=topic)
         status_code = 202 if is_async else 200
         return wsme.api.Response(ret, status_code=status_code)
@@ -204,7 +204,7 @@ class DriversController(rest.RestController):
         if driver_name not in _DRIVER_PROPERTIES:
             topic = pecan.request.rpcapi.get_topic_for_driver(driver_name)
             properties = pecan.request.rpcapi.get_driver_properties(
-                             pecan.request.context, driver_name, topic=topic)
+                pecan.request.context, driver_name, topic=topic)
             _DRIVER_PROPERTIES[driver_name] = properties

         return _DRIVER_PROPERTIES[driver_name]
@@ -133,7 +133,7 @@ class BootDeviceController(rest.RestController):
                                                    rpc_node.uuid, topic)

     @expose.expose(None, types.uuid_or_name, wtypes.text, types.boolean,
-            status_code=204)
+                   status_code=204)
     def put(self, node_ident, boot_device, persistent=False):
         """Set the boot device for a node.

@@ -226,7 +226,7 @@ class NodeConsoleController(rest.RestController):
         return ConsoleInfo(console_enabled=console_state, console_info=console)

     @expose.expose(None, types.uuid_or_name, types.boolean,
-            status_code=202)
+                   status_code=202)
     def put(self, node_ident, enabled):
         """Start and stop the node console.

@@ -315,7 +315,7 @@ class NodeStatesController(rest.RestController):
         return NodeStates.convert(rpc_node)

     @expose.expose(None, types.uuid_or_name, wtypes.text,
-            status_code=202)
+                   status_code=202)
     def power(self, node_ident, target):
         """Set the power state of the node.
@@ -336,14 +336,14 @@ class NodeStatesController(rest.RestController):
                           ir_states.POWER_OFF,
                           ir_states.REBOOT]:
             raise exception.InvalidStateRequested(
-                    action=target, node=node_ident,
-                    state=rpc_node.power_state)
+                action=target, node=node_ident,
+                state=rpc_node.power_state)

         # Don't change power state for nodes in cleaning
         elif rpc_node.provision_state == ir_states.CLEANING:
             raise exception.InvalidStateRequested(
-                    action=target, node=node_ident,
-                    state=rpc_node.provision_state)
+                action=target, node=node_ident,
+                state=rpc_node.provision_state)

         pecan.request.rpcapi.change_node_power_state(pecan.request.context,
                                                      rpc_node.uuid, target,
@@ -353,7 +353,7 @@ class NodeStatesController(rest.RestController):
         pecan.response.location = link.build_url('nodes', url_args)

     @expose.expose(None, types.uuid_or_name, wtypes.text,
-            wtypes.text, status_code=202)
+                   wtypes.text, status_code=202)
     def provision(self, node_ident, target, configdrive=None):
         """Asynchronous trigger the provisioning of the node.

@@ -400,8 +400,8 @@ class NodeStatesController(rest.RestController):
         m.initialize(rpc_node.provision_state)
         if not m.is_valid_event(ir_states.VERBS.get(target, target)):
             raise exception.InvalidStateRequested(
-                    action=target, node=rpc_node.uuid,
-                    state=rpc_node.provision_state)
+                action=target, node=rpc_node.uuid,
+                state=rpc_node.provision_state)

         if configdrive and target != ir_states.ACTIVE:
             msg = (_('Adding a config drive is only supported when setting '
@@ -421,14 +421,14 @@ class NodeStatesController(rest.RestController):
                                                        None, topic)
         elif target == ir_states.DELETED:
             pecan.request.rpcapi.do_node_tear_down(
-                        pecan.request.context, rpc_node.uuid, topic)
+                pecan.request.context, rpc_node.uuid, topic)
         elif target == ir_states.VERBS['inspect']:
             pecan.request.rpcapi.inspect_hardware(
                 pecan.request.context, rpc_node.uuid, topic=topic)
         elif target in (
                 ir_states.VERBS['manage'], ir_states.VERBS['provide']):
             pecan.request.rpcapi.do_provisioning_action(
-                        pecan.request.context, rpc_node.uuid, target, topic)
+                pecan.request.context, rpc_node.uuid, target, topic)
         else:
             msg = (_('The requested action "%(action)s" could not be '
                      'understood.') % {'action': target})
@@ -622,8 +622,9 @@ class Node(base.APIBase):
                      target_provision_state=ir_states.NOSTATE,
                      reservation=None, driver='fake', driver_info={},
                      driver_internal_info={}, extra={},
-                     properties={'memory_mb': '1024', 'local_gb': '10',
-                     'cpus': '1'}, updated_at=time, created_at=time,
+                     properties={
+                         'memory_mb': '1024', 'local_gb': '10', 'cpus': '1'},
+                     updated_at=time, created_at=time,
                      provision_updated_at=time, instance_info={},
                      maintenance=False, maintenance_reason=None,
                      inspection_finished_at=None, inspection_started_at=time,
@@ -685,13 +686,13 @@ class NodeVendorPassthruController(rest.RestController):
         if rpc_node.driver not in _VENDOR_METHODS:
             topic = pecan.request.rpcapi.get_topic_for(rpc_node)
             ret = pecan.request.rpcapi.get_node_vendor_passthru_methods(
-                      pecan.request.context, rpc_node.uuid, topic=topic)
+                pecan.request.context, rpc_node.uuid, topic=topic)
             _VENDOR_METHODS[rpc_node.driver] = ret

         return _VENDOR_METHODS[rpc_node.driver]

     @expose.expose(wtypes.text, types.uuid_or_name, wtypes.text,
-                  body=wtypes.text)
+                   body=wtypes.text)
     def _default(self, node_ident, method, data=None):
         """Call a vendor extension.

@@ -712,8 +713,8 @@ class NodeVendorPassthruController(rest.RestController):

         http_method = pecan.request.method.upper()
         ret, is_async = pecan.request.rpcapi.vendor_passthru(
-                            pecan.request.context, rpc_node.uuid, method,
-                            http_method, data, topic)
+            pecan.request.context, rpc_node.uuid, method,
+            http_method, data, topic)
         status_code = 202 if is_async else 200
         return wsme.api.Response(ret, status_code=status_code)
@@ -734,7 +735,7 @@ class NodeMaintenanceController(rest.RestController):
                                                    rpc_node, topic=topic)

     @expose.expose(None, types.uuid_or_name, wtypes.text,
-            status_code=202)
+                   status_code=202)
     def put(self, node_ident, reason=None):
         """Put the node in maintenance mode.
@@ -793,8 +794,8 @@ class NodesController(rest.RestController):
                               maintenance, marker, limit, sort_key, sort_dir,
                               expand=False, resource_url=None):
         if self.from_chassis and not chassis_uuid:
-            raise exception.MissingParameterValue(_(
-                  "Chassis id not specified."))
+            raise exception.MissingParameterValue(
+                _("Chassis id not specified."))

         limit = api_utils.validate_limit(limit)
         sort_dir = api_utils.validate_sort_dir(sort_dir)

@@ -805,9 +806,9 @@ class NodesController(rest.RestController):
                                                        marker)

         if sort_key in self.invalid_sort_key_list:
-            raise exception.InvalidParameterValue(_(
-                "The sort_key value %(key)s is an invalid field for sorting")
-                % {'key': sort_key})
+            raise exception.InvalidParameterValue(
+                _("The sort_key value %(key)s is an invalid field for "
+                  "sorting") % {'key': sort_key})

         if instance_uuid:
             nodes = self._get_nodes_by_instance(instance_uuid)
@@ -846,9 +847,8 @@ class NodesController(rest.RestController):
         except exception.InstanceNotFound:
             return []

-    @expose.expose(NodeCollection, types.uuid, types.uuid,
-            types.boolean, types.boolean, types.uuid, int, wtypes.text,
-            wtypes.text)
+    @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean,
+                   types.boolean, types.uuid, int, wtypes.text, wtypes.text)
     def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
                 maintenance=None, marker=None, limit=None, sort_key='id',
                 sort_dir='asc'):

@@ -873,9 +873,8 @@ class NodesController(rest.RestController):
                                           associated, maintenance, marker,
                                           limit, sort_key, sort_dir)

-    @expose.expose(NodeCollection, types.uuid, types.uuid,
-            types.boolean, types.boolean, types.uuid, int, wtypes.text,
-            wtypes.text)
+    @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean,
+                   types.boolean, types.uuid, int, wtypes.text, wtypes.text)
     def detail(self, chassis_uuid=None, instance_uuid=None, associated=None,
                maintenance=None, marker=None, limit=None, sort_key='id',
                sort_dir='asc'):
@@ -929,7 +928,7 @@ class NodesController(rest.RestController):

         topic = pecan.request.rpcapi.get_topic_for(rpc_node)
         return pecan.request.rpcapi.validate_driver_interfaces(
-                   pecan.request.context, rpc_node.uuid, topic)
+            pecan.request.context, rpc_node.uuid, topic)

     @expose.expose(Node, types.uuid_or_name)
     def get_one(self, node_ident):

@@ -1076,7 +1075,7 @@ class NodesController(rest.RestController):
                                               status_code=409)

         new_node = pecan.request.rpcapi.update_node(
-                       pecan.request.context, rpc_node, topic)
+            pecan.request.context, rpc_node, topic)

         return Node.convert_with_links(new_node)
@@ -182,8 +182,8 @@ class PortsController(rest.RestController):
                               sort_key, sort_dir, expand=False,
                               resource_url=None):
         if self.from_nodes and not node_ident:
-            raise exception.MissingParameterValue(_(
-                  "Node identifier not specified."))
+            raise exception.MissingParameterValue(
+                _("Node identifier not specified."))

         limit = api_utils.validate_limit(limit)
         sort_dir = api_utils.validate_sort_dir(sort_dir)

@@ -194,9 +194,9 @@ class PortsController(rest.RestController):
                                                     marker)

         if sort_key in self.invalid_sort_key_list:
-            raise exception.InvalidParameterValue(_(
-                "The sort_key value %(key)s is an invalid field for sorting"
-                ) % {'key': sort_key})
+            raise exception.InvalidParameterValue(
+                _("The sort_key value %(key)s is an invalid field for "
+                  "sorting") % {'key': sort_key})

         if node_ident:
             # FIXME(comstud): Since all we need is the node ID, we can
@@ -236,8 +236,8 @@ class PortsController(rest.RestController):
             return []

     @expose.expose(PortCollection, types.uuid_or_name, types.uuid,
-            types.macaddress, types.uuid, int, wtypes.text,
-            wtypes.text)
+                   types.macaddress, types.uuid, int, wtypes.text,
+                   wtypes.text)
     def get_all(self, node=None, node_uuid=None, address=None, marker=None,
                 limit=None, sort_key='id', sort_dir='asc'):
         """Retrieve a list of ports.

@@ -268,8 +268,8 @@ class PortsController(rest.RestController):
                                           limit, sort_key, sort_dir)

     @expose.expose(PortCollection, types.uuid_or_name, types.uuid,
-            types.macaddress, types.uuid, int, wtypes.text,
-            wtypes.text)
+                   types.macaddress, types.uuid, int, wtypes.text,
+                   wtypes.text)
     def detail(self, node=None, node_uuid=None, address=None, marker=None,
                limit=None, sort_key='id', sort_dir='asc'):
         """Retrieve a list of ports with detail.

@@ -375,7 +375,7 @@ class PortsController(rest.RestController):
         topic = pecan.request.rpcapi.get_topic_for(rpc_node)

         new_port = pecan.request.rpcapi.update_port(
-                       pecan.request.context, rpc_port, topic)
+            pecan.request.context, rpc_port, topic)

         return Port.convert_with_links(new_port)
@@ -46,9 +46,9 @@ def main():
     host = CONF.api.host_ip
     port = CONF.api.port
     wsgi = simple_server.make_server(
-                host, port,
-                app.VersionSelectorApplication(),
-                server_class=ThreadedSimpleServer)
+        host, port,
+        app.VersionSelectorApplication(),
+        server_class=ThreadedSimpleServer)

     LOG = log.getLogger(__name__)
     LOG.info(_LI("Serving on http://%(host)s:%(port)s"),
@@ -54,17 +54,19 @@ class DBCommand(object):
 def add_command_parsers(subparsers):
     command_object = DBCommand()

-    parser = subparsers.add_parser('upgrade',
-            help="Upgrade the database schema to the latest version. "
-                 "Optionally, use --revision to specify an alembic revision "
-                 "string to upgrade to.")
+    parser = subparsers.add_parser(
+        'upgrade',
+        help="Upgrade the database schema to the latest version. "
+             "Optionally, use --revision to specify an alembic revision "
+             "string to upgrade to.")
     parser.set_defaults(func=command_object.upgrade)
     parser.add_argument('--revision', nargs='?')

-    parser = subparsers.add_parser('downgrade',
-            help="Downgrade the database schema to the oldest revision. "
-                 "While optional, one should generally use --revision to "
-                 "specify the alembic revision string to downgrade to.")
+    parser = subparsers.add_parser(
+        'downgrade',
+        help="Downgrade the database schema to the oldest revision. "
+             "While optional, one should generally use --revision to "
+             "specify the alembic revision string to downgrade to.")
    parser.set_defaults(func=command_object.downgrade)
    parser.add_argument('--revision', nargs='?')
@@ -72,19 +74,22 @@ def add_command_parsers(subparsers):
     parser.add_argument('--revision', nargs='?')
     parser.set_defaults(func=command_object.stamp)

-    parser = subparsers.add_parser('revision',
-            help="Create a new alembic revision. "
-                 "Use --message to set the message string.")
+    parser = subparsers.add_parser(
+        'revision',
+        help="Create a new alembic revision. "
+             "Use --message to set the message string.")
     parser.add_argument('-m', '--message')
     parser.add_argument('--autogenerate', action='store_true')
     parser.set_defaults(func=command_object.revision)

-    parser = subparsers.add_parser('version',
-            help="Print the current version information and exit.")
+    parser = subparsers.add_parser(
+        'version',
+        help="Print the current version information and exit.")
     parser.set_defaults(func=command_object.version)

-    parser = subparsers.add_parser('create_schema',
-            help="Create the database schema.")
+    parser = subparsers.add_parser(
+        'create_schema',
+        help="Create the database schema.")
     parser.set_defaults(func=command_object.create_schema)
@@ -299,7 +299,7 @@ def _print_type(opt_type, opt_name, opt_default):
     if opt_type == STROPT:
         assert(isinstance(opt_default, six.string_types))
         print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
-                                    opt_default)))
+                                                      opt_default)))
     elif opt_type == BOOLOPT:
         assert(isinstance(opt_default, bool))
         print('#%s=%s' % (opt_name, str(opt_default).lower()))

@@ -316,7 +316,7 @@ def _print_type(opt_type, opt_name, opt_default):
     elif opt_type == DICTOPT:
         assert(isinstance(opt_default, dict))
         opt_default_strlist = [str(key) + ':' + str(value)
-                              for (key, value) in opt_default.items()]
+                               for (key, value) in opt_default.items()]
         print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
     elif opt_type == MULTISTROPT:
         assert(isinstance(opt_default, list))
@@ -157,8 +157,8 @@ class DiskPartitioner(object):
         max_retries = CONF.disk_partitioner.check_device_max_retries

         timer = loopingcall.FixedIntervalLoopingCall(
-                    self._wait_for_disk_to_become_available,
-                    retries, max_retries, pids, fuser_err)
+            self._wait_for_disk_to_become_available,
+            retries, max_retries, pids, fuser_err)
         timer.start(interval=interval).wait()

         if retries[0] > max_retries:
@@ -25,17 +25,17 @@ from ironic.common.i18n import _LI
 LOG = log.getLogger(__name__)

 driver_opts = [
-    cfg.ListOpt('enabled_drivers',
-                default=['pxe_ipmitool'],
-                help='Specify the list of drivers to load during service '
-                'initialization. Missing drivers, or drivers which '
-                'fail to initialize, will prevent the conductor '
-                'service from starting. The option default is a '
-                'recommended set of production-oriented drivers. A '
-                'complete list of drivers present on your system may '
-                'be found by enumerating the "ironic.drivers" '
-                'entrypoint. An example may be found in the '
-                'developer documentation online.'),
+    cfg.ListOpt('enabled_drivers',
+                default=['pxe_ipmitool'],
+                help='Specify the list of drivers to load during service '
+                     'initialization. Missing drivers, or drivers which '
+                     'fail to initialize, will prevent the conductor '
+                     'service from starting. The option default is a '
+                     'recommended set of production-oriented drivers. A '
+                     'complete list of drivers present on your system may '
+                     'be found by enumerating the "ironic.drivers" '
+                     'entrypoint. An example may be found in the '
+                     'developer documentation online.'),
 ]

 CONF = cfg.CONF
@@ -119,11 +119,11 @@ class DriverFactory(object):
                 return ext.name in CONF.enabled_drivers

             cls._extension_manager = (
-                    dispatch.NameDispatchExtensionManager(
-                        'ironic.drivers',
-                        _check_func,
-                        invoke_on_load=True,
-                        on_load_failure_callback=_catch_driver_not_found))
+                dispatch.NameDispatchExtensionManager(
+                    'ironic.drivers',
+                    _check_func,
+                    invoke_on_load=True,
+                    on_load_failure_callback=_catch_driver_not_found))

             # NOTE(deva): if we were unable to load any configured driver, perhaps
             # because it is not present on the system, raise an error.

@@ -136,7 +136,7 @@ class DriverFactory(object):
                 raise exception.DriverNotFound(driver_name=names)

             LOG.info(_LI("Loaded the following drivers: %s"),
-                    cls._extension_manager.names())
+                     cls._extension_manager.names())

     @property
     def names(self):
@@ -74,7 +74,7 @@ class FSM(object):
         return self._states[self._current.name]['terminal']

     def add_state(self, state, on_enter=None, on_exit=None,
-            target=None, terminal=None, stable=False):
+                  target=None, terminal=None, stable=False):
         """Adds a given state to the state machine.

         The on_enter and on_exit callbacks, if provided will be expected to

@@ -100,7 +100,7 @@ class FSM(object):
             raise ValueError(_("On exit callback must be callable"))
         if target is not None and target not in self._states:
             raise excp.InvalidState(_("Target state '%s' does not exist")
-                    % target)
+                                    % target)
         if target is not None and not self._states[target]['stable']:
             raise excp.InvalidState(
                 _("Target state '%s' is not a 'stable' state") % target)
@@ -127,13 +127,13 @@ class BaseImageService(object):
                 host = self.glance_host
                 port = self.glance_port
                 error_msg = _LE("Error contacting glance server "
-                        "'%(host)s:%(port)s' for '%(method)s', attempt"
-                        " %(attempt)s of %(num_attempts)s failed.")
+                                "'%(host)s:%(port)s' for '%(method)s', attempt"
+                                " %(attempt)s of %(num_attempts)s failed.")
                 LOG.exception(error_msg, {'host': host,
-                                         'port': port,
-                                         'num_attempts': num_attempts,
-                                         'attempt': attempt,
-                                         'method': method})
+                                          'port': port,
+                                          'num_attempts': num_attempts,
+                                          'attempt': attempt,
+                                          'method': method})
                 if attempt == num_attempts:
                     raise exception.GlanceConnectionFailed(host=host,
                                                            port=port,
@@ -83,7 +83,7 @@ class HashRing(object):
             self.replicas = replicas if replicas <= len(hosts) else len(hosts)
         except TypeError:
             raise exception.Invalid(
-                    _("Invalid hosts supplied when building HashRing."))
+                _("Invalid hosts supplied when building HashRing."))

         self._host_hashes = {}
         for host in hosts:

@@ -114,7 +114,7 @@ class HashRing(object):
             return position if position < len(self._partitions) else 0
         except TypeError:
             raise exception.Invalid(
-                    _("Invalid data supplied to HashRing.get_hosts."))
+                _("Invalid data supplied to HashRing.get_hosts."))

     def get_hosts(self, data, ignore_hosts=None):
         """Get the list of hosts which the supplied data maps onto.

@@ -197,4 +197,4 @@ class HashRingManager(object):
             return self.ring[driver_name]
         except KeyError:
             raise exception.DriverNotFound(
-                    _("The driver '%s' is unknown.") % driver_name)
+                _("The driver '%s' is unknown.") % driver_name)
@@ -137,7 +137,8 @@ class HttpImageService(BaseImageService):
         try:
             response = requests.head(image_href)
             if response.status_code != 200:
-                raise exception.ImageRefValidationFailed(image_href=image_href,
+                raise exception.ImageRefValidationFailed(
+                    image_href=image_href,
                     reason=_("Got HTTP code %s instead of 200 in response to "
                              "HEAD request.") % response.status_code)
         except requests.RequestException as e:

@@ -159,7 +160,8 @@ class HttpImageService(BaseImageService):
         try:
             response = requests.get(image_href, stream=True)
             if response.status_code != 200:
-                raise exception.ImageRefValidationFailed(image_href=image_href,
+                raise exception.ImageRefValidationFailed(
+                    image_href=image_href,
                     reason=_("Got HTTP code %s instead of 200 in response to "
                              "GET request.") % response.status_code)
         with response.raw as input_img:

@@ -181,7 +183,8 @@ class HttpImageService(BaseImageService):
         response = self.validate_href(image_href)
         image_size = response.headers.get('Content-Length')
         if image_size is None:
-            raise exception.ImageRefValidationFailed(image_href=image_href,
+            raise exception.ImageRefValidationFailed(
+                image_href=image_href,
                 reason=_("Cannot determine image size as there is no "
                          "Content-Length header specified in response "
                          "to HEAD request."))

@@ -204,7 +207,8 @@ class FileImageService(BaseImageService):
         """
         image_path = urlparse.urlparse(image_href).path
         if not os.path.isfile(image_path):
-            raise exception.ImageRefValidationFailed(image_href=image_href,
+            raise exception.ImageRefValidationFailed(
+                image_href=image_href,
                 reason=_("Specified image file not found."))
         return image_path
@@ -45,14 +45,14 @@ image_opts = [
                 help='If True, convert backing images to "raw" disk image '
                      'format.'),
     cfg.StrOpt('isolinux_bin',
-                default='/usr/lib/syslinux/isolinux.bin',
-                help='Path to isolinux binary file.'),
+               default='/usr/lib/syslinux/isolinux.bin',
+               help='Path to isolinux binary file.'),
     cfg.StrOpt('isolinux_config_template',
-                default=paths.basedir_def('common/isolinux_config.template'),
-                help='Template file for isolinux configuration file.'),
+               default=paths.basedir_def('common/isolinux_config.template'),
+               help='Template file for isolinux configuration file.'),
     cfg.StrOpt('grub_config_template',
-                default=paths.basedir_def('common/grub_conf.template'),
-                help='Template file for grub configuration file.'),
+               default=paths.basedir_def('common/grub_conf.template'),
+               help='Template file for grub configuration file.'),
 ]
@@ -141,7 +141,7 @@ def create_vfat_image(output_file, files_info=None, parameters=None,
     if parameters:
         parameters_file = os.path.join(tmpdir, parameters_file)
         params_list = ['%(key)s=%(val)s' % {'key': k, 'val': v}
-                      for k, v in parameters.items()]
+                       for k, v in parameters.items()]
         file_contents = '\n'.join(params_list)
         utils.write_to_file(parameters_file, file_contents)
@@ -209,10 +209,10 @@ def create_isolinux_image_for_bios(output_file, kernel, ramdisk,

     with utils.tempdir() as tmpdir:
         files_info = {
-                kernel: 'vmlinuz',
-                ramdisk: 'initrd',
-                CONF.isolinux_bin: ISOLINUX_BIN,
-                }
+            kernel: 'vmlinuz',
+            ramdisk: 'initrd',
+            CONF.isolinux_bin: ISOLINUX_BIN,
+        }
         try:
             _create_root_fs(tmpdir, files_info)
         except (OSError, IOError) as e:

@@ -264,10 +264,10 @@ def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,

     with utils.tempdir() as tmpdir:
         files_info = {
-                kernel: 'vmlinuz',
-                ramdisk: 'initrd',
-                CONF.isolinux_bin: ISOLINUX_BIN,
-                }
+            kernel: 'vmlinuz',
+            ramdisk: 'initrd',
+            CONF.isolinux_bin: ISOLINUX_BIN,
+        }

         # Open the deploy iso used to initiate deploy and copy the
         # efiboot.img i.e. boot loader to the current temporary
@@ -335,7 +335,7 @@ def fetch(context, image_href, path, image_service=None, force_raw=False):
    # checked before we got here.
    if not image_service:
        image_service = service.get_image_service(image_href,
-               context=context)
+                                                  context=context)
    LOG.debug("Using %(image_service)s to download image %(image_href)s." %
              {'image_service': image_service.__class__,
               'image_href': image_href})
@@ -355,29 +355,30 @@ def image_to_raw(image_href, path, path_tmp):
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
-                   reason=_("'qemu-img info' parsing failed."),
-                   image_id=image_href)
+               reason=_("'qemu-img info' parsing failed."),
+               image_id=image_href)

        backing_file = data.backing_file
        if backing_file is not None:
-           raise exception.ImageUnacceptable(image_id=image_href,
+           raise exception.ImageUnacceptable(
+               image_id=image_href,
                reason=_("fmt=%(fmt)s backed by: %(backing_file)s") %
-               {'fmt': fmt,
-                'backing_file': backing_file})
+                      {'fmt': fmt, 'backing_file': backing_file})

        if fmt != "raw":
            staged = "%s.converted" % path
            LOG.debug("%(image)s was %(format)s, converting to raw" %
-                   {'image': image_href, 'format': fmt})
+                     {'image': image_href, 'format': fmt})
            with fileutils.remove_path_on_error(staged):
                convert_image(path_tmp, staged, 'raw')
                os.unlink(path_tmp)

                data = qemu_img_info(staged)
                if data.file_format != "raw":
-                   raise exception.ImageConvertFailed(image_id=image_href,
-                       reason=_("Converted to raw, but format is now %s") %
-                       data.file_format)
+                   raise exception.ImageConvertFailed(
+                       image_id=image_href,
+                       reason=_("Converted to raw, but format is "
+                                "now %s") % data.file_format)

                os.rename(staged, path)
        else:
@@ -63,11 +63,12 @@ def _get_ksclient(token=None):
        if token:
            return client.Client(token=token, auth_url=auth_url)
        else:
-           return client.Client(username=CONF.keystone_authtoken.admin_user,
-                   password=CONF.keystone_authtoken.admin_password,
-                   tenant_name=CONF.keystone_authtoken.admin_tenant_name,
-                   region_name=CONF.keystone.region_name,
-                   auth_url=auth_url)
+           return client.Client(
+               username=CONF.keystone_authtoken.admin_user,
+               password=CONF.keystone_authtoken.admin_password,
+               tenant_name=CONF.keystone_authtoken.admin_tenant_name,
+               region_name=CONF.keystone.region_name,
+               auth_url=auth_url)
    except ksexception.Unauthorized:
        raise exception.KeystoneUnauthorized()
    except ksexception.AuthorizationFailure as err:

@@ -110,9 +111,10 @@ def get_service_url(service_type='baremetal', endpoint_type='internal'):
                                'loaded'))

    try:
-       endpoint = ksclient.service_catalog.url_for(service_type=service_type,
-               endpoint_type=endpoint_type,
-               region_name=CONF.keystone.region_name)
+       endpoint = ksclient.service_catalog.url_for(
+           service_type=service_type,
+           endpoint_type=endpoint_type,
+           region_name=CONF.keystone.region_name)

    except ksexception.EndpointNotFound:
        raise exception.CatalogNotFound(service_type=service_type,
@@ -115,7 +115,7 @@ def _link_ip_address_pxe_configs(task):
            ip_address_path = _get_pxe_ip_address_path(port_ip_address)
            utils.unlink_without_raise(ip_address_path)
            utils.create_link_without_raise(pxe_config_file_path,
-                   ip_address_path)
+                                           ip_address_path)


def _get_pxe_mac_path(mac, delimiter=None):

@@ -74,9 +74,9 @@ class RPCService(service.Service):
        self.handle_signal()
        self.manager.init_host()
        self.tg.add_dynamic_timer(
-               self.manager.periodic_tasks,
-               periodic_interval_max=cfg.CONF.periodic_interval,
-               context=admin_context)
+           self.manager.periodic_tasks,
+           periodic_interval_max=cfg.CONF.periodic_interval,
+           context=admin_context)

        LOG.info(_LI('Created RPC server for service %(service)s on host '
                     '%(host)s.'),
@@ -40,11 +40,11 @@ LOG = logging.getLogger(__name__)

# TODO(deva): add add'l state mappings here
VERBS = {
-       'active': 'deploy',
-       'deleted': 'delete',
-       'manage': 'manage',
-       'provide': 'provide',
-       'inspect': 'inspect',
+   'active': 'deploy',
+   'deleted': 'delete',
+   'manage': 'manage',
+   'provide': 'provide',
+   'inspect': 'inspect',
}
""" Mapping of state-changing events that are PUT to the REST API

@@ -175,7 +175,7 @@ REBOOT = 'rebooting'
def on_exit(old_state, event):
    """Used to log when a state is exited."""
    LOG.debug("Exiting old state '%s' in response to event '%s'",
-           old_state, event)
+             old_state, event)


def on_enter(new_state, event):
@@ -30,7 +30,7 @@ swift_opts = [
               default=2,
               help='Maximum number of times to retry a Swift request, '
                    'before failing.')
-   ]
+]


CONF = cfg.CONF

@@ -446,7 +446,7 @@ def unlink_without_raise(path):
            return
        else:
            LOG.warn(_LW("Failed to unlink %(path)s, error: %(e)s"),
-                   {'path': path, 'e': e})
+                    {'path': path, 'e': e})


def rmtree_without_raise(path):
@@ -455,7 +455,7 @@ def rmtree_without_raise(path):
        shutil.rmtree(path)
    except OSError as e:
        LOG.warn(_LW("Failed to remove dir %(path)s, error: %(e)s"),
-               {'path': path, 'e': e})
+                {'path': path, 'e': e})


def write_to_file(path, contents):

@@ -472,7 +472,7 @@ def create_link_without_raise(source, link):
        else:
            LOG.warn(_LW("Failed to create symlink from %(source)s to %(link)s"
                         ", error: %(e)s"),
-                   {'source': source, 'link': link, 'e': e})
+                    {'source': source, 'link': link, 'e': e})


def safe_rstrip(value, chars=None):

@@ -558,7 +558,7 @@ def check_dir(directory_to_check=None, required_space=1):
    # check if directory_to_check is passed in, if not set to tempdir
    if directory_to_check is None:
        directory_to_check = (tempfile.gettempdir() if CONF.tempdir
-               is None else CONF.tempdir)
+                             is None else CONF.tempdir)

    LOG.debug("checking directory: %s", directory_to_check)
@@ -83,107 +83,106 @@ WORKER_SPAWN_lOCK = "conductor_worker_spawn"
 LOG = log.getLogger(__name__)

 conductor_opts = [
-        cfg.StrOpt('api_url',
-                   help=('URL of Ironic API service. If not set ironic can '
-                         'get the current value from the keystone service '
-                         'catalog.')),
-        cfg.IntOpt('heartbeat_interval',
-                   default=10,
-                   help='Seconds between conductor heart beats.'),
-        cfg.IntOpt('heartbeat_timeout',
-                   default=60,
-                   help='Maximum time (in seconds) since the last check-in '
-                        'of a conductor. A conductor is considered inactive '
-                        'when this time has been exceeded.'),
-        cfg.IntOpt('sync_power_state_interval',
-                   default=60,
-                   help='Interval between syncing the node power state to the '
-                        'database, in seconds.'),
-        cfg.IntOpt('check_provision_state_interval',
-                   default=60,
-                   help='Interval between checks of provision timeouts, '
-                        'in seconds.'),
-        cfg.IntOpt('deploy_callback_timeout',
-                   default=1800,
-                   help='Timeout (seconds) to wait for a callback from '
-                        'a deploy ramdisk. Set to 0 to disable timeout.'),
-        cfg.BoolOpt('force_power_state_during_sync',
-                    default=True,
-                    help='During sync_power_state, should the hardware power '
-                         'state be set to the state recorded in the database '
-                         '(True) or should the database be updated based on '
-                         'the hardware state (False).'),
-        cfg.IntOpt('power_state_sync_max_retries',
-                   default=3,
-                   help='During sync_power_state failures, limit the '
-                        'number of times Ironic should try syncing the '
-                        'hardware node power state with the node power state '
-                        'in DB'),
-        cfg.IntOpt('periodic_max_workers',
-                   default=8,
-                   help='Maximum number of worker threads that can be started '
-                        'simultaneously by a periodic task. Should be less '
-                        'than RPC thread pool size.'),
-        cfg.IntOpt('workers_pool_size',
-                   default=100,
-                   help='The size of the workers greenthread pool.'),
-        cfg.IntOpt('node_locked_retry_attempts',
-                   default=3,
-                   help='Number of attempts to grab a node lock.'),
-        cfg.IntOpt('node_locked_retry_interval',
-                   default=1,
-                   help='Seconds to sleep between node lock attempts.'),
-        cfg.BoolOpt('send_sensor_data',
-                    default=False,
-                    help='Enable sending sensor data message via the '
-                         'notification bus'),
-        cfg.IntOpt('send_sensor_data_interval',
-                   default=600,
-                   help='Seconds between conductor sending sensor data message'
-                        ' to ceilometer via the notification bus.'),
-        cfg.ListOpt('send_sensor_data_types',
-                    default=['ALL'],
-                    help='List of comma separated meter types which need to be'
-                         ' sent to Ceilometer. The default value, "ALL", is a '
-                         'special value meaning send all the sensor data.'
-                    ),
-        cfg.IntOpt('sync_local_state_interval',
-                   default=180,
-                   help='When conductors join or leave the cluster, existing '
-                        'conductors may need to update any persistent '
-                        'local state as nodes are moved around the cluster. '
-                        'This option controls how often, in seconds, each '
-                        'conductor will check for nodes that it should '
-                        '"take over". Set it to a negative value to disable '
-                        'the check entirely.'),
-        cfg.BoolOpt('configdrive_use_swift',
-                    default=False,
-                    help='Whether to upload the config drive to Swift.'),
-        cfg.StrOpt('configdrive_swift_container',
-                   default='ironic_configdrive_container',
-                   help='Name of the Swift container to store config drive '
-                        'data. Used when configdrive_use_swift is True.'),
-        cfg.IntOpt('inspect_timeout',
-                   default=1800,
-                   help='Timeout (seconds) for waiting for node inspection. '
-                        '0 - unlimited.'),
-        cfg.BoolOpt('clean_nodes',
-                    default=True,
-                    help='Cleaning is a configurable set of steps, such as '
-                         'erasing disk drives, that are performed on the node '
-                         'to ensure it is in a baseline state and ready to be '
-                         'deployed to. '
-                         'This is done after instance deletion, and during '
-                         'the transition from a "managed" to "available" '
-                         'state. When enabled, the particular steps '
-                         'performed to clean a node depend on which driver '
-                         'that node is managed by; see the individual '
-                         'driver\'s documentation for details. '
-                         'NOTE: The introduction of the cleaning operation '
-                         'causes instance deletion to take significantly '
-                         'longer. In an environment where all tenants are '
-                         'trusted (eg, because there is only one tenant), '
-                         'this option could be safely disabled.'),
+    cfg.StrOpt('api_url',
+               help=('URL of Ironic API service. If not set ironic can '
+                     'get the current value from the keystone service '
+                     'catalog.')),
+    cfg.IntOpt('heartbeat_interval',
+               default=10,
+               help='Seconds between conductor heart beats.'),
+    cfg.IntOpt('heartbeat_timeout',
+               default=60,
+               help='Maximum time (in seconds) since the last check-in '
+                    'of a conductor. A conductor is considered inactive '
+                    'when this time has been exceeded.'),
+    cfg.IntOpt('sync_power_state_interval',
+               default=60,
+               help='Interval between syncing the node power state to the '
+                    'database, in seconds.'),
+    cfg.IntOpt('check_provision_state_interval',
+               default=60,
+               help='Interval between checks of provision timeouts, '
+                    'in seconds.'),
+    cfg.IntOpt('deploy_callback_timeout',
+               default=1800,
+               help='Timeout (seconds) to wait for a callback from '
+                    'a deploy ramdisk. Set to 0 to disable timeout.'),
+    cfg.BoolOpt('force_power_state_during_sync',
+                default=True,
+                help='During sync_power_state, should the hardware power '
+                     'state be set to the state recorded in the database '
+                     '(True) or should the database be updated based on '
+                     'the hardware state (False).'),
+    cfg.IntOpt('power_state_sync_max_retries',
+               default=3,
+               help='During sync_power_state failures, limit the '
+                    'number of times Ironic should try syncing the '
+                    'hardware node power state with the node power state '
+                    'in DB'),
+    cfg.IntOpt('periodic_max_workers',
+               default=8,
+               help='Maximum number of worker threads that can be started '
+                    'simultaneously by a periodic task. Should be less '
+                    'than RPC thread pool size.'),
+    cfg.IntOpt('workers_pool_size',
+               default=100,
+               help='The size of the workers greenthread pool.'),
+    cfg.IntOpt('node_locked_retry_attempts',
+               default=3,
+               help='Number of attempts to grab a node lock.'),
+    cfg.IntOpt('node_locked_retry_interval',
+               default=1,
+               help='Seconds to sleep between node lock attempts.'),
+    cfg.BoolOpt('send_sensor_data',
+                default=False,
+                help='Enable sending sensor data message via the '
+                     'notification bus'),
+    cfg.IntOpt('send_sensor_data_interval',
+               default=600,
+               help='Seconds between conductor sending sensor data message'
+                    ' to ceilometer via the notification bus.'),
+    cfg.ListOpt('send_sensor_data_types',
+                default=['ALL'],
+                help='List of comma separated meter types which need to be'
+                     ' sent to Ceilometer. The default value, "ALL", is a '
+                     'special value meaning send all the sensor data.'),
+    cfg.IntOpt('sync_local_state_interval',
+               default=180,
+               help='When conductors join or leave the cluster, existing '
+                    'conductors may need to update any persistent '
+                    'local state as nodes are moved around the cluster. '
+                    'This option controls how often, in seconds, each '
+                    'conductor will check for nodes that it should '
+                    '"take over". Set it to a negative value to disable '
+                    'the check entirely.'),
+    cfg.BoolOpt('configdrive_use_swift',
+                default=False,
+                help='Whether to upload the config drive to Swift.'),
+    cfg.StrOpt('configdrive_swift_container',
+               default='ironic_configdrive_container',
+               help='Name of the Swift container to store config drive '
+                    'data. Used when configdrive_use_swift is True.'),
+    cfg.IntOpt('inspect_timeout',
+               default=1800,
+               help='Timeout (seconds) for waiting for node inspection. '
+                    '0 - unlimited.'),
+    cfg.BoolOpt('clean_nodes',
+                default=True,
+                help='Cleaning is a configurable set of steps, such as '
+                     'erasing disk drives, that are performed on the node '
+                     'to ensure it is in a baseline state and ready to be '
+                     'deployed to. '
+                     'This is done after instance deletion, and during '
+                     'the transition from a "managed" to "available" '
+                     'state. When enabled, the particular steps '
+                     'performed to clean a node depend on which driver '
+                     'that node is managed by; see the individual '
+                     'driver\'s documentation for details. '
+                     'NOTE: The introduction of the cleaning operation '
+                     'causes instance deletion to take significantly '
+                     'longer. In an environment where all tenants are '
+                     'trusted (eg, because there is only one tenant), '
+                     'this option could be safely disabled.'),
 ]
 CONF = cfg.CONF
 CONF.register_opts(conductor_opts, 'conductor')
@@ -237,7 +236,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
        """Event for the keepalive thread."""

        self._worker_pool = greenpool.GreenPool(
-               size=CONF.conductor.workers_pool_size)
+           size=CONF.conductor.workers_pool_size)
        """GreenPool of background workers for performing tasks async."""

        self.ring_manager = hash.HashRingManager()

@@ -282,7 +281,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                 {'hostname': self.host})
        cdr = self.dbapi.register_conductor({'hostname': self.host,
                                             'drivers': self.drivers},
-                                             update_existing=True)
+                                            update_existing=True)
        self.conductor = cdr

        # Spawn a dedicated greenthread for the keepalive

@@ -481,7 +480,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                                "Please update the code to use the "
                                "@passthru decorator."))
            vendor_iface.validate(task, method=driver_method,
-                   **info)
+                                 **info)
            task.spawn_after(self._spawn_worker,
                             vendor_iface.vendor_passthru, task,
                             method=driver_method, **info)
@@ -576,7 +575,7 @@ class ConductorManager(periodic_task.PeriodicTasks):

            driver.vendor.driver_validate(method=driver_method, **info)
            ret = driver.vendor.driver_vendor_passthru(
-                     context, method=driver_method, **info)
+               context, method=driver_method, **info)
            # DriverVendorPassthru was always sync
            return (ret, False)

@@ -626,7 +625,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                    extension='vendor interface')

            return get_vendor_passthru_metadata(
-                      task.driver.vendor.vendor_routes)
+               task.driver.vendor.vendor_routes)

    @messaging.expected_exceptions(exception.UnsupportedDriverExtension,
                                   exception.DriverNotFound)

@@ -737,8 +736,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
                                 err_handler=provisioning_error_handler)
            except exception.InvalidState:
                raise exception.InvalidStateRequested(
-                       action=event, node=task.node.uuid,
-                       state=task.node.provision_state)
+                   action=event, node=task.node.uuid,
+                   state=task.node.provision_state)

    @messaging.expected_exceptions(exception.NoFreeConductorWorker,
                                   exception.NodeLocked,
@@ -780,8 +779,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
                                 err_handler=provisioning_error_handler)
            except exception.InvalidState:
                raise exception.InvalidStateRequested(
-                       action='delete', node=task.node.uuid,
-                       state=task.node.provision_state)
+                   action='delete', node=task.node.uuid,
+                   state=task.node.provision_state)

    def _do_node_tear_down(self, task):
        """Internal RPC method to tear down an existing node deployment."""

@@ -897,7 +896,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
                return

            set_node_cleaning_steps(task)
-           self._do_next_clean_step(task,
+           self._do_next_clean_step(
+               task,
                node.driver_internal_info.get('clean_steps', []),
                node.clean_step)
@@ -1017,7 +1017,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                    state=task.node.provision_state)

    @periodic_task.periodic_task(
-               spacing=CONF.conductor.sync_power_state_interval)
+       spacing=CONF.conductor.sync_power_state_interval)
    def _sync_power_states(self, context):
        """Periodic task to sync power states for the nodes.

@@ -1067,7 +1067,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
                        task.node.maintenance):
                    continue
                count = do_sync_power_state(
-                           task, self.power_state_sync_count[node_uuid])
+                   task, self.power_state_sync_count[node_uuid])
                if count:
                    self.power_state_sync_count[node_uuid] = count
                else:

@@ -1086,7 +1086,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
            eventlet.sleep(0)

    @periodic_task.periodic_task(
-               spacing=CONF.conductor.check_provision_state_interval)
+       spacing=CONF.conductor.check_provision_state_interval)
    def _check_deploy_timeouts(self, context):
        """Periodically checks whether a deploy RPC call has timed out.
@@ -1127,7 +1127,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
            task.node.save()

    @periodic_task.periodic_task(
-               spacing=CONF.conductor.sync_local_state_interval)
+       spacing=CONF.conductor.sync_local_state_interval)
    def _sync_local_state(self, context):
        """Perform any actions necessary to sync local state.

@@ -1318,13 +1318,13 @@ class ConductorManager(periodic_task.PeriodicTasks):

        """
        LOG.debug('RPC destroy_port called for port %(port)s',
-               {'port': port.uuid})
+                 {'port': port.uuid})
        with task_manager.acquire(context, port.node_id) as task:
            port.destroy()
            LOG.info(_LI('Successfully deleted port %(port)s. '
                         'The node associated with the port was '
                         '%(node)s'),
-                   {'port': port.uuid, 'node': task.node.uuid})
+                    {'port': port.uuid, 'node': task.node.uuid})

    @messaging.expected_exceptions(exception.NodeLocked,
                                   exception.UnsupportedDriverExtension,
@@ -1416,9 +1416,9 @@ class ConductorManager(periodic_task.PeriodicTasks):
            with excutils.save_and_reraise_exception():
                op = _('enabling') if enabled else _('disabling')
                msg = (_('Error %(op)s the console on node %(node)s. '
-                       'Reason: %(error)s') % {'op': op,
-                                               'node': node.uuid,
-                                               'error': e})
+                        'Reason: %(error)s') % {'op': op,
+                                                'node': node.uuid,
+                                                'error': e})
                node.last_error = msg
        else:
            node.console_enabled = enabled

@@ -1454,7 +1454,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
                # Log warning if there is no vif_port_id and an instance
                # is associated with the node.
                elif node.instance_uuid:
-                   LOG.warning(_LW("No VIF found for instance %(instance)s "
+                   LOG.warning(_LW(
+                       "No VIF found for instance %(instance)s "
                        "port %(port)s when attempting to update port MAC "
                        "address."),
                        {'port': port_uuid, 'instance': node.instance_uuid})
@@ -1480,7 +1481,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
        return driver.get_properties()

    @periodic_task.periodic_task(
-               spacing=CONF.conductor.send_sensor_data_interval)
+       spacing=CONF.conductor.send_sensor_data_interval)
    def _send_sensor_data(self, context):
        """Periodically sends sensor data to Ceilometer."""
        # do nothing if send_sensor_data option is False

@@ -1507,27 +1508,32 @@ class ConductorManager(periodic_task.PeriodicTasks):
                    sensors_data = task.driver.management.get_sensors_data(
                        task)
            except NotImplementedError:
-               LOG.warn(_LW('get_sensors_data is not implemented for driver'
+               LOG.warn(_LW(
+                   'get_sensors_data is not implemented for driver'
                    ' %(driver)s, node_uuid is %(node)s'),
                    {'node': node_uuid, 'driver': driver})
            except exception.FailedToParseSensorData as fps:
-               LOG.warn(_LW("During get_sensors_data, could not parse "
+               LOG.warn(_LW(
+                   "During get_sensors_data, could not parse "
                    "sensor data for node %(node)s. Error: %(err)s."),
                    {'node': node_uuid, 'err': str(fps)})
            except exception.FailedToGetSensorData as fgs:
-               LOG.warn(_LW("During get_sensors_data, could not get "
+               LOG.warn(_LW(
+                   "During get_sensors_data, could not get "
                    "sensor data for node %(node)s. Error: %(err)s."),
                    {'node': node_uuid, 'err': str(fgs)})
            except exception.NodeNotFound:
-               LOG.warn(_LW("During send_sensor_data, node %(node)s was not "
-                            "found and presumed deleted by another process."),
-                        {'node': node_uuid})
+               LOG.warn(_LW(
+                   "During send_sensor_data, node %(node)s was not "
+                   "found and presumed deleted by another process."),
+                   {'node': node_uuid})
            except Exception as e:
-               LOG.warn(_LW("Failed to get sensor data for node %(node)s. "
+               LOG.warn(_LW(
+                   "Failed to get sensor data for node %(node)s. "
                    "Error: %(error)s"), {'node': node_uuid, 'error': str(e)})
            else:
-               message['payload'] = self._filter_out_unsupported_types(
-                                        sensors_data)
+               message['payload'] = (
+                   self._filter_out_unsupported_types(sensors_data))
                if message['payload']:
                    self.notifier.info(context, "hardware.ipmi.metrics",
                                       message)
@@ -1551,7 +1557,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
            return sensors_data

        return dict((sensor_type, sensor_value) for (sensor_type, sensor_value)
-           in sensors_data.items() if sensor_type.lower() in allowed)
+                   in sensors_data.items() if sensor_type.lower() in allowed)

    @messaging.expected_exceptions(exception.NodeLocked,
                                   exception.UnsupportedDriverExtension,

@@ -1581,7 +1587,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
            node = task.node
            if not getattr(task.driver, 'management', None):
                raise exception.UnsupportedDriverExtension(
-                         driver=node.driver, extension='management')
+                   driver=node.driver, extension='management')
            task.driver.management.validate(task)
            task.driver.management.set_boot_device(task, device,
                                                   persistent=persistent)

@@ -1615,7 +1621,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
        with task_manager.acquire(context, node_id) as task:
            if not getattr(task.driver, 'management', None):
                raise exception.UnsupportedDriverExtension(
-                         driver=task.node.driver, extension='management')
+                   driver=task.node.driver, extension='management')
            task.driver.management.validate(task)
            return task.driver.management.get_boot_device(task)
@ -1644,7 +1650,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
|
||||
with task_manager.acquire(context, node_id, shared=True) as task:
|
||||
if not getattr(task.driver, 'management', None):
|
||||
raise exception.UnsupportedDriverExtension(
|
||||
driver=task.node.driver, extension='management')
|
||||
driver=task.node.driver, extension='management')
|
||||
return task.driver.management.get_supported_boot_devices()
|
||||
|
||||
@messaging.expected_exceptions(exception.NoFreeConductorWorker,
|
||||
@ -1676,7 +1682,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
|
||||
with task_manager.acquire(context, node_id, shared=False) as task:
|
||||
if not getattr(task.driver, 'inspect', None):
|
||||
raise exception.UnsupportedDriverExtension(
|
||||
driver=task.node.driver, extension='inspect')
|
||||
driver=task.node.driver, extension='inspect')
|
||||
|
||||
try:
|
||||
task.driver.power.validate(task)
|
||||
@ -1685,7 +1691,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
|
||||
exception.MissingParameterValue) as e:
|
||||
error = (_("RPC inspect_hardware failed to validate "
|
||||
"inspection or power info. Error: %(msg)s")
|
||||
% {'msg': e})
|
||||
% {'msg': e})
|
||||
raise exception.HardwareInspectionFailure(error=error)
|
||||
|
||||
try:
|
||||
@ -1696,8 +1702,8 @@ class ConductorManager(periodic_task.PeriodicTasks):
|
||||
|
||||
except exception.InvalidState:
|
||||
raise exception.InvalidStateRequested(
|
||||
action='inspect', node=task.node.uuid,
|
||||
state=task.node.provision_state)
|
||||
action='inspect', node=task.node.uuid,
|
||||
state=task.node.provision_state)
|
||||
|
||||
@periodic_task.periodic_task(
|
||||
spacing=CONF.conductor.check_provision_state_interval)
|
||||
@ -1805,11 +1811,11 @@ def power_state_error_handler(e, node, power_state):
|
||||
LOG.warning(_LW("No free conductor workers available to perform "
|
||||
"an action on node %(node)s, setting node's "
|
||||
"power state back to %(power_state)s."),
|
||||
{'node': node.uuid, 'power_state': power_state})
|
||||
{'node': node.uuid, 'power_state': power_state})
|
||||
|
||||
|
||||
def provisioning_error_handler(e, node, provision_state,
|
||||
target_provision_state):
|
||||
target_provision_state):
|
||||
"""Set the node's provisioning states if error occurs.
|
||||
|
||||
This hook gets called upon an exception being raised when spawning
|
||||
@ -1897,7 +1903,8 @@ def do_node_deploy(task, conductor_id, configdrive=None):
|
||||
_store_configdrive(node, configdrive)
|
||||
except exception.SwiftOperationError as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
handle_failure(e, task,
|
||||
handle_failure(
|
||||
e, task,
|
||||
_LW('Error while uploading the configdrive for '
|
||||
'%(node)s to Swift'),
|
||||
_('Failed to upload the configdrive to Swift. '
|
||||
@ -1907,7 +1914,8 @@ def do_node_deploy(task, conductor_id, configdrive=None):
|
||||
task.driver.deploy.prepare(task)
|
||||
except Exception as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
handle_failure(e, task,
|
||||
handle_failure(
|
||||
e, task,
|
||||
_LW('Error while preparing to deploy to node %(node)s: '
|
||||
'%(err)s'),
|
||||
_("Failed to prepare to deploy. Error: %s"))
|
||||
@ -1916,7 +1924,8 @@ def do_node_deploy(task, conductor_id, configdrive=None):
|
||||
new_state = task.driver.deploy.deploy(task)
|
||||
except Exception as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
handle_failure(e, task,
|
||||
handle_failure(
|
||||
e, task,
|
||||
_LW('Error in deploy of node %(node)s: %(err)s'),
|
||||
_("Failed to deploy. Error: %s"))
|
||||
|
||||
@ -1936,7 +1945,7 @@ def do_node_deploy(task, conductor_id, configdrive=None):
|
||||
else:
|
||||
LOG.error(_LE('Unexpected state %(state)s returned while '
|
||||
'deploying node %(node)s.'),
|
||||
{'state': new_state, 'node': node.uuid})
|
||||
{'state': new_state, 'node': node.uuid})
|
||||
finally:
|
||||
node.save()
|
||||
|
||||
@ -1955,12 +1964,12 @@ def handle_sync_power_state_max_retries_exceeded(task,
|
||||
"""
|
||||
node = task.node
|
||||
msg = (_("During sync_power_state, max retries exceeded "
|
||||
"for node %(node)s, node state %(actual)s "
|
||||
"does not match expected state '%(state)s'. "
|
||||
"Updating DB state to '%(actual)s' "
|
||||
"Switching node to maintenance mode.") %
|
||||
{'node': node.uuid, 'actual': actual_power_state,
|
||||
'state': node.power_state})
|
||||
"for node %(node)s, node state %(actual)s "
|
||||
"does not match expected state '%(state)s'. "
|
||||
"Updating DB state to '%(actual)s' "
|
||||
"Switching node to maintenance mode.") %
|
||||
{'node': node.uuid, 'actual': actual_power_state,
|
||||
'state': node.power_state})
|
||||
node.power_state = actual_power_state
|
||||
node.last_error = msg
|
||||
node.maintenance = True
|
||||
@ -2001,8 +2010,8 @@ def do_sync_power_state(task, count):
|
||||
power_state = task.driver.power.get_power_state(task)
|
||||
if power_state == states.ERROR:
|
||||
raise exception.PowerStateFailure(
|
||||
_("Power driver returned ERROR state "
|
||||
"while trying to sync power state."))
|
||||
_("Power driver returned ERROR state "
|
||||
"while trying to sync power state."))
|
||||
except Exception as e:
|
||||
# Stop if any exception is raised when getting the power state
|
||||
if count > max_retries:
|
||||
@ -2011,8 +2020,8 @@ def do_sync_power_state(task, count):
|
||||
LOG.warning(_LW("During sync_power_state, could not get power "
|
||||
"state for node %(node)s, attempt %(attempt)s of "
|
||||
"%(retries)s. Error: %(err)s."),
|
||||
{'node': node.uuid, 'attempt': count,
|
||||
'retries': max_retries, 'err': e})
|
||||
{'node': node.uuid, 'attempt': count,
|
||||
'retries': max_retries, 'err': e})
|
||||
return count
|
||||
else:
|
||||
# If node has no prior state AND we successfully got a state,
|
||||
@ -2021,7 +2030,7 @@ def do_sync_power_state(task, count):
|
||||
LOG.info(_LI("During sync_power_state, node %(node)s has no "
|
||||
"previous known state. Recording current state "
|
||||
"'%(state)s'."),
|
||||
{'node': node.uuid, 'state': power_state})
|
||||
{'node': node.uuid, 'state': power_state})
|
||||
node.power_state = power_state
|
||||
node.save()
|
||||
return 0
|
||||
@ -2039,14 +2048,15 @@ def do_sync_power_state(task, count):
|
||||
LOG.warning(_LW("During sync_power_state, node %(node)s state "
|
||||
"'%(actual)s' does not match expected state. "
|
||||
"Changing hardware state to '%(state)s'."),
|
||||
{'node': node.uuid, 'actual': power_state,
|
||||
'state': node.power_state})
|
||||
{'node': node.uuid, 'actual': power_state,
|
||||
'state': node.power_state})
|
||||
try:
|
||||
# node_power_action will update the node record
|
||||
# so don't do that again here.
|
||||
utils.node_power_action(task, node.power_state)
|
||||
except Exception as e:
|
||||
LOG.error(_LE("Failed to change power state of node %(node)s "
|
||||
LOG.error(_LE(
|
||||
"Failed to change power state of node %(node)s "
|
||||
"to '%(state)s', attempt %(attempt)s of %(retries)s."),
|
||||
{'node': node.uuid,
|
||||
'state': node.power_state,
|
||||
@ -2056,8 +2066,8 @@ def do_sync_power_state(task, count):
|
||||
LOG.warning(_LW("During sync_power_state, node %(node)s state "
|
||||
"does not match expected state '%(state)s'. "
|
||||
"Updating recorded state to '%(actual)s'."),
|
||||
{'node': node.uuid, 'actual': power_state,
|
||||
'state': node.power_state})
|
||||
{'node': node.uuid, 'actual': power_state,
|
||||
'state': node.power_state})
|
||||
node.power_state = power_state
|
||||
node.save()
|
||||
|
||||
|
@ -254,7 +254,7 @@ class ConductorAPI(object):
                          node_id=node_id)

    def get_driver_vendor_passthru_methods(self, context, driver_name,
            topic=None):
                                           topic=None):
        """Retrieve information about vendor methods of the given driver.

        :param context: an admin context.

@ -97,7 +97,7 @@ def node_power_action(task, new_state):
        if curr_state == states.ERROR:
            # be optimistic and continue action
            LOG.warn(_LW("Driver returns ERROR power state for node %s."),
                          node.uuid)
                     node.uuid)

    # Set the target_power_state and clear any last_error, if we're
    # starting a new operation. This will expose to other processes
@ -137,7 +137,7 @@ def cleanup_after_timeout(task):
    """
    node = task.node
    msg = (_('Timeout reached while waiting for callback for node %s')
             % node.uuid)
           % node.uuid)
    node.last_error = msg
    LOG.error(msg)
    node.save()

@ -30,13 +30,13 @@ from sqlalchemy.dialects import mysql

def upgrade():
    op.alter_column('nodes', 'name',
                     existing_type=mysql.VARCHAR(length=63),
                     type_=sa.String(length=255),
                     existing_nullable=True)
                    existing_type=mysql.VARCHAR(length=63),
                    type_=sa.String(length=255),
                    existing_nullable=True)


def downgrade():
    op.alter_column('nodes', 'name',
                     existing_type=sa.String(length=255),
                     type_=mysql.VARCHAR(length=63),
                     existing_nullable=True)
                    existing_type=sa.String(length=255),
                    type_=mysql.VARCHAR(length=63),
                    existing_nullable=True)

@ -27,9 +27,7 @@ import sqlalchemy as sa


def upgrade():
    op.add_column('nodes',
                  sa.Column('console_enabled', sa.Boolean)
                  )
    op.add_column('nodes', sa.Column('console_enabled', sa.Boolean))


def downgrade():

@ -34,12 +34,12 @@ def upgrade():
        'nodes',
        sa.Column('conductor_affinity', sa.Integer(),
                  sa.ForeignKey('conductors.id',
                      name='nodes_conductor_affinity_fk'),
                                name='nodes_conductor_affinity_fk'),
                  nullable=True))


def downgrade():
    op.drop_constraint('nodes_conductor_affinity_fk', 'nodes',
        type_='foreignkey')
                       type_='foreignkey')
    op.drop_column('nodes', 'conductor_affinity')
    op.drop_column('conductors', 'online')

@ -27,8 +27,8 @@ from sqlalchemy import String
from sqlalchemy.sql import table, column

node = table('nodes',
        column('uuid', String(36)),
        column('provision_state', String(15)))
             column('uuid', String(36)),
             column('provision_state', String(15)))


# NOTE(deva): We must represent the states as static strings in this migration

@ -120,7 +120,7 @@ def add_port_filter_by_node(query, value):
        return query.filter_by(node_id=value)
    else:
        query = query.join(models.Node,
                models.Port.node_id == models.Node.id)
                           models.Port.node_id == models.Node.id)
        return query.filter(models.Node.uuid == value)


@ -129,7 +129,7 @@ def add_node_filter_by_chassis(query, value):
        return query.filter_by(chassis_id=value)
    else:
        query = query.join(models.Chassis,
                models.Node.chassis_id == models.Chassis.id)
                           models.Node.chassis_id == models.Chassis.id)
        return query.filter(models.Chassis.uuid == value)


@ -144,9 +144,9 @@ def _paginate_query(model, limit=None, marker=None, sort_key=None,
        query = db_utils.paginate_query(query, model, limit, sort_keys,
                                        marker=marker, sort_dir=sort_dir)
    except db_exc.InvalidSortKey:
        raise exception.InvalidParameterValue(_(
            'The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
        raise exception.InvalidParameterValue(
            _('The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
    return query.all()


@ -182,13 +182,13 @@ class Connection(api.Connection):
        if 'provision_state' in filters:
            query = query.filter_by(provision_state=filters['provision_state'])
        if 'provisioned_before' in filters:
            limit = timeutils.utcnow() - datetime.timedelta(
                    seconds=filters['provisioned_before'])
            limit = (timeutils.utcnow() -
                     datetime.timedelta(seconds=filters['provisioned_before']))
            query = query.filter(models.Node.provision_updated_at < limit)
        if 'inspection_started_before' in filters:
            limit = ((timeutils.utcnow()) -
                    (datetime.timedelta(
                        seconds=filters['inspection_started_before'])))
                     (datetime.timedelta(
                         seconds=filters['inspection_started_before'])))
            query = query.filter(models.Node.inspection_started_at < limit)

        return query
@ -221,7 +221,7 @@ class Connection(api.Connection):
            query = add_identity_filter(query, node_id)
            # be optimistic and assume we usually create a reservation
            count = query.filter_by(reservation=None).update(
                        {'reservation': tag}, synchronize_session=False)
                {'reservation': tag}, synchronize_session=False)
            try:
                node = query.one()
                if count != 1:
@ -240,7 +240,7 @@ class Connection(api.Connection):
            query = add_identity_filter(query, node_id)
            # be optimistic and assume we usually release a reservation
            count = query.filter_by(reservation=tag).update(
                        {'reservation': None}, synchronize_session=False)
                {'reservation': None}, synchronize_session=False)
            try:
                if count != 1:
                    node = query.one()
@ -365,8 +365,8 @@ class Connection(api.Connection):

            # Prevent instance_uuid overwriting
            if values.get("instance_uuid") and ref.instance_uuid:
                raise exception.NodeAssociated(node=node_id,
                            instance=ref.instance_uuid)
                raise exception.NodeAssociated(
                    node=node_id, instance=ref.instance_uuid)

            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()
@ -535,7 +535,7 @@ class Connection(api.Connection):
                ref = query.one()
                if ref.online is True and not update_existing:
                    raise exception.ConductorAlreadyRegistered(
                            conductor=values['hostname'])
                        conductor=values['hostname'])
            except NoResultFound:
                ref = models.Conductor()
            ref.update(values)
@ -579,8 +579,8 @@ class Connection(api.Connection):
        session = get_session()
        nodes = []
        with session.begin():
            query = model_query(models.Node, session=session).filter_by(
                        reservation=hostname)
            query = (model_query(models.Node, session=session)
                     .filter_by(reservation=hostname))
            nodes = [node['uuid'] for node in query]
            query.update({'reservation': None})

@ -117,7 +117,7 @@ class Chassis(Base):
    __table_args__ = (
        schema.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
        table_args()
        )
    )
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    extra = Column(JSONEncodedDict)
@ -131,7 +131,7 @@ class Conductor(Base):
    __table_args__ = (
        schema.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
        table_args()
        )
    )
    id = Column(Integer, primary_key=True)
    hostname = Column(String(255), nullable=False)
    drivers = Column(JSONEncodedList)
@ -179,9 +179,9 @@ class Node(Base):
    # When affinity and the hash ring's mapping do not match,
    # this indicates that a conductor should rebuild local state.
    conductor_affinity = Column(Integer,
                            ForeignKey('conductors.id',
                                name='nodes_conductor_affinity_fk'),
                            nullable=True)
                                ForeignKey('conductors.id',
                                           name='nodes_conductor_affinity_fk'),
                                nullable=True)

    maintenance = Column(Boolean, default=False)
    maintenance_reason = Column(Text, nullable=True)

@ -53,7 +53,7 @@ neutron_opts = [
               help='UUID of the network to create Neutron ports on when '
                    'booting to a ramdisk for cleaning/zapping using Neutron '
                    'DHCP')
    ]
]

CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
@ -282,7 +282,7 @@ class NeutronDHCPApi(base.BaseDHCP):
            LOG.warn(_LW("Some errors were encountered on node %(node)s"
                         " while retrieving IP address on the following"
                         " ports: %(ports)s."),
                         {'node': task.node.uuid, 'ports': failures})
                     {'node': task.node.uuid, 'ports': failures})

        return ip_addresses

@ -99,8 +99,8 @@ class AgentAndVirtualBoxDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('pyremotevbox'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import pyremotevbox library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import pyremotevbox library"))
        self.power = virtualbox.VirtualBoxPower()
        self.deploy = agent.AgentDeploy()
        self.management = virtualbox.VirtualBoxManagement()

@ -31,8 +31,8 @@ class PXEDracDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('pywsman'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_('Unable to import pywsman library'))
                driver=self.__class__.__name__,
                reason=_('Unable to import pywsman library'))

        self.power = power.DracPower()
        self.deploy = pxe.PXEDeploy()

@ -100,8 +100,8 @@ class FakeIPMINativeDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('pyghmi'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import pyghmi IPMI library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import pyghmi IPMI library"))
        self.power = ipminative.NativeIPMIPower()
        self.console = ipminative.NativeIPMIShellinaboxConsole()
        self.deploy = fake.FakeDeploy()
@ -114,8 +114,8 @@ class FakeSeaMicroDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('seamicroclient'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import seamicroclient library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import seamicroclient library"))
        self.power = seamicro.Power()
        self.deploy = fake.FakeDeploy()
        self.management = seamicro.Management()
@ -138,8 +138,8 @@ class FakeIBootDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('iboot'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import iboot library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import iboot library"))
        self.power = iboot.IBootPower()
        self.deploy = fake.FakeDeploy()

@ -150,8 +150,8 @@ class FakeIloDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('proliantutils'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import proliantutils library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import proliantutils library"))
        self.power = ilo_power.IloPower()
        self.deploy = fake.FakeDeploy()
        self.management = ilo_management.IloManagement()
@ -164,8 +164,8 @@ class FakeDracDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('pywsman'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_('Unable to import pywsman library'))
                driver=self.__class__.__name__,
                reason=_('Unable to import pywsman library'))

        self.power = drac_power.DracPower()
        self.deploy = fake.FakeDeploy()
@ -178,8 +178,8 @@ class FakeSNMPDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('pysnmp'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import pysnmp library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import pysnmp library"))
        self.power = snmp.SNMPPower()
        self.deploy = fake.FakeDeploy()

@ -190,8 +190,8 @@ class FakeIRMCDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('scciclient'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import python-scciclient library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import python-scciclient library"))
        self.power = irmc_power.IRMCPower()
        self.deploy = fake.FakeDeploy()
        self.management = irmc_management.IRMCManagement()
@ -203,8 +203,8 @@ class FakeVirtualBoxDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('pyremotevbox'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import pyremotevbox library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import pyremotevbox library"))
        self.power = virtualbox.VirtualBoxPower()
        self.deploy = fake.FakeDeploy()
        self.management = virtualbox.VirtualBoxManagement()
@ -231,8 +231,8 @@ class FakeAMTDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('pywsman'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import pywsman library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import pywsman library"))
        self.power = amt_power.AMTPower()
        self.deploy = fake.FakeDeploy()
        self.management = amt_mgmt.AMTManagement()

@ -39,8 +39,8 @@ class IloVirtualMediaIscsiDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('proliantutils'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import proliantutils library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import proliantutils library"))

        self.power = power.IloPower()
        self.deploy = deploy.IloVirtualMediaIscsiDeploy()
@ -63,8 +63,8 @@ class IloVirtualMediaAgentDriver(base.BaseDriver):
    def __init__(self):
        if not importutils.try_import('proliantutils'):
            raise exception.DriverLoadError(
                    driver=self.__class__.__name__,
                    reason=_("Unable to import proliantutils library"))
                driver=self.__class__.__name__,
                reason=_("Unable to import proliantutils library"))

        self.power = power.IloPower()
        self.deploy = deploy.IloVirtualMediaAgentDeploy()

@ -65,7 +65,7 @@ agent_opts = [
                    'your own TFTP server that allows booting the deploy '
                    'ramdisks.'
               ),
    ]
]

CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
@ -489,7 +489,7 @@ class AgentVendorInterface(agent_base_vendor.BaseAgentVendor):
        if error is not None:
            # TODO(jimrollenhagen) power off if using neutron dhcp to
            # align with pxe driver?
            msg = _('node %(node)s command status errored: %(error)s') % (
            msg = (_('node %(node)s command status errored: %(error)s') %
                   {'node': node.uuid, 'error': error})
            LOG.error(msg)
            deploy_utils.set_failed_state(task, msg)

@ -43,7 +43,7 @@ agent_opts = [
    cfg.IntOpt('heartbeat_timeout',
               default=300,
               help='Maximum interval (in seconds) for agent heartbeats.'),
    ]
]

CONF = cfg.CONF
CONF.register_opts(agent_opts, group='agent')
@ -126,7 +126,7 @@ class BaseAgentVendor(base.VendorInterface):
        if version not in self.supported_payload_versions:
            raise exception.InvalidParameterValue(_('Unknown lookup '
                                                    'payload version: %s')
                                                    % version)
                                                  % version)

    def _notify_conductor_resume_clean(self, task):
        uuid = task.node.uuid
@ -477,7 +477,7 @@ class BaseAgentVendor(base.VendorInterface):
                msg = (_("Failed to install a bootloader when "
                         "deploying node %(node)s. Error: %(error)s") %
                       {'node': node.uuid,
                       'error': result['command_error']})
                        'error': result['command_error']})
                self._log_and_raise_deployment_error(task, msg)

            try:

@ -77,7 +77,7 @@ class AgentClient(object):
                    'Unable to decode response as JSON.\n'
                    'Request URL: %(url)s\nRequest body: "%(body)s"\n'
                    'Response: "%(response)s"'
                    ) % ({'response': response.text, 'body': body, 'url': url})
            ) % ({'response': response.text, 'body': body, 'url': url})
            LOG.error(msg)
            raise exception.IronicException(msg)

@ -166,8 +166,8 @@ def parse_driver_info(node):
    param = 'amt_protocol'
    protocol = info.get(param, CONF.amt.get(param[4:]))
    if protocol not in AMT_PROTOCOL_PORT_MAP:
        raise exception.InvalidParameterValue(_("Invalid "
                                            "protocol %s.") % protocol)
        raise exception.InvalidParameterValue(
            _("Invalid protocol %s.") % protocol)
    if not isinstance(value, six.binary_type):
        protocol = protocol.encode()
    d_info[param[4:]] = protocol

@ -188,9 +188,10 @@ class AMTManagement(base.ManagementInterface):
        node = task.node

        if device not in amt_common.BOOT_DEVICES_MAPPING:
            raise exception.InvalidParameterValue(_("set_boot_device called "
                    "with invalid device %(device)s for node %(node_id)s.") %
                    {'device': device, 'node_id': node.uuid})
            raise exception.InvalidParameterValue(
                _("set_boot_device called with invalid device "
                  "%(device)s for node %(node_id)s."
                  ) % {'device': device, 'node_id': node.uuid})

        # AMT/vPro doesn't support set boot_device persistent, so we have to
        # save amt_boot_device/amt_boot_persistent in driver_internal_info.
@ -233,7 +234,7 @@ class AMTManagement(base.ManagementInterface):
            driver_internal_info = node.driver_internal_info
            if not driver_internal_info.get('amt_boot_persistent'):
                driver_internal_info['amt_boot_device'] = (
                        amt_common.DEFAULT_BOOT_DEVICE)
                    amt_common.DEFAULT_BOOT_DEVICE)
                driver_internal_info['amt_boot_persistent'] = True
                node.driver_internal_info = driver_internal_info
                node.save()

@ -163,7 +163,7 @@ def _set_and_wait(task, target_state):
    driver = task.driver
    if target_state not in (states.POWER_ON, states.POWER_OFF):
        raise exception.InvalidParameterValue(_('Unsupported target_state: %s')
                                                % target_state)
                                              % target_state)
    elif target_state == states.POWER_ON:
        boot_device = node.driver_internal_info.get('amt_boot_device')
        if boot_device and boot_device != amt_common.DEFAULT_BOOT_DEVICE:

@ -54,7 +54,7 @@ opts = [
               default=10,
               help='Time (in seconds) to wait for the console subprocess '
                    'to start.'),
    ]
]

CONF = cfg.CONF
CONF.register_opts(opts, group='console')
@ -157,8 +157,8 @@ def get_shellinabox_console_url(port):
        console_host = '[%s]' % console_host
    scheme = 'https' if CONF.console.terminal_cert_dir else 'http'
    return '%(scheme)s://%(host)s:%(port)s' % {'scheme': scheme,
                                              'host': console_host,
                                              'port': port}
                                               'host': console_host,
                                               'port': port}


def start_shellinabox_console(node_uuid, port, console_cmd):
@ -179,9 +179,9 @@ def start_shellinabox_console(node_uuid, port, console_cmd):
        pass
    except processutils.ProcessExecutionError as exc:
        LOG.warning(_LW("Failed to kill the old console process "
                    "before starting a new shellinabox console "
                    "for node %(node)s. Reason: %(err)s"),
                    {'node': node_uuid, 'err': exc})
                        "before starting a new shellinabox console "
                        "for node %(node)s. Reason: %(err)s"),
                    {'node': node_uuid, 'err': exc})

    _ensure_console_pid_dir_exists()
    pid_file = _get_console_pid_file(node_uuid)
@ -226,19 +226,21 @@ def start_shellinabox_console(node_uuid, port, console_cmd):
                raise loopingcall.LoopingCallDone()
            else:
                (stdout, stderr) = popen_obj.communicate()
                locals['errstr'] = _("Command: %(command)s.\n"
                        "Exit code: %(return_code)s.\n"
                        "Stdout: %(stdout)r\n"
                        "Stderr: %(stderr)r") % {'command': ' '.join(args),
                                                 'return_code': locals['returncode'],
                                                 'stdout': stdout,
                                                 'stderr': stderr}
                locals['errstr'] = _(
                    "Command: %(command)s.\n"
                    "Exit code: %(return_code)s.\n"
                    "Stdout: %(stdout)r\n"
                    "Stderr: %(stderr)r") % {
                        'command': ' '.join(args),
                        'return_code': locals['returncode'],
                        'stdout': stdout,
                        'stderr': stderr}
                LOG.warning(locals['errstr'])
                raise loopingcall.LoopingCallDone()

        if (time.time() > expiration):
            locals['errstr'] = _("Timeout while waiting for console"
                    " subprocess to start for node %s.") % node_uuid
            locals['errstr'] = _("Timeout while waiting for console subprocess"
                                 "to start for node %s.") % node_uuid
            LOG.warning(locals['errstr'])
            raise loopingcall.LoopingCallDone()

@ -63,7 +63,7 @@ deploy_opts = [
               default=3,
               help='Maximum attempts to verify an iSCSI connection is '
                    'active, sleeping 1 second between attempts.'),
    ]
]

CONF = cfg.CONF
CONF.register_opts(deploy_opts, group='deploy')
@ -146,8 +146,9 @@ def verify_iscsi_connection(target_iqn):
            break
        time.sleep(1)
        LOG.debug("iSCSI connection not active. Rechecking. Attempt "
                  "%(attempt)d out of %(total)d", {"attempt": attempt + 1,
                  "total": CONF.deploy.iscsi_verify_attempts})
                  "%(attempt)d out of %(total)d",
                  {"attempt": attempt + 1,
                   "total": CONF.deploy.iscsi_verify_attempts})
    else:
        msg = _("iSCSI connection did not become active after attempting to "
                "verify %d times.") % CONF.deploy.iscsi_verify_attempts
@ -208,12 +209,12 @@ def get_disk_identifier(dev):
    :returns The Disk Identifier.
    """
    disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4',
                                   '-e', '''\"0x%08x\"''',
                                   dev,
                                   run_as_root=True,
                                   check_exit_code=[0],
                                   attempts=5,
                                   delay_on_retry=True)
                                    '-e', '''\"0x%08x\"''',
                                    dev,
                                    run_as_root=True,
                                    check_exit_code=[0],
                                    attempts=5,
                                    delay_on_retry=True)
    return disk_identifier[0]


@ -258,12 +259,12 @@ def make_partitions(dev, root_mb, swap_mb, ephemeral_mb,

    if ephemeral_mb:
        LOG.debug("Add ephemeral partition (%(size)d MB) to device: %(dev)s",
                 {'dev': dev, 'size': ephemeral_mb})
                  {'dev': dev, 'size': ephemeral_mb})
        part_num = dp.add_partition(ephemeral_mb)
        part_dict['ephemeral'] = part_template % part_num
    if swap_mb:
        LOG.debug("Add Swap partition (%(size)d MB) to device: %(dev)s",
                 {'dev': dev, 'size': swap_mb})
                  {'dev': dev, 'size': swap_mb})
        part_num = dp.add_partition(swap_mb, fs_type='linux-swap')
        part_dict['swap'] = part_template % part_num
    if configdrive_mb:
@ -276,7 +277,7 @@ def make_partitions(dev, root_mb, swap_mb, ephemeral_mb,
    # enables tools like cloud-init's growroot utility to expand the root
    # partition until the end of the disk.
    LOG.debug("Add root partition (%(size)d MB) to device: %(dev)s",
             {'dev': dev, 'size': root_mb})
              {'dev': dev, 'size': root_mb})
    part_num = dp.add_partition(root_mb, bootable=(boot_option == "local" and
                                                   boot_mode == "bios"))
    part_dict['root'] = part_template % part_num
@ -295,8 +296,9 @@ def is_block_device(dev):
            s = os.stat(dev)
        except OSError as e:
            LOG.debug("Unable to stat device %(dev)s. Attempt %(attempt)d "
                      "out of %(total)d. Error: %(err)s", {"dev": dev,
                      "attempt": attempt + 1, "total": attempts, "err": e})
                      "out of %(total)d. Error: %(err)s",
                      {"dev": dev, "attempt": attempt + 1,
                       "total": attempts, "err": e})
            time.sleep(1)
        else:
            return stat.S_ISBLK(s.st_mode)
@ -600,8 +602,8 @@ def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format,
                 'efi system partition'):
        part_device = part_dict.get(part)
        LOG.debug("Checking for %(part)s device (%(dev)s) on node "
                  "%(node)s.", {'part': part, 'dev': part_device,
                  'node': node_uuid})
                  "%(node)s.",
                  {'part': part, 'dev': part_device, 'node': node_uuid})
        if part_device and not is_block_device(part_device):
            raise exception.InstanceDeployFailure(
                _("'%(partition)s' device '%(part_device)s' not found") %
@ -648,10 +650,11 @@ def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format,
    return uuids_to_return


def deploy_partition_image(address, port, iqn, lun, image_path,
        root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid,
        preserve_ephemeral=False, configdrive=None,
        boot_option="netboot", boot_mode="bios"):
def deploy_partition_image(
        address, port, iqn, lun, image_path,
        root_mb, swap_mb, ephemeral_mb, ephemeral_format, node_uuid,
        preserve_ephemeral=False, configdrive=None,
        boot_option="netboot", boot_mode="bios"):
    """All-in-one function to deploy a partition image to a node.

    :param address: The iSCSI IP address.
@ -735,7 +738,7 @@ def _iscsi_setup_and_handle_errors(address, port, iqn, lun):
    login_iscsi(address, port, iqn)
    if not is_block_device(dev):
        raise exception.InstanceDeployFailure(_("Parent device '%s' not found")
                                                % dev)
                                              % dev)
    try:
        yield dev
    except processutils.ProcessExecutionError as err:
@ -786,8 +789,8 @@ def check_for_missing_params(info_dict, error_msg, param_prefix=''):

    if missing_info:
        exc_msg = _("%(error_msg)s. Missing are: %(missing_info)s")
        raise exception.MissingParameterValue(exc_msg %
            {'error_msg': error_msg, 'missing_info': missing_info})
        raise exception.MissingParameterValue(
            exc_msg % {'error_msg': error_msg, 'missing_info': missing_info})


def fetch_images(ctx, cache, images_info, force_raw=True):

@ -223,16 +223,16 @@ class Client(object):
                raise exception.DracOperationFailed(message=message)
            else:
                raise exception.DracUnexpectedReturnValue(
                        expected_return_value=expected_return_value,
                        actual_return_value=return_value)
                    expected_return_value=expected_return_value,
                    actual_return_value=return_value)

        return root

    def _get_root(self, doc):
        if doc is None or doc.root() is None:
            raise exception.DracClientError(
                    last_error=self.client.last_error(),
                    fault_string=self.client.fault_string(),
                    response_code=self.client.response_code())
                last_error=self.client.last_error(),
                fault_string=self.client.fault_string(),
                response_code=self.client.response_code())
        root = doc.root()
        return ElementTree.fromstring(root.string())

@ -91,10 +91,10 @@ def _get_next_boot_mode(node):
    # and another one for the OneTime if set
    boot_mode = None
    for i in items:
        instance_id = drac_common.find_xml(i, 'InstanceID',
                            resource_uris.DCIM_BootConfigSetting).text
        is_next = drac_common.find_xml(i, 'IsNext',
                            resource_uris.DCIM_BootConfigSetting).text
        instance_id = drac_common.find_xml(
            i, 'InstanceID', resource_uris.DCIM_BootConfigSetting).text
        is_next = drac_common.find_xml(
            i, 'IsNext', resource_uris.DCIM_BootConfigSetting).text

        boot_mode = {'instance_id': instance_id, 'is_next': is_next}
        # If OneTime is set we should return it, because that's
@ -162,8 +162,8 @@ def _check_for_config_job(node):
        if TARGET_DEVICE not in name.text:
            continue

        job_status = drac_common.find_xml(i, 'JobStatus',
                            resource_uris.DCIM_LifecycleJob).text
        job_status = drac_common.find_xml(
            i, 'JobStatus', resource_uris.DCIM_LifecycleJob).text
        # If job is already completed or failed we can
        # create another one.
        # Job Control Documentation: http://goo.gl/o1dDD3 (Section 7.2.3.2)
@ -249,13 +249,14 @@ class DracManagement(base.ManagementInterface):
                      {'node_uuid': task.node.uuid, 'error': exc,
                       'device': device})

        instance_id = drac_common.find_xml(doc, 'InstanceID',
                            resource_uris.DCIM_BootSourceSetting).text
        instance_id = drac_common.find_xml(
            doc, 'InstanceID', resource_uris.DCIM_BootSourceSetting).text

        source = 'OneTime'
        if persistent:
            source = drac_common.find_xml(doc, 'BootSourceType',
                            resource_uris.DCIM_BootSourceSetting).text
            source = drac_common.find_xml(
                doc, 'BootSourceType',
                resource_uris.DCIM_BootSourceSetting).text

        # NOTE(lucasagomes): Don't ask me why 'BootSourceType' is set
        # for 'InstanceID' and 'InstanceID' is set for 'source'! You
@ -312,8 +313,8 @@ class DracManagement(base.ManagementInterface):
                      'Reason: %(error)s.'),
                      {'node_uuid': task.node.uuid, 'error': exc})

        instance_id = drac_common.find_xml(doc, 'InstanceID',
                            resource_uris.DCIM_BootSourceSetting).text
        instance_id = drac_common.find_xml(
            doc, 'InstanceID', resource_uris.DCIM_BootSourceSetting).text
        boot_device = next((key for (key, value) in _BOOT_DEVICES_MAP.items()
                            if value in instance_id), None)
        return {'boot_device': boot_device, 'persistent': persistent}

@ -45,8 +45,9 @@ class FakePower(base.PowerInterface):

    def set_power_state(self, task, power_state):
        if power_state not in [states.POWER_ON, states.POWER_OFF]:
            raise exception.InvalidParameterValue(_("set_power_state called "
                    "with an invalid power state: %s.") % power_state)
            raise exception.InvalidParameterValue(
                _("set_power_state called with an invalid power"
                  "state: %s.") % power_state)
        task.node.power_state = power_state

    def reboot(self, task):

@ -51,8 +51,8 @@ def _parse_driver_info(node):
    info = node.driver_info or {}
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(_(
            "Missing the following iBoot credentials in node's"
        raise exception.MissingParameterValue(
            _("Missing the following iBoot credentials in node's"
              " driver_info: %s.") % missing_info)

    address = info.get('iboot_address', None)
@ -63,24 +63,24 @@ def _parse_driver_info(node):
    try:
        relay_id = int(relay_id)
    except ValueError:
        raise exception.InvalidParameterValue(_(
            "iBoot PDU relay id must be an integer."))
        raise exception.InvalidParameterValue(
            _("iBoot PDU relay id must be an integer."))

    port = info.get('iboot_port', 9100)
    try:
        port = int(port)
    except ValueError:
        raise exception.InvalidParameterValue(_(
            "iBoot PDU port must be an integer."))
        raise exception.InvalidParameterValue(
            _("iBoot PDU port must be an integer."))

    return {
            'address': address,
            'username': username,
            'password': password,
            'port': port,
            'relay_id': relay_id,
            'uuid': node.uuid,
           }
        'address': address,
        'username': username,
        'password': password,
        'port': port,
        'relay_id': relay_id,
        'uuid': node.uuid,
    }


def _get_connection(driver_info):
@ -112,7 +112,7 @@ def _power_status(driver_info):
    except IndexError:
        LOG.warning(_LW("Cannot get power status for node '%(node)s' at relay "
                        "'%(relay)s'. iBoot get_relays() failed."),
                        {'node': driver_info['uuid'], 'relay': relay_id})
                    {'node': driver_info['uuid'], 'relay': relay_id})
        return states.ERROR

    if status:
@ -182,8 +182,8 @@ class IBootPower(base.PowerInterface):
        elif pstate == states.POWER_OFF:
            _switch(driver_info, False)
        else:
            raise exception.InvalidParameterValue(_(
                "set_power_state called with invalid "
            raise exception.InvalidParameterValue(
                _("set_power_state called with invalid "
                  "power state %s.") % pstate)

        state = _power_status(driver_info)

@ -85,8 +85,8 @@ COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
DEFAULT_BOOT_MODE = 'LEGACY'

BOOT_MODE_GENERIC_TO_ILO = {'bios': 'legacy', 'uefi': 'uefi'}
BOOT_MODE_ILO_TO_GENERIC = dict((v, k)
                           for (k, v) in BOOT_MODE_GENERIC_TO_ILO.items())
BOOT_MODE_ILO_TO_GENERIC = dict(
    (v, k) for (k, v) in BOOT_MODE_GENERIC_TO_ILO.items())


def parse_driver_info(node):
@ -113,8 +113,8 @@ def parse_driver_info(node):
            missing_info.append(param)
    if missing_info:
        raise exception.MissingParameterValue(_(
                "The following required iLO parameters are missing from the "
                "node's driver_info: %s") % missing_info)
            "The following required iLO parameters are missing from the "
            "node's driver_info: %s") % missing_info)

    not_integers = []
    for param in OPTIONAL_PROPERTIES:
@ -134,8 +134,8 @@ def parse_driver_info(node):

    if not_integers:
        raise exception.InvalidParameterValue(_(
                "The following iLO parameters from the node's driver_info "
                "should be integers: %s") % not_integers)
            "The following iLO parameters from the node's driver_info "
            "should be integers: %s") % not_integers)

    return d_info

@ -287,12 +287,12 @@ def attach_vmedia(node, device, url):

    try:
        ilo_object.insert_virtual_media(url, device=device)
        ilo_object.set_vm_status(device=device, boot_option='CONNECT',
                write_protect='YES')
        ilo_object.set_vm_status(
            device=device, boot_option='CONNECT', write_protect='YES')
    except ilo_error.IloError as ilo_exception:
        operation = _("Inserting virtual media %s") % device
        raise exception.IloOperationError(operation=operation,
                error=ilo_exception)
        raise exception.IloOperationError(
            operation=operation, error=ilo_exception)

    LOG.info(_LI("Attached virtual media %s successfully."), device)

@ -318,11 +318,11 @@ def set_boot_mode(node, boot_mode):

    try:
        ilo_object.set_pending_boot_mode(
                BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
            BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
    except ilo_error.IloError as ilo_exception:
        operation = _("Setting %s as boot mode") % boot_mode
        raise exception.IloOperationError(operation=operation,
                error=ilo_exception)
        raise exception.IloOperationError(
            operation=operation, error=ilo_exception)

    LOG.info(_LI("Node %(uuid)s boot mode is set to %(boot_mode)s."),
             {'uuid': node.uuid, 'boot_mode': boot_mode})
@ -367,15 +367,15 @@ def update_boot_mode(task):
        try:
            boot_mode = 'uefi'
            ilo_object.set_pending_boot_mode(
                    BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
                BOOT_MODE_GENERIC_TO_ILO[boot_mode].upper())
        except ilo_error.IloError as ilo_exception:
            operation = _("Setting %s as boot mode") % boot_mode
            raise exception.IloOperationError(operation=operation,
                                              error=ilo_exception)

        LOG.debug("Node %(uuid)s boot mode is being set to %(boot_mode)s "
                    "as pending boot mode is unknown.",
                    {'uuid': node.uuid, 'boot_mode': boot_mode})
                  "as pending boot mode is unknown.",
                  {'uuid': node.uuid, 'boot_mode': boot_mode})

        instance_info = node.instance_info
        instance_info['deploy_boot_mode'] = boot_mode
@ -418,11 +418,11 @@ def setup_vmedia_for_boot(task, boot_iso, parameters=None):
            container = CONF.ilo.swift_ilo_container
            object_name = parsed_ref.path
            timeout = CONF.ilo.swift_object_expiry_timeout
            boot_iso_url = swift_api.get_temp_url(container, object_name,
                    timeout)
            boot_iso_url = swift_api.get_temp_url(
                container, object_name, timeout)
        elif service_utils.is_glance_image(boot_iso):
            boot_iso_url = images.get_temp_url_for_glance_image(task.context,
                    boot_iso)
            boot_iso_url = (
                images.get_temp_url_for_glance_image(task.context, boot_iso))

    attach_vmedia(task.node, 'CDROM', boot_iso_url or boot_iso)

@ -507,7 +507,7 @@ def set_secure_boot_mode(task, flag):
    """

    operation = (_("Setting secure boot to %(flag)s for node %(node)s.") %
                  {'flag': flag, 'node': task.node.uuid})
                 {'flag': flag, 'node': task.node.uuid})
    ilo_object = get_ilo_object(task.node)

    try:

@ -54,11 +54,11 @@ clean_opts = [
               help='Priority for erase devices clean step. If unset, '
                    'it defaults to 10. If set to 0, the step will be '
                    'disabled and will not run during cleaning.')
    ]
]

REQUIRED_PROPERTIES = {
    'ilo_deploy_iso': _("UUID (from Glance) of the deployment ISO. "
        "Required.")
                        "Required.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES

@ -131,8 +131,8 @@ def _get_boot_iso(task, root_uuid):

    image_href = deploy_info['image_source']
    image_properties = (
        images.get_image_properties(task.context,
            image_href, ['boot_iso', 'kernel_id', 'ramdisk_id']))
        images.get_image_properties(
            task.context, image_href, ['boot_iso', 'kernel_id', 'ramdisk_id']))

    boot_iso_uuid = image_properties.get('boot_iso')
    kernel_href = (task.node.instance_info.get('kernel') or
@ -448,7 +448,7 @@ class IloVirtualMediaIscsiDeploy(base.DeployInterface):
        # user to tear down such a Node.
        except exception.IloOperationNotSupported:
            LOG.warn(_LW('Secure boot mode is not supported for node %s'),
                         task.node.uuid)
                     task.node.uuid)
        return states.DELETED

    def prepare(self, task):
@ -528,7 +528,7 @@ class IloVirtualMediaAgentDeploy(base.DeployInterface):
        # Node.
        except exception.IloOperationNotSupported:
            LOG.warn(_LW('Secure boot mode is not supported for node %s'),
                         task.node.uuid)
                     task.node.uuid)
        return states.DELETED

    def prepare(self, task):

@ -58,8 +58,8 @@ def _create_ports_if_not_exist(node, macs):
                     "%(node)s"), {'address': mac, 'node': node.uuid})
        except exception.MACAlreadyExists:
            LOG.warn(_LW("Port already exists for MAC address %(address)s "
                         "for node %(node)s"), {'address': mac,
                                                'node': node.uuid})
                         "for node %(node)s"),
                     {'address': mac, 'node': node.uuid})


def _get_essential_properties(node, ilo_object):
@ -104,9 +104,9 @@ def _validate(node, data):
            raise exception.HardwareInspectionFailure(error=error)
        else:
            error = (_("Essential properties are expected to be in dictionary "
                        "format, received %(properties)s from node "
                        "%(node)s.") % {"properties": data['properties'],
                                        'node': node.uuid})
                       "format, received %(properties)s from node "
                       "%(node)s.") % {"properties": data['properties'],
                                       'node': node.uuid})
            raise exception.HardwareInspectionFailure(error=error)
    else:
        error = (_("The node %s didn't return 'properties' as the key with "
@ -117,7 +117,7 @@ def _validate(node, data):
        if not isinstance(data['macs'], dict):
            error = (_("Node %(node)s didn't return MACs %(macs)s "
                       "in dictionary format.")
                       % {"macs": data['macs'], 'node': node.uuid})
                     % {"macs": data['macs'], 'node': node.uuid})
            raise exception.HardwareInspectionFailure(error=error)
    else:
        error = (_("The node %s didn't return 'macs' as the key with "
@ -170,17 +170,17 @@ def _update_capabilities(node, new_capabilities):
        # occur in malformed capabilities like:
        # properties/capabilities='boot_mode:bios,boot_option'.
        msg = (_("Node %(node)s has invalid capabilities string "
                  "%(capabilities)s, unable to modify the node "
                  "properties['capabilities'] string")
                  % {'node': node.uuid, 'capabilities': node_capabilities})
               "%(capabilities)s, unable to modify the node "
               "properties['capabilities'] string")
               % {'node': node.uuid, 'capabilities': node_capabilities})
        raise exception.InvalidParameterValue(msg)
    if isinstance(new_capabilities, dict):
        cap_dict.update(new_capabilities)
    else:
        msg = (_("The expected format of capabilities from inspection "
                 "is dictionary while node %(node)s returned "
                 "%(capabilities)s.") % {'node': node.uuid,
                                         'capabilities': new_capabilities})
                 "%(capabilities)s.")
               % {'node': node.uuid, 'capabilities': new_capabilities})
        raise exception.HardwareInspectionFailure(error=msg)
    return ','.join(['%(key)s:%(value)s' % {'key': key, 'value': value}
                     for key, value in six.iteritems(cap_dict)])
@ -201,7 +201,7 @@ def _get_capabilities(node, ilo_object):
        capabilities = ilo_object.get_server_capabilities()
    except ilo_error.IloError:
        LOG.debug(("Node %s did not return any additional capabilities."),
                   node.uuid)
                  node.uuid)

    return capabilities

@ -247,7 +247,7 @@ class IloInspect(base.InspectInterface):
            state = task.driver.power.get_power_state(task)
        except exception.IloOperationError as ilo_exception:
            operation = (_("Inspecting hardware (get_power_state) on %s")
                          % task.node.uuid)
                         % task.node.uuid)
            raise exception.IloOperationError(operation=operation,
                                              error=ilo_exception)
        if state != states.POWER_ON:
@ -287,13 +287,13 @@ class IloInspect(base.InspectInterface):

        LOG.debug(("Node properties for %(node)s are updated as "
                   "%(properties)s"),
                   {'properties': inspected_properties,
                    'node': task.node.uuid})
                  {'properties': inspected_properties,
                   'node': task.node.uuid})

        LOG.info(_LI("Node %s inspected."), task.node.uuid)
        if power_turned_on:
            conductor_utils.node_power_action(task, states.POWER_OFF)
            LOG.info(_LI("The node %s was powered on for inspection. "
                         "Powered off the node as inspection completed."),
                          task.node.uuid)
                     task.node.uuid)
        return states.MANAGEABLE

@ -33,12 +33,13 @@ LOG = logging.getLogger(__name__)

ilo_error = importutils.try_import('proliantutils.exception')

BOOT_DEVICE_MAPPING_TO_ILO = {boot_devices.PXE: 'NETWORK',
                              boot_devices.DISK: 'HDD',
                              boot_devices.CDROM: 'CDROM'
                              }
BOOT_DEVICE_ILO_TO_GENERIC = {v: k
                              for k, v in BOOT_DEVICE_MAPPING_TO_ILO.items()}
BOOT_DEVICE_MAPPING_TO_ILO = {
    boot_devices.PXE: 'NETWORK',
    boot_devices.DISK: 'HDD',
    boot_devices.CDROM: 'CDROM'
}
BOOT_DEVICE_ILO_TO_GENERIC = {
    v: k for k, v in BOOT_DEVICE_MAPPING_TO_ILO.items()}

MANAGEMENT_PROPERTIES = ilo_common.REQUIRED_PROPERTIES.copy()
MANAGEMENT_PROPERTIES.update(ilo_common.CLEAN_PROPERTIES)
@ -88,8 +89,9 @@ def _execute_ilo_clean_step(node, step, *args, **kwargs):
        # The specified clean step is not present in the proliantutils
        # package. Raise exception to update the proliantutils package
        # to newer version.
        raise exception.NodeCleaningFailure(_("Clean step '%s' not "
                "found. 'proliantutils' package needs to be updated.") % step)
        raise exception.NodeCleaningFailure(
            _("Clean step '%s' not found. 'proliantutils' package needs to be "
              "updated.") % step)
    try:
        clean_step(*args, **kwargs)
    except ilo_error.IloCommandNotSupportedError:
@ -97,11 +99,12 @@ def _execute_ilo_clean_step(node, step, *args, **kwargs):
        # Log the failure and continue with cleaning.
        LOG.warn(_LW("'%(step)s' clean step is not supported on node "
                     "%(uuid)s. Skipping the clean step."),
                     {'step': step, 'uuid': node.uuid})
                 {'step': step, 'uuid': node.uuid})
    except ilo_error.IloError as ilo_exception:
        raise exception.NodeCleaningFailure(_("Clean step %(step)s failed "
                "on node %(node)s with error: %(err)s") %
                {'node': node.uuid, 'step': step, 'err': ilo_exception})
        raise exception.NodeCleaningFailure(_(
            "Clean step %(step)s failed "
            "on node %(node)s with error: %(err)s") %
            {'node': node.uuid, 'step': step, 'err': ilo_exception})


class IloManagement(base.ManagementInterface):
@ -211,7 +214,7 @@ class IloManagement(base.ManagementInterface):
                                              error=ilo_exception)

        LOG.debug("Node %(uuid)s set to boot from %(device)s.",
                   {'uuid': task.node.uuid, 'device': device})
                  {'uuid': task.node.uuid, 'device': device})

    def get_sensors_data(self, task):
        """Get sensors data.

@ -157,14 +157,14 @@ def _set_power_state(task, target_state):
            target_state = states.POWER_ON
        else:
            msg = _("_set_power_state called with invalid power state "
                   "'%s'") % target_state
                    "'%s'") % target_state
            raise exception.InvalidParameterValue(msg)

    except ilo_error.IloError as ilo_exception:
        LOG.error(_LE("iLO set_power_state failed to set state to %(tstate)s "
                      " for node %(node_id)s with error: %(error)s"),
                      {'tstate': target_state, 'node_id': node.uuid,
                       'error': ilo_exception})
                  {'tstate': target_state, 'node_id': node.uuid,
                   'error': ilo_exception})
        operation = _('iLO set_power_state')
        raise exception.IloOperationError(operation=operation,
                                          error=ilo_exception)
@ -176,7 +176,7 @@ def _set_power_state(task, target_state):
        timeout = (CONF.ilo.power_wait) * (CONF.ilo.power_retry)
        LOG.error(_LE("iLO failed to change state to %(tstate)s "
                      "within %(timeout)s sec"),
                      {'tstate': target_state, 'timeout': timeout})
                  {'tstate': target_state, 'timeout': timeout})
        raise exception.PowerStateFailure(pstate=target_state)

@ -204,7 +204,7 @@ class ImageCache(object):
        amount = self._clean_up_ensure_cache_size(survived, amount)
        if amount is not None and amount > 0:
            LOG.warn(_LW("Cache clean up was unable to reclaim %(required)d "
                     "MiB of disk space, still %(left)d MiB required"),
                         "MiB of disk space, still %(left)d MiB required"),
                     {'required': amount_copy / 1024 / 1024,
                      'left': amount / 1024 / 1024})

@ -264,7 +264,7 @@ class ImageCache(object):
        total_size = sum(os.path.getsize(f)
                         for f in total_listing)
        while listing and (total_size > self._cache_size or
                          (amount is not None and amount > 0)):
                           (amount is not None and amount > 0)):
            file_name, last_used, stat = listing.pop()
            try:
                os.unlink(file_name)
@ -373,7 +373,7 @@ def clean_up_caches(ctx, directory, images_info):
        after trying all the caches.
    """
    total_size = sum(images.download_size(ctx, uuid)
                    for (uuid, path) in images_info)
                     for (uuid, path) in images_info)
    _clean_up_caches(directory, total_size)

@ -58,7 +58,7 @@ opts = [
'sent to a server. There is a risk with some hardware '
'that setting this too low may cause the BMC to crash. '
'Recommended setting is 5 seconds.'),
]
]

CONF = cfg.CONF
CONF.register_opts(opts, group='ipmi')
@ -139,8 +139,8 @@ def _power_on(driver_info):
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('on', wait)
except pyghmi_exception.IpmiException as e:
@ -169,8 +169,8 @@ def _power_off(driver_info):
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('off', wait)
except pyghmi_exception.IpmiException as e:
@ -201,8 +201,8 @@ def _reboot(driver_info):
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('boot', wait)
except pyghmi_exception.IpmiException as e:
@ -228,8 +228,8 @@ def _power_status(driver_info):

try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_power()
except pyghmi_exception.IpmiException as e:
LOG.warning(_LW("IPMI get power state failed for node %(node_id)s "
@ -261,15 +261,15 @@ def _get_sensors_data(driver_info):
"""
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_sensor_data()
except Exception as e:
LOG.error(_LE("IPMI get sensor data failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': e})
{'node_id': driver_info['uuid'], 'error': e})
raise exception.FailedToGetSensorData(
node=driver_info['uuid'], error=e)
node=driver_info['uuid'], error=e)

if not ret:
return {}
@ -279,13 +279,14 @@ def _get_sensors_data(driver_info):
# ignore the sensor data which has no sensor reading value
if not reading.value:
continue
sensors_data.setdefault(reading.type,
sensors_data.setdefault(
reading.type,
{})[reading.name] = {
'Sensor Reading': '%s %s' % (reading.value, reading.units),
'Sensor ID': reading.name,
'States': str(reading.states),
'Units': reading.units,
'Health': str(reading.health)}
'Sensor Reading': '%s %s' % (reading.value, reading.units),
'Sensor ID': reading.name,
'States': str(reading.states),
'Units': reading.units,
'Health': str(reading.health)}

return sensors_data

@ -340,9 +341,9 @@ class NativeIPMIPower(base.PowerInterface):
elif pstate == states.POWER_OFF:
_power_off(driver_info)
else:
raise exception.InvalidParameterValue(_(
"set_power_state called with an invalid power state: %s."
) % pstate)
raise exception.InvalidParameterValue(
_("set_power_state called with an invalid power state: %s."
) % pstate)

@task_manager.require_exclusive_lock
def reboot(self, task):
@ -411,8 +412,8 @@ class NativeIPMIManagement(base.ManagementInterface):
driver_info = _parse_driver_info(task.node)
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
userid=driver_info['username'],
password=driver_info['password'])
bootdev = _BOOT_DEVICES_MAP[device]
ipmicmd.set_bootdev(bootdev, persist=persistent)
except pyghmi_exception.IpmiException as e:
@ -442,8 +443,8 @@ class NativeIPMIManagement(base.ManagementInterface):
response = {'boot_device': None}
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_bootdev()
# FIXME(lucasagomes): pyghmi doesn't seem to handle errors
# consistently, for some errors it raises an exception
@ -514,7 +515,7 @@ class NativeIPMIShellinaboxConsole(base.ConsoleInterface):

path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'])
path, driver_info['password'])

console_cmd = ("/:%(uid)s:%(gid)s:HOME:pyghmicons %(bmc)s"
" %(user)s"
|
@ -298,18 +298,18 @@ def _parse_driver_info(node):
{'priv_level': priv_level, 'valid_levels': valid_priv_lvls})

return {
'address': address,
'username': username,
'password': password,
'port': port,
'uuid': node.uuid,
'priv_level': priv_level,
'local_address': local_address,
'transit_channel': transit_channel,
'transit_address': transit_address,
'target_channel': target_channel,
'target_address': target_address
}
'address': address,
'username': username,
'password': password,
'port': port,
'uuid': node.uuid,
'priv_level': priv_level,
'local_address': local_address,
'transit_channel': transit_channel,
'transit_address': transit_address,
'target_channel': target_channel,
'target_address': target_address
}


def _exec_ipmitool(driver_info, command):
@ -359,7 +359,7 @@ def _exec_ipmitool(driver_info, command):
# NOTE(deva): ensure that no communications are sent to a BMC more
# often than once every min_command_interval seconds.
time_till_next_poll = CONF.ipmi.min_command_interval - (
time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
if time_till_next_poll > 0:
time.sleep(time_till_next_poll)
# Resetting the list that will be utilized so the password arguments
@ -368,9 +368,7 @@ def _exec_ipmitool(driver_info, command):
# 'ipmitool' command will prompt password if there is no '-f'
# option, we set it to '\0' to write a password file to support
# empty password
with _make_password_file(
driver_info['password'] or '\0'
) as pw_file:
with _make_password_file(driver_info['password'] or '\0') as pw_file:
cmd_args.append('-f')
cmd_args.append(pw_file)
cmd_args.extend(command.split(" "))
@ -384,23 +382,18 @@ def _exec_ipmitool(driver_info, command):
if ((time.time() > end_time) or
(num_tries == 0) or
not err_list):
LOG.error(_LE('IPMI Error while attempting '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'),
{
'node': driver_info['uuid'],
'cmd': e.cmd,
'error': e
LOG.error(_LE('IPMI Error while attempting "%(cmd)s"'
'for node %(node)s. Error: %(error)s'), {
'node': driver_info['uuid'],
'cmd': e.cmd, 'error': e
})
else:
ctxt.reraise = False
LOG.warning(_LW('IPMI Error encountered, retrying '
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'),
{
'node': driver_info['uuid'],
'cmd': e.cmd,
'error': e
'"%(cmd)s" for node %(node)s. '
'Error: %(error)s'), {
'node': driver_info['uuid'],
'cmd': e.cmd, 'error': e
})
finally:
LAST_CMD_TIME[driver_info['address']] = time.time()
@ -454,7 +447,7 @@ def _set_and_wait(target_state, driver_info):
exception.IPMIFailure):
# Log failures but keep trying
LOG.warning(_LW("IPMI power %(state)s failed for node %(node)s."),
{'state': state_name, 'node': driver_info['uuid']})
{'state': state_name, 'node': driver_info['uuid']})
finally:
mutable['iter'] += 1

@ -466,8 +459,8 @@ def _set_and_wait(target_state, driver_info):
# Stop if the next loop would exceed maximum retry_timeout
LOG.error(_LE('IPMI power %(state)s timed out after '
'%(tries)s retries on node %(node_id)s.'),
{'state': state_name, 'tries': mutable['iter'],
'node_id': driver_info['uuid']})
{'state': state_name, 'tries': mutable['iter'],
'node_id': driver_info['uuid']})
mutable['power'] = states.ERROR
raise loopingcall.LoopingCallDone()
else:
@ -559,7 +552,8 @@ def _get_sensor_type(node, sensor_data_dict):
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, unknown sensor type"
" data: %(sensors_data)s"), {'sensors_data': sensor_data_dict}))
" data: %(sensors_data)s"),
{'sensors_data': sensor_data_dict}))


def _parse_ipmi_sensors_data(node, sensors_data):
@ -589,7 +583,8 @@ def _parse_ipmi_sensors_data(node, sensors_data):

# ignore the sensors which has no current 'Sensor Reading' data
if 'Sensor Reading' in sensor_data_dict:
sensors_data_dict.setdefault(sensor_type,
sensors_data_dict.setdefault(
sensor_type,
{})[sensor_data_dict['Sensor ID']] = sensor_data_dict

# get nothing, no valid sensor data
@ -597,7 +592,8 @@ def _parse_ipmi_sensors_data(node, sensors_data):
raise exception.FailedToParseSensorData(
node=node.uuid,
error=(_("parse ipmi sensor data failed, get nothing with input"
" data: %(sensors_data)s") % {'sensors_data': sensors_data}))
" data: %(sensors_data)s")
% {'sensors_data': sensors_data}))
return sensors_data_dict


@ -659,9 +655,9 @@ class IPMIPower(base.PowerInterface):
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()

def get_properties(self):
@ -716,8 +712,9 @@ class IPMIPower(base.PowerInterface):
elif pstate == states.POWER_OFF:
state = _power_off(driver_info)
else:
raise exception.InvalidParameterValue(_("set_power_state called "
"with invalid power state %s.") % pstate)
raise exception.InvalidParameterValue(
_("set_power_state called "
"with invalid power state %s.") % pstate)

if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
@ -751,9 +748,9 @@ class IPMIManagement(base.ManagementInterface):
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()

def validate(self, task):
@ -997,9 +994,9 @@ class IPMIShellinaboxConsole(base.ConsoleInterface):
_check_option_support(['timing', 'single_bridge', 'dual_bridge'])
except OSError:
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
driver=self.__class__.__name__,
reason=_("Unable to locate usable ipmitool command in "
"the system path when checking ipmitool version"))
_check_temp_dir()

def get_properties(self):
@ -1036,7 +1033,7 @@ class IPMIShellinaboxConsole(base.ConsoleInterface):

path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'])
path, driver_info['password'])

ipmi_cmd = ("/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s"
" -I lanplus -U %(user)s -f %(pwfile)s"
@ -1074,7 +1071,7 @@ class IPMIShellinaboxConsole(base.ConsoleInterface):
console_utils.stop_shellinabox_console(driver_info['uuid'])
finally:
utils.unlink_without_raise(
_console_pwfile_path(driver_info['uuid']))
_console_pwfile_path(driver_info['uuid']))

def get_console(self, task):
"""Get the type and connection information about the console."""
|
@ -61,7 +61,7 @@ def _get_sensors_data(task):
"with the following error: %(error)s"),
{'node_id': task.node.uuid, 'error': e})
raise exception.FailedToGetSensorData(
node=task.node.uuid, error=e)
node=task.node.uuid, error=e)

sensors_data = {}
for sdr in sensor:
|
@ -71,7 +71,7 @@ pxe_opts = [
cfg.StrOpt('disk_devices',
default='cciss/c0d0,sda,hda,vda',
help='The disk devices to scan while doing the deploy.'),
]
]

CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
@ -160,11 +160,11 @@ def parse_instance_info(node):

preserve_ephemeral = info.get('preserve_ephemeral', False)
try:
i_info['preserve_ephemeral'] = strutils.bool_from_string(
preserve_ephemeral, strict=True)
i_info['preserve_ephemeral'] = (
strutils.bool_from_string(preserve_ephemeral, strict=True))
except ValueError as e:
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': 'preserve_ephemeral', 'reason': e})
raise exception.InvalidParameterValue(
err_msg_invalid % {'param': 'preserve_ephemeral', 'reason': e})
return i_info


@ -237,12 +237,12 @@ def get_deploy_info(node, **kwargs):
raise exception.InvalidParameterValue(_("Deploy key does not match"))

params = {
'address': kwargs.get('address'),
'port': kwargs.get('port', '3260'),
'iqn': kwargs.get('iqn'),
'lun': kwargs.get('lun', '1'),
'image_path': _get_image_file_path(node.uuid),
'node_uuid': node.uuid}
'address': kwargs.get('address'),
'port': kwargs.get('port', '3260'),
'iqn': kwargs.get('iqn'),
'lun': kwargs.get('lun', '1'),
'image_path': _get_image_file_path(node.uuid),
'node_uuid': node.uuid}

is_whole_disk_image = node.driver_internal_info['is_whole_disk_image']
if not is_whole_disk_image:
@ -255,9 +255,9 @@ def get_deploy_info(node, **kwargs):

missing = [key for key in params if params[key] is None]
if missing:
raise exception.MissingParameterValue(_(
"Parameters %s were not passed to ironic"
" for deploy.") % missing)
raise exception.MissingParameterValue(
_("Parameters %s were not passed to ironic"
" for deploy.") % missing)

if is_whole_disk_image:
return params
@ -325,7 +325,7 @@ def continue_deploy(task, **kwargs):
except Exception as e:
msg = (_('Deploy failed for instance %(instance)s. '
'Error: %(error)s') %
{'instance': node.instance_uuid, 'error': e})
{'instance': node.instance_uuid, 'error': e})
_fail_deploy(task, msg)

root_uuid_or_disk_id = uuid_dict_returned.get(
@ -536,8 +536,9 @@ def validate(task):
"""
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.MissingParameterValue(_("Node %s does not have "
"any port associated with it.") % node.uuid)
raise exception.MissingParameterValue(
_("Node %s does not have any port associated with it.")
% node.uuid)

try:
# TODO(lucasagomes): Validate the format of the URL
|
@ -50,12 +50,12 @@ from ironic.openstack.common import fileutils
pxe_opts = [
cfg.StrOpt('pxe_config_template',
default=paths.basedir_def(
'drivers/modules/pxe_config.template'),
'drivers/modules/pxe_config.template'),
help='On ironic-conductor node, template file for PXE '
'configuration.'),
cfg.StrOpt('uefi_pxe_config_template',
default=paths.basedir_def(
'drivers/modules/elilo_efi_pxe_config.template'),
'drivers/modules/elilo_efi_pxe_config.template'),
help='On ironic-conductor node, template file for PXE '
'configuration for UEFI boot loader.'),
cfg.StrOpt('tftp_server',
@ -77,20 +77,20 @@ pxe_opts = [
default='elilo.efi',
help='Bootfile DHCP parameter for UEFI boot mode.'),
cfg.StrOpt('http_url',
help='ironic-conductor node\'s HTTP server URL. '
'Example: http://192.1.2.3:8080'),
help='ironic-conductor node\'s HTTP server URL. '
'Example: http://192.1.2.3:8080'),
cfg.StrOpt('http_root',
default='/httpboot',
help='ironic-conductor node\'s HTTP root path.'),
default='/httpboot',
help='ironic-conductor node\'s HTTP root path.'),
cfg.BoolOpt('ipxe_enabled',
default=False,
help='Enable iPXE boot.'),
cfg.StrOpt('ipxe_boot_script',
default=paths.basedir_def(
'drivers/modules/boot.ipxe'),
'drivers/modules/boot.ipxe'),
help='On ironic-conductor node, the path to the main iPXE '
'script file.'),
]
]

LOG = logging.getLogger(__name__)

@ -222,10 +222,10 @@ def validate_boot_option_for_uefi(node):
LOG.error(_LE("Whole disk image with netboot is not supported in UEFI "
"boot mode."))
raise exception.InvalidParameterValue(_(
"Conflict: Whole disk image being used for deploy, but "
"cannot be used with node %(node_uuid)s configured to use "
"UEFI boot with netboot option") %
{'node_uuid': node.uuid})
"Conflict: Whole disk image being used for deploy, but "
"cannot be used with node %(node_uuid)s configured to use "
"UEFI boot with netboot option") %
{'node_uuid': node.uuid})


@image_cache.cleanup(priority=25)
@ -390,8 +390,9 @@ class PXEDeploy(base.DeployInterface):
# TODO(deva): optimize this if rerun on existing files
if CONF.pxe.ipxe_enabled:
# Copy the iPXE boot script to HTTP root directory
bootfile_path = os.path.join(CONF.pxe.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script))
bootfile_path = os.path.join(
CONF.pxe.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script))
shutil.copyfile(CONF.pxe.ipxe_boot_script, bootfile_path)
pxe_info = _get_image_info(node, task.context)
pxe_options = _build_pxe_config_options(node, pxe_info,
@ -417,19 +418,21 @@ class PXEDeploy(base.DeployInterface):
# but let's guard, just in case it's missing
iwdi = node.driver_internal_info.get('is_whole_disk_image')
try:
root_uuid_or_disk_id = node.driver_internal_info[
'root_uuid_or_disk_id']
root_uuid_or_disk_id = (
node.driver_internal_info['root_uuid_or_disk_id'])
except KeyError:
if not iwdi:
LOG.warn(_LW("The UUID for the root partition can't be "
"found, unable to switch the pxe config from "
"deployment mode to service (boot) mode for node "
"%(node)s"), {"node": node.uuid})
LOG.warn(_LW(
"The UUID for the root partition can't be "
"found, unable to switch the pxe config from "
"deployment mode to service (boot) mode for node "
"%(node)s"), {"node": node.uuid})
else:
LOG.warn(_LW("The disk id for the whole disk image can't "
"be found, unable to switch the pxe config from "
"deployment mode to service (boot) mode for "
"node %(node)s"), {"node": node.uuid})
LOG.warn(_LW(
"The disk id for the whole disk image can't "
"be found, unable to switch the pxe config from "
"deployment mode to service (boot) mode for "
"node %(node)s"), {"node": node.uuid})
else:
pxe_config_path = pxe_utils.get_pxe_config_file_path(
node.uuid)
|
@ -651,10 +651,10 @@ class ShellinaboxConsole(base.ConsoleInterface):
chassis_ip = urlparse.urlparse(driver_info['api_endpoint']).netloc

seamicro_cmd = ("/:%(uid)s:%(gid)s:HOME:telnet %(chassis)s %(port)s"
% {'uid': os.getuid(),
'gid': os.getgid(),
'chassis': chassis_ip,
'port': telnet_port})
% {'uid': os.getuid(),
'gid': os.getgid(),
'chassis': chassis_ip,
'port': telnet_port})

console_utils.start_shellinabox_console(driver_info['uuid'],
driver_info['port'],
|
@ -56,7 +56,7 @@ opts = [
cfg.IntOpt('power_timeout',
default=10,
help='Seconds to wait for power action to be completed')
]
]

LOG = logging.getLogger(__name__)

@ -155,12 +155,12 @@ class SNMPClient(object):
if error_indication:
# SNMP engine-level error.
raise exception.SNMPFailure(operation="GET",
error=error_indication)
error=error_indication)

if error_status:
# SNMP PDU error.
raise exception.SNMPFailure(operation="GET",
error=error_status.prettyPrint())
error=error_status.prettyPrint())

# We only expect a single value back
name, val = var_binds[0]
@ -185,12 +185,12 @@ class SNMPClient(object):
if error_indication:
# SNMP engine-level error.
raise exception.SNMPFailure(operation="SET",
error=error_indication)
error=error_indication)

if error_status:
# SNMP PDU error.
raise exception.SNMPFailure(operation="SET",
error=error_status.prettyPrint())
error=error_status.prettyPrint())


def _get_client(snmp_info):
@ -522,11 +522,11 @@ class SNMPDriverEatonPower(SNMPDriverBase):

# A dictionary of supported drivers keyed by snmp_driver attribute
DRIVER_CLASSES = {
'apc': SNMPDriverAPC,
'aten': SNMPDriverAten,
'cyberpower': SNMPDriverCyberPower,
'eatonpower': SNMPDriverEatonPower,
'teltronix': SNMPDriverTeltronix
'apc': SNMPDriverAPC,
'aten': SNMPDriverAten,
'cyberpower': SNMPDriverCyberPower,
'eatonpower': SNMPDriverEatonPower,
'teltronix': SNMPDriverTeltronix
}


|
@ -116,14 +116,17 @@ def _get_command_sets(virt_type):
'reboot_cmd': 'controlvm {_NodeName_} reset',
'list_all': "list vms|awk -F'\"' '{print $2}'",
'list_running': 'list runningvms',
'get_node_macs': ("showvminfo --machinereadable {_NodeName_} | "
'get_node_macs': (
"showvminfo --machinereadable {_NodeName_} | "
"awk -F '\"' '/macaddress/{print $2}'"),
'set_boot_device': ('{_BaseCmd_} modifyvm {_NodeName_} '
'set_boot_device': (
'{_BaseCmd_} modifyvm {_NodeName_} '
'--boot1 {_BootDevice_}'),
'get_boot_device': ("{_BaseCmd_} showvminfo "
'get_boot_device': (
"{_BaseCmd_} showvminfo "
"--machinereadable {_NodeName_} | "
"awk -F '\"' '/boot1/{print $2}'"),
}
}
elif virt_type == 'vmware':
return {
'base_cmd': 'LC_ALL=C /bin/vim-cmd',
@ -156,14 +159,18 @@ def _get_command_sets(virt_type):
'stop_cmd': 'destroy {_NodeName_}',
'reboot_cmd': 'reset {_NodeName_}',
'list_all': "list --all | tail -n +2 | awk -F\" \" '{print $2}'",
'list_running': ("list --all|grep running | "
'list_running': (
"list --all|grep running | "
"awk -v qc='\"' -F\" \" '{print qc$2qc}'"),
'get_node_macs': ("dumpxml {_NodeName_} | "
'get_node_macs': (
"dumpxml {_NodeName_} | "
"awk -F \"'\" '/mac address/{print $2}'| tr -d ':'"),
'set_boot_device': ("EDITOR=\"sed -i '/<boot \(dev\|order\)=*\>/d;"
'set_boot_device': (
"EDITOR=\"sed -i '/<boot \(dev\|order\)=*\>/d;"
"/<\/os>/i\<boot dev=\\\"{_BootDevice_}\\\"/>'\" "
"{_BaseCmd_} edit {_NodeName_}"),
'get_boot_device': ("{_BaseCmd_} dumpxml {_NodeName_} | "
'get_boot_device': (
"{_BaseCmd_} dumpxml {_NodeName_} | "
"awk '/boot dev=/ { gsub( \".*dev=\" Q, \"\" ); "
"gsub( Q \".*\", \"\" ); print; }' "
"Q=\"'\" RS=\"[<>]\" | "
@ -182,14 +189,17 @@ def _get_command_sets(virt_type):
'reboot_cmd': 'reset {_NodeName_}',
'list_all': "list -a -o name |tail -n +2",
'list_running': 'list -o name |tail -n +2',
'get_node_macs': ("list -j -i \"{_NodeName_}\" | "
'get_node_macs': (
"list -j -i \"{_NodeName_}\" | "
"awk -F'\"' '/\"mac\":/ {print $4}' | "
"sed 's/\\(..\\)\\(..\\)\\(..\\)\\(..\\)\\(..\\)\\(..\\)/"
"\\1:\\2:\\3:\\4:\\5\\6/' | "
"tr '[:upper:]' '[:lower:]'"),
'set_boot_device': ("{_BaseCmd_} set {_NodeName_} "
'set_boot_device': (
"{_BaseCmd_} set {_NodeName_} "
"--device-bootorder \"{_BootDevice_}\""),
'get_boot_device': ("{_BaseCmd_} list -i {_NodeName_} | "
'get_boot_device': (
"{_BaseCmd_} list -i {_NodeName_} | "
"awk '/^Boot order:/ {print $3}'"),
}
else:
@ -302,12 +312,12 @@ def _parse_driver_info(node):

# NOTE(deva): we map 'address' from API to 'host' for common utils
res = {
'host': address,
'username': username,
'port': port,
'virt_type': virt_type,
'uuid': node.uuid
}
'host': address,
'username': username,
'port': port,
'virt_type': virt_type,
'uuid': node.uuid
}

cmd_set = _get_command_sets(virt_type)
res['cmd_set'] = cmd_set
@ -505,8 +515,9 @@ class SSHPower(base.PowerInterface):
node.
"""
if not driver_utils.get_node_mac_addresses(task):
raise exception.MissingParameterValue(_("Node %s does not have "
"any port associated with it.") % task.node.uuid)
raise exception.MissingParameterValue(
_("Node %s does not have any port associated with it."
) % task.node.uuid)
try:
_get_connection(task.node)
except exception.SSHConnectFailed as e:
@ -558,8 +569,8 @@ class SSHPower(base.PowerInterface):
elif pstate == states.POWER_OFF:
state = _power_off(ssh_obj, driver_info)
else:
raise exception.InvalidParameterValue(_("set_power_state called "
"with invalid power state %s.") % pstate)
raise exception.InvalidParameterValue(
_("set_power_state called with invalid power state %s."
) % pstate)

if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
|
@ -32,18 +32,18 @@ if pyremotevbox:
from pyremotevbox import vbox as virtualbox

IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING = {
boot_devices.PXE: 'Network',
boot_devices.DISK: 'HardDisk',
boot_devices.CDROM: 'DVD',
}
VIRTUALBOX_TO_IRONIC_DEVICE_MAPPING = {v: k
for k, v in IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING.items()}
boot_devices.PXE: 'Network',
boot_devices.DISK: 'HardDisk',
boot_devices.CDROM: 'DVD',
}
VIRTUALBOX_TO_IRONIC_DEVICE_MAPPING = {
v: k for k, v in IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING.items()}

VIRTUALBOX_TO_IRONIC_POWER_MAPPING = {
'PoweredOff': states.POWER_OFF,
'Running': states.POWER_ON,
'Error': states.ERROR
}
'PoweredOff': states.POWER_OFF,
'Running': states.POWER_ON,
'Error': states.ERROR
}

opts = [
cfg.IntOpt('port',
@ -109,7 +109,7 @@ def _parse_driver_info(node):

if missing_params:
msg = (_("The following parameters are missing in driver_info: %s") %
', '.join(missing_params))
', '.join(missing_params))
raise exception.MissingParameterValue(msg)

for param in OPTIONAL_PROPERTIES:
@ -335,8 +335,8 @@ class VirtualBoxManagement(base.ManagementInterface):
try:
boot_dev = IRONIC_TO_VIRTUALBOX_DEVICE_MAPPING[device]
except KeyError:
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
raise exception.InvalidParameterValue(
_("Invalid boot device %s specified.") % device)

try:
_run_virtualbox_method(task.node, 'set_boot_device',
|
@ -99,8 +99,8 @@ class PXEAndIPMINativeDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('pyghmi'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import pyghmi library"))
driver=self.__class__.__name__,
reason=_("Unable to import pyghmi library"))
self.power = ipminative.NativeIPMIPower()
self.console = ipminative.NativeIPMIShellinaboxConsole()
self.deploy = pxe.PXEDeploy()
@ -124,8 +124,8 @@ class PXEAndSeaMicroDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('seamicroclient'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import seamicroclient library"))
driver=self.__class__.__name__,
reason=_("Unable to import seamicroclient library"))
self.power = seamicro.Power()
self.deploy = pxe.PXEDeploy()
self.management = seamicro.Management()
@ -152,8 +152,8 @@ class PXEAndIBootDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('iboot'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import iboot library"))
driver=self.__class__.__name__,
reason=_("Unable to import iboot library"))
self.power = iboot.IBootPower()
self.deploy = pxe.PXEDeploy()
self.vendor = pxe.VendorPassthru()
@ -172,8 +172,8 @@ class PXEAndIloDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('proliantutils'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import proliantutils library"))
driver=self.__class__.__name__,
reason=_("Unable to import proliantutils library"))
self.power = ilo_power.IloPower()
self.deploy = ilo_deploy.IloPXEDeploy()
self.vendor = ilo_deploy.IloPXEVendorPassthru()
@ -218,8 +218,8 @@ class PXEAndIRMCDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('scciclient'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import python-scciclient library"))
driver=self.__class__.__name__,
reason=_("Unable to import python-scciclient library"))
self.power = irmc_power.IRMCPower()
self.console = ipmitool.IPMIShellinaboxConsole()
self.deploy = pxe.PXEDeploy()
@ -242,8 +242,8 @@ class PXEAndVirtualBoxDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('pyremotevbox'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import pyremotevbox library"))
driver=self.__class__.__name__,
reason=_("Unable to import pyremotevbox library"))
self.power = virtualbox.VirtualBoxPower()
self.deploy = pxe.PXEDeploy()
self.management = virtualbox.VirtualBoxManagement()
@ -262,8 +262,8 @@ class PXEAndAMTDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('pywsman'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import pywsman library"))
driver=self.__class__.__name__,
reason=_("Unable to import pywsman library"))
self.power = amt_power.AMTPower()
self.deploy = pxe.PXEDeploy()
self.management = amt_management.AMTManagement()
|
@ -142,7 +142,7 @@ def get_node_capability(node, capability):
return parts[1]
else:
LOG.warn(_LW("Ignoring malformed capability '%s'. "
"Format should be 'key:val'."), node_capability)
"Format should be 'key:val'."), node_capability)


def add_node_capability(task, capability, value):
|
@ -195,7 +195,7 @@ class IronicObject(object):
fields = {
'created_at': obj_utils.datetime_or_str_or_none,
'updated_at': obj_utils.datetime_or_str_or_none,
}
}
obj_extra_fields = []

_attr_created_at_from_primitive = obj_utils.dt_deserializer
@ -454,8 +454,8 @@ class IronicObject(object):

def as_dict(self):
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k))
for k in self.fields
if hasattr(self, k))


class ObjectListBase(object):
@ -468,7 +468,7 @@ class ObjectListBase(object):
"""
fields = {
'objects': list,
}
}

# This is a dictionary of my_version:child_version mappings so that
# we can support backleveling our contents based on the version
|
@ -25,10 +25,10 @@ class Conductor(base.IronicObject):
dbapi = db_api.get_instance()

fields = {
'id': int,
'drivers': utils.list_or_none,
'hostname': str,
}
'id': int,
'drivers': utils.list_or_none,
'hostname': str,
}

@staticmethod
def _from_db_object(conductor, db_obj):
@ -53,7 +53,7 @@ class Conductor(base.IronicObject):
def save(self, context):
"""Save is not supported by Conductor objects."""
raise NotImplementedError(
_('Cannot update a conductor record directly.'))
_('Cannot update a conductor record directly.'))

@base.remotable
def refresh(self, context=None):
|
@ -41,55 +41,55 @@ class Node(base.IronicObject):
dbapi = db_api.get_instance()

fields = {
'id': int,
'id': int,

'uuid': obj_utils.str_or_none,
'name': obj_utils.str_or_none,
'chassis_id': obj_utils.int_or_none,
'instance_uuid': obj_utils.str_or_none,
'uuid': obj_utils.str_or_none,
'name': obj_utils.str_or_none,
'chassis_id': obj_utils.int_or_none,
'instance_uuid': obj_utils.str_or_none,

'driver': obj_utils.str_or_none,
'driver_info': obj_utils.dict_or_none,
'driver_internal_info': obj_utils.dict_or_none,
'driver': obj_utils.str_or_none,
'driver_info': obj_utils.dict_or_none,
'driver_internal_info': obj_utils.dict_or_none,

# A clean step dictionary, indicating the current clean step
# being executed, or None, indicating cleaning is not in progress
# or has not yet started.
'clean_step': obj_utils.dict_or_none,
# A clean step dictionary, indicating the current clean step
# being executed, or None, indicating cleaning is not in progress
# or has not yet started.
'clean_step': obj_utils.dict_or_none,

'instance_info': obj_utils.dict_or_none,
'properties': obj_utils.dict_or_none,
'reservation': obj_utils.str_or_none,
# a reference to the id of the conductor service, not its hostname,
# that has most recently performed some action which could require
# local state to be maintained (eg, built a PXE config)
'conductor_affinity': obj_utils.int_or_none,
'instance_info': obj_utils.dict_or_none,
'properties': obj_utils.dict_or_none,
'reservation': obj_utils.str_or_none,
# a reference to the id of the conductor service, not its hostname,
# that has most recently performed some action which could require
# local state to be maintained (eg, built a PXE config)
'conductor_affinity': obj_utils.int_or_none,

# One of states.POWER_ON|POWER_OFF|NOSTATE|ERROR
'power_state': obj_utils.str_or_none,
# One of states.POWER_ON|POWER_OFF|NOSTATE|ERROR
'power_state': obj_utils.str_or_none,

# Set to one of states.POWER_ON|POWER_OFF when a power operation
# starts, and set to NOSTATE when the operation finishes
# (successfully or unsuccessfully).
'target_power_state': obj_utils.str_or_none,
# Set to one of states.POWER_ON|POWER_OFF when a power operation
# starts, and set to NOSTATE when the operation finishes
# (successfully or unsuccessfully).
'target_power_state': obj_utils.str_or_none,

'provision_state': obj_utils.str_or_none,
'provision_updated_at': obj_utils.datetime_or_str_or_none,
'target_provision_state': obj_utils.str_or_none,
'provision_state': obj_utils.str_or_none,
'provision_updated_at': obj_utils.datetime_or_str_or_none,
'target_provision_state': obj_utils.str_or_none,

'maintenance': bool,
'maintenance_reason': obj_utils.str_or_none,
'console_enabled': bool,
'maintenance': bool,
'maintenance_reason': obj_utils.str_or_none,
'console_enabled': bool,

# Any error from the most recent (last) asynchronous transaction
# that started but failed to finish.
'last_error': obj_utils.str_or_none,
# Any error from the most recent (last) asynchronous transaction
# that started but failed to finish.
'last_error': obj_utils.str_or_none,

'inspection_finished_at': obj_utils.datetime_or_str_or_none,
'inspection_started_at': obj_utils.datetime_or_str_or_none,
'inspection_finished_at': obj_utils.datetime_or_str_or_none,
'inspection_started_at': obj_utils.datetime_or_str_or_none,

'extra': obj_utils.dict_or_none,
}
'extra': obj_utils.dict_or_none,
}

@staticmethod
def _from_db_object(node, db_node):
|
@ -38,11 +38,11 @@ class TestACL(base.FunctionalTest):

def get_json(self, path, expect_errors=False, headers=None, q=[], **param):
return super(TestACL, self).get_json(path,
expect_errors=expect_errors,
headers=headers,
q=q,
extra_environ=self.environ,
**param)
expect_errors=expect_errors,
headers=headers,
q=q,
extra_environ=self.environ,
**param)

def _make_app(self):
cfg.CONF.set_override('cache', 'fake.cache',
@ -58,8 +58,8 @@ class TestACL(base.FunctionalTest):
autospec=True) as mock_get_node:
mock_get_node.return_value = self.fake_db_node

response = self.get_json(self.node_path,
headers={'X-Auth-Token': utils.ADMIN_TOKEN})
response = self.get_json(
self.node_path, headers={'X-Auth-Token': utils.ADMIN_TOKEN})

self.assertEqual(self.fake_db_node['uuid'], response['uuid'])
mock_get_node.assert_called_once_with(self.fake_db_node['uuid'])
|
@ -63,26 +63,28 @@ class TestVersion(base.FunctionalTest):

def test_parse_headers_ok(self):
version = cbase.Version.parse_headers(
{cbase.Version.string: '123.456'}, mock.ANY, mock.ANY)
{cbase.Version.string: '123.456'}, mock.ANY, mock.ANY)
self.assertEqual((123, 456), version)

def test_parse_headers_latest(self):
for s in ['latest', 'LATEST']:
version = cbase.Version.parse_headers(
{cbase.Version.string: s}, mock.ANY, '1.9')
{cbase.Version.string: s}, mock.ANY, '1.9')
self.assertEqual((1, 9), version)

def test_parse_headers_bad_length(self):
self.assertRaises(exc.HTTPNotAcceptable,
cbase.Version.parse_headers,
{cbase.Version.string: '1'},
mock.ANY,
mock.ANY)
self.assertRaises(exc.HTTPNotAcceptable,
cbase.Version.parse_headers,
{cbase.Version.string: '1.2.3'},
mock.ANY,
mock.ANY)
self.assertRaises(
exc.HTTPNotAcceptable,
cbase.Version.parse_headers,
{cbase.Version.string: '1'},
mock.ANY,
mock.ANY)
self.assertRaises(
exc.HTTPNotAcceptable,
cbase.Version.parse_headers,
{cbase.Version.string: '1.2.3'},
mock.ANY,
mock.ANY)

def test_parse_no_header(self):
# this asserts that the minimum version string of "1.1" is applied
|
@ -297,7 +297,7 @@ class TestTrustedCallHook(base.FunctionalTest):
reqstate.set_context()
trusted_call_hook = hooks.TrustedCallHook()
self.assertRaises(webob_exc.HTTPForbidden,
trusted_call_hook.before, reqstate)
trusted_call_hook.before, reqstate)

def test_trusted_call_hook_admin(self):
headers = fake_headers(admin=True)
|
@ -200,7 +200,7 @@ class TestPatch(api_base.FunctionalTest):
result = self.get_json('/chassis/%s' % chassis.uuid)
self.assertEqual(description, result['description'])
return_updated_at = timeutils.parse_isotime(
result['updated_at']).replace(tzinfo=None)
result['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)

def test_replace_multi(self):
@ -261,9 +261,10 @@ class TestPatch(api_base.FunctionalTest):

def test_remove_non_existent_property_fail(self):
chassis = obj_utils.get_test_chassis(self.context)
response = self.patch_json('/chassis/%s' % chassis.uuid,
[{'path': '/extra/non-existent', 'op': 'remove'}],
expect_errors=True)
response = self.patch_json(
'/chassis/%s' % chassis.uuid,
[{'path': '/extra/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
@ -330,7 +331,7 @@ class TestPost(api_base.FunctionalTest):
self.assertEqual(cdict['uuid'], result['uuid'])
self.assertFalse(result['updated_at'])
return_created_at = timeutils.parse_isotime(
result['created_at']).replace(tzinfo=None)
result['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
# Check location header
self.assertIsNotNone(response.location)
@ -363,7 +364,7 @@ class TestPost(api_base.FunctionalTest):
ndict = apiutils.node_post_data()
ndict['chassis_uuid'] = chassis.uuid
response = self.post_json('/chassis/nodes', ndict,
expect_errors=True)
expect_errors=True)
self.assertEqual(403, response.status_int)

def test_create_chassis_valid_extra(self):
|
@ -158,7 +158,7 @@ class TestListDrivers(base.FunctionalTest):
data = self.get_json(path)
self.assertEqual(return_value, data)
get_methods_mock.assert_called_once_with(mock.ANY, self.d1,
topic=mock.ANY)
topic=mock.ANY)

# Now let's test the cache: Reset the mock
get_methods_mock.reset_mock()
@ -212,7 +212,7 @@ class TestDriverProperties(base.FunctionalTest):
driver._DRIVER_PROPERTIES = {}
driver_name = 'bad_driver'
mock_topic.side_effect = exception.DriverNotFound(
driver_name=driver_name)
driver_name=driver_name)
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
ret = self.get_json('/drivers/%s/properties' % driver_name,
expect_errors=True)
@ -227,7 +227,7 @@ class TestDriverProperties(base.FunctionalTest):
driver_name = 'driver'
mock_topic.return_value = 'driver_topic'
mock_properties.side_effect = exception.DriverNotFound(
driver_name=driver_name)
driver_name=driver_name)
ret = self.get_json('/drivers/%s/properties' % driver_name,
expect_errors=True)
self.assertEqual(404, ret.status_int)
|
@ -73,8 +73,8 @@ class TestListNodes(test_api_base.FunctionalTest):
associated_nodes = []
for id in range(4):
node = obj_utils.create_test_node(
self.context, uuid=uuidutils.generate_uuid(),
instance_uuid=uuidutils.generate_uuid())
self.context, uuid=uuidutils.generate_uuid(),
instance_uuid=uuidutils.generate_uuid())
associated_nodes.append(node.uuid)
return {'associated': associated_nodes,
'unassociated': unassociated_nodes}
@ -86,8 +86,8 @@ class TestListNodes(test_api_base.FunctionalTest):
def test_one(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
data = self.get_json('/nodes',
headers={api_base.Version.string: str(api_v1.MAX_VER)})
data = self.get_json(
'/nodes', headers={api_base.Version.string: str(api_v1.MAX_VER)})
self.assertIn('instance_uuid', data['nodes'][0])
self.assertIn('maintenance', data['nodes'][0])
self.assertIn('power_state', data['nodes'][0])
@ -112,8 +112,9 @@ class TestListNodes(test_api_base.FunctionalTest):
def test_get_one(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MAX_VER)})
data = self.get_json(
'/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MAX_VER)})
self.assertEqual(node.uuid, data['uuid'])
self.assertIn('driver', data)
self.assertIn('driver_info', data)
@ -134,8 +135,9 @@ class TestListNodes(test_api_base.FunctionalTest):
def test_detail(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
data = self.get_json('/nodes/detail',
headers={api_base.Version.string: str(api_v1.MAX_VER)})
data = self.get_json(
'/nodes/detail',
headers={api_base.Version.string: str(api_v1.MAX_VER)})
self.assertEqual(node.uuid, data['nodes'][0]["uuid"])
self.assertIn('name', data['nodes'][0])
self.assertIn('driver', data['nodes'][0])
@ -164,49 +166,52 @@ class TestListNodes(test_api_base.FunctionalTest):
node = obj_utils.create_test_node(self.context,
provision_state=states.AVAILABLE)

data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MIN_VER)})
data = self.get_json(
'/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MIN_VER)})
self.assertEqual(states.NOSTATE, data['provision_state'])

data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: "1.2"})
headers={api_base.Version.string: "1.2"})
self.assertEqual(states.AVAILABLE, data['provision_state'])

def test_hide_fields_in_newer_versions_driver_internal(self):
node = obj_utils.create_test_node(self.context,
driver_internal_info={"foo": "bar"})
data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MIN_VER)})
data = self.get_json(
'/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MIN_VER)})
self.assertNotIn('driver_internal_info', data)

data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: "1.3"})
headers={api_base.Version.string: "1.3"})
self.assertEqual({"foo": "bar"}, data['driver_internal_info'])

def test_hide_fields_in_newer_versions_name(self):
node = obj_utils.create_test_node(self.context,
name="fish")
data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: "1.4"})
headers={api_base.Version.string: "1.4"})
self.assertNotIn('name', data)

data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: "1.5"})
headers={api_base.Version.string: "1.5"})
self.assertEqual('fish', data['name'])

def test_hide_fields_in_newer_versions_inspection(self):
some_time = datetime.datetime(2015, 3, 18, 19, 20)
node = obj_utils.create_test_node(self.context,
inspection_started_at=some_time)
data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MIN_VER)})
data = self.get_json(
'/nodes/%s' % node.uuid,
headers={api_base.Version.string: str(api_v1.MIN_VER)})
self.assertNotIn('inspection_finished_at', data)
self.assertNotIn('inspection_started_at', data)

data = self.get_json('/nodes/%s' % node.uuid,
headers={api_base.Version.string: "1.6"})
headers={api_base.Version.string: "1.6"})
started = timeutils.parse_isotime(
data['inspection_started_at']).replace(tzinfo=None)
data['inspection_started_at']).replace(tzinfo=None)
self.assertEqual(some_time, started)
self.assertEqual(None, data['inspection_finished_at'])

@ -233,7 +238,7 @@ class TestListNodes(test_api_base.FunctionalTest):
nodes.append(node.uuid)
node_names.append(name)
data = self.get_json('/nodes',
headers={api_base.Version.string: "1.5"})
headers={api_base.Version.string: "1.5"})
names = [n['name'] for n in data['nodes']]
self.assertEqual(len(nodes), len(data['nodes']))
self.assertEqual(sorted(node_names), sorted(names))
@ -348,7 +353,7 @@ class TestListNodes(test_api_base.FunctionalTest):
self.assertEqual(fake_state, data['provision_state'])
self.assertEqual(fake_state, data['target_provision_state'])
prov_up_at = timeutils.parse_isotime(
data['provision_updated_at']).replace(tzinfo=None)
data['provision_updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, prov_up_at)
self.assertEqual(fake_error, data['last_error'])
self.assertFalse(data['console_enabled'])
@ -368,13 +373,13 @@ class TestListNodes(test_api_base.FunctionalTest):
provision_updated_at=test_time,
last_error=fake_error)
data = self.get_json('/nodes/%s/states' % node.name,
headers={api_base.Version.string: "1.5"})
headers={api_base.Version.string: "1.5"})
self.assertEqual(fake_state, data['power_state'])
self.assertEqual(fake_state, data['target_power_state'])
self.assertEqual(fake_state, data['provision_state'])
self.assertEqual(fake_state, data['target_provision_state'])
prov_up_at = timeutils.parse_isotime(
data['provision_updated_at']).replace(tzinfo=None)
data['provision_updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, prov_up_at)
self.assertEqual(fake_error, data['last_error'])
self.assertFalse(data['console_enabled'])
@ -387,7 +392,7 @@ class TestListNodes(test_api_base.FunctionalTest):
instance_uuid = node.instance_uuid

data = self.get_json('/nodes?instance_uuid=%s' % instance_uuid,
headers={api_base.Version.string: "1.5"})
headers={api_base.Version.string: "1.5"})

self.assertThat(data['nodes'], HasLength(1))
self.assertEqual(node['instance_uuid'],
@ -556,7 +561,7 @@ class TestListNodes(test_api_base.FunctionalTest):
'console_info': expected_console_info}
mock_gci.return_value = expected_console_info
data = self.get_json('/nodes/%s/states/console' % node.name,
headers={api_base.Version.string: "1.5"})
headers={api_base.Version.string: "1.5"})
self.assertEqual(expected_data, data)
mock_gci.assert_called_once_with(mock.ANY, node.uuid, 'test-topic')

@ -566,8 +571,8 @@ class TestListNodes(test_api_base.FunctionalTest):
'console_info': None}
with mock.patch.object(rpcapi.ConductorAPI,
'get_console_information') as mock_gci:
mock_gci.side_effect = exception.NodeConsoleNotEnabled(
node=node.uuid)
mock_gci.side_effect = (
exception.NodeConsoleNotEnabled(node=node.uuid))
data = self.get_json('/nodes/%s/states/console' % node.uuid)
self.assertEqual(expected_data, data)
mock_gci.assert_called_once_with(mock.ANY, node.uuid, 'test-topic')
@ -577,7 +582,7 @@ class TestListNodes(test_api_base.FunctionalTest):
with mock.patch.object(rpcapi.ConductorAPI,
'get_console_information') as mock_gci:
mock_gci.side_effect = exception.UnsupportedDriverExtension(
extension='console', driver='test-driver')
extension='console', driver='test-driver')
ret = self.get_json('/nodes/%s/states/console' % node.uuid,
expect_errors=True)
self.assertEqual(400, ret.status_code)
@ -598,7 +603,7 @@ class TestListNodes(test_api_base.FunctionalTest):
expected_data = {'boot_device': boot_devices.PXE, 'persistent': True}
mock_gbd.return_value = expected_data
data = self.get_json('/nodes/%s/management/boot_device' % node.name,
headers={api_base.Version.string: "1.5"})
headers={api_base.Version.string: "1.5"})
self.assertEqual(expected_data, data)
mock_gbd.assert_called_once_with(mock.ANY, node.uuid, 'test-topic')

@ -606,7 +611,7 @@ class TestListNodes(test_api_base.FunctionalTest):
def test_get_boot_device_iface_not_supported(self, mock_gbd):
node = obj_utils.create_test_node(self.context)
mock_gbd.side_effect = exception.UnsupportedDriverExtension(
extension='management', driver='test-driver')
extension='management', driver='test-driver')
ret = self.get_json('/nodes/%s/management/boot_device' % node.uuid,
expect_errors=True)
self.assertEqual(400, ret.status_code)
@ -628,8 +633,8 @@ class TestListNodes(test_api_base.FunctionalTest):
mock_gsbd.return_value = [boot_devices.PXE]
node = obj_utils.create_test_node(self.context, name='spam')
data = self.get_json(
'/nodes/%s/management/boot_device/supported' % node.name,
headers={api_base.Version.string: "1.5"})
'/nodes/%s/management/boot_device/supported' % node.name,
headers={api_base.Version.string: "1.5"})
expected_data = {'supported_boot_devices': [boot_devices.PXE]}
self.assertEqual(expected_data, data)
mock_gsbd.assert_called_once_with(mock.ANY, node.uuid, 'test-topic')
@ -638,7 +643,7 @@ class TestListNodes(test_api_base.FunctionalTest):
def test_get_supported_boot_devices_iface_not_supported(self, mock_gsbd):
node = obj_utils.create_test_node(self.context)
mock_gsbd.side_effect = exception.UnsupportedDriverExtension(
extension='management', driver='test-driver')
extension='management', driver='test-driver')
ret = self.get_json('/nodes/%s/management/boot_device/supported' %
node.uuid, expect_errors=True)
self.assertEqual(400, ret.status_code)
@ -672,7 +677,7 @@ class TestListNodes(test_api_base.FunctionalTest):
def test_validate_by_name(self, mock_vdi):
node = obj_utils.create_test_node(self.context, name='spam')
self.get_json('/nodes/validate?node=%s' % node.name,
headers={api_base.Version.string: "1.5"})
headers={api_base.Version.string: "1.5"})
# note that this should be node.uuid here as we get that from the
# rpc_node lookup and pass that downwards
mock_vdi.assert_called_once_with(mock.ANY, node.uuid, 'test-topic')
@ -685,8 +690,8 @@ class TestPatch(test_api_base.FunctionalTest):
self.chassis = obj_utils.create_test_chassis(self.context)
self.node = obj_utils.create_test_node(self.context, name='node-57',
chassis_id=self.chassis.id)
self.node_no_name = obj_utils.create_test_node(self.context,
uuid='deadbeef-0000-1111-2222-333333333333',
self.node_no_name = obj_utils.create_test_node(
self.context, uuid='deadbeef-0000-1111-2222-333333333333',
chassis_id=self.chassis.id)
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
self.mock_gtf = p.start()
@ -706,15 +711,16 @@ class TestPatch(test_api_base.FunctionalTest):
.return_value
.updated_at) = "2013-12-03T06:20:41.184720+00:00"
response = self.patch_json('/nodes/%s' % self.node.uuid,
[{'path': '/instance_uuid',
'value': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
'op': 'replace'}])
[{'path': '/instance_uuid',
'value':
'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
self.assertEqual(self.mock_update_node.return_value.updated_at,
timeutils.parse_isotime(response.json['updated_at']))
self.mock_update_node.assert_called_once_with(
mock.ANY, mock.ANY, 'test-topic')
mock.ANY, mock.ANY, 'test-topic')

def test_update_by_name_unsupported(self):
self.mock_update_node.return_value = self.node
@ -723,11 +729,11 @@ class TestPatch(test_api_base.FunctionalTest):
.return_value
.updated_at) = "2013-12-03T06:20:41.184720+00:00"
response = self.patch_json(
'/nodes/%s' % self.node.name,
[{'path': '/instance_uuid',
'value': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
'op': 'replace'}],
expect_errors=True)
'/nodes/%s' % self.node.name,
[{'path': '/instance_uuid',
'value': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(404, response.status_code)
self.assertFalse(self.mock_update_node.called)

@ -738,17 +744,17 @@ class TestPatch(test_api_base.FunctionalTest):
.return_value
.updated_at) = "2013-12-03T06:20:41.184720+00:00"
response = self.patch_json(
'/nodes/%s' % self.node.name,
[{'path': '/instance_uuid',
'value': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
|
||||
'op': 'replace'}],
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
'/nodes/%s' % self.node.name,
|
||||
[{'path': '/instance_uuid',
|
||||
'value': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
|
||||
'op': 'replace'}],
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
self.assertEqual(self.mock_update_node.return_value.updated_at,
|
||||
timeutils.parse_isotime(response.json['updated_at']))
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_update_state(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
@ -760,8 +766,8 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
|
||||
def test_update_fails_bad_driver_info(self):
|
||||
fake_err = 'Fake Error Message'
|
||||
self.mock_update_node.side_effect = exception.InvalidParameterValue(
|
||||
fake_err)
|
||||
self.mock_update_node.side_effect = (
|
||||
exception.InvalidParameterValue(fake_err))
|
||||
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/driver_info/this',
|
||||
@ -775,7 +781,7 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
self.assertEqual(400, response.status_code)
|
||||
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_update_fails_bad_driver(self):
|
||||
self.mock_gtf.side_effect = exception.NoValidHost('Fake Error')
|
||||
@ -800,23 +806,25 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_add_root(self):
|
||||
self.mock_update_node.return_value = self.node
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/instance_uuid',
|
||||
'value': 'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
|
||||
'op': 'add'}])
|
||||
[{'path': '/instance_uuid',
|
||||
'value':
|
||||
'aaaaaaaa-1111-bbbb-2222-cccccccccccc',
|
||||
'op': 'add'}])
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_add_root_non_existent(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
|
||||
expect_errors=True)
|
||||
[{'path': '/foo', 'value': 'bar',
|
||||
'op': 'add'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_code)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
@ -831,12 +839,13 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_remove_non_existent_property_fail(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/extra/non-existent', 'op': 'remove'}],
|
||||
expect_errors=True)
|
||||
[{'path': '/extra/non-existent',
|
||||
'op': 'remove'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_code)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
@ -864,7 +873,7 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_patch_ports_subresource(self):
|
||||
response = self.patch_json('/nodes/%s/ports' % self.node.uuid,
|
||||
@ -893,7 +902,7 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_add_state_in_cleaning(self):
|
||||
node = obj_utils.create_test_node(
|
||||
@ -920,55 +929,56 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
def test_replace_chassis_uuid(self):
|
||||
self.mock_update_node.return_value = self.node
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/chassis_uuid',
|
||||
'value': self.chassis.uuid,
|
||||
'op': 'replace'}])
|
||||
[{'path': '/chassis_uuid',
|
||||
'value': self.chassis.uuid,
|
||||
'op': 'replace'}])
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
def test_add_chassis_uuid(self):
|
||||
self.mock_update_node.return_value = self.node
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/chassis_uuid',
|
||||
'value': self.chassis.uuid,
|
||||
'op': 'add'}])
|
||||
[{'path': '/chassis_uuid',
|
||||
'value': self.chassis.uuid,
|
||||
'op': 'add'}])
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
def test_add_chassis_id(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/chassis_id',
|
||||
'value': '1',
|
||||
'op': 'add'}],
|
||||
expect_errors=True)
|
||||
[{'path': '/chassis_id',
|
||||
'value': '1',
|
||||
'op': 'add'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_code)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_replace_chassis_id(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/chassis_id',
|
||||
'value': '1',
|
||||
'op': 'replace'}],
|
||||
expect_errors=True)
|
||||
[{'path': '/chassis_id',
|
||||
'value': '1',
|
||||
'op': 'replace'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_code)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_remove_chassis_id(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/chassis_id',
|
||||
'op': 'remove'}],
|
||||
expect_errors=True)
|
||||
[{'path': '/chassis_id',
|
||||
'op': 'remove'}],
|
||||
expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_code)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_replace_non_existent_chassis_uuid(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
[{'path': '/chassis_uuid',
|
||||
'value': 'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa',
|
||||
'op': 'replace'}], expect_errors=True)
|
||||
[{'path': '/chassis_uuid',
|
||||
'value':
|
||||
'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa',
|
||||
'op': 'replace'}], expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_code)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
@ -1000,21 +1010,21 @@ class TestPatch(test_api_base.FunctionalTest):
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_replace_maintenance_by_name(self):
|
||||
self.mock_update_node.return_value = self.node
|
||||
|
||||
response = self.patch_json(
|
||||
'/nodes/%s' % self.node.name,
|
||||
[{'path': '/maintenance', 'op': 'replace',
|
||||
'value': 'true'}],
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
'/nodes/%s' % self.node.name,
|
||||
[{'path': '/maintenance', 'op': 'replace',
|
||||
'value': 'true'}],
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(200, response.status_code)
|
||||
|
||||
self.mock_update_node.assert_called_once_with(
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
mock.ANY, mock.ANY, 'test-topic')
|
||||
|
||||
def test_replace_consoled_enabled(self):
|
||||
response = self.patch_json('/nodes/%s' % self.node.uuid,
|
||||
@ -1135,7 +1145,7 @@ class TestPost(test_api_base.FunctionalTest):
|
||||
self.assertEqual(ndict['uuid'], result['uuid'])
|
||||
self.assertFalse(result['updated_at'])
|
||||
return_created_at = timeutils.parse_isotime(
|
||||
result['created_at']).replace(tzinfo=None)
|
||||
result['created_at']).replace(tzinfo=None)
|
||||
self.assertEqual(test_time, return_created_at)
|
||||
# Check location header
|
||||
self.assertIsNotNone(response.location)
|
||||
@ -1193,7 +1203,7 @@ class TestPost(test_api_base.FunctionalTest):
|
||||
response = self.post_json('/nodes/%s/vendor_passthru/test' % node.uuid,
|
||||
info)
|
||||
mock_vendor.assert_called_once_with(
|
||||
mock.ANY, node.uuid, 'test', 'POST', info, 'test-topic')
|
||||
mock.ANY, node.uuid, 'test', 'POST', info, 'test-topic')
|
||||
self.assertEqual(expected_return_value, response.body)
|
||||
self.assertEqual(expected_status, response.status_code)
|
||||
|
||||
@ -1211,7 +1221,7 @@ class TestPost(test_api_base.FunctionalTest):
|
||||
info,
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
mock_vendor.assert_called_once_with(
|
||||
mock.ANY, node.uuid, 'test', 'POST', info, 'test-topic')
|
||||
mock.ANY, node.uuid, 'test', 'POST', info, 'test-topic')
|
||||
self.assertEqual(expected_return_value, response.body)
|
||||
self.assertEqual(expected_status, response.status_code)
|
||||
|
||||
@ -1271,7 +1281,7 @@ class TestPost(test_api_base.FunctionalTest):
|
||||
response = self.post_json('/nodes/%s/vendor_passthru/test' % uuid,
|
||||
info, expect_errors=True)
|
||||
mock_vendor.assert_called_once_with(
|
||||
mock.ANY, uuid, 'test', 'POST', info, 'test-topic')
|
||||
mock.ANY, uuid, 'test', 'POST', info, 'test-topic')
|
||||
self.assertEqual(400, response.status_code)
|
||||
|
||||
def test_vendor_passthru_without_method(self):
|
||||
@ -1320,7 +1330,7 @@ class TestPost(test_api_base.FunctionalTest):
|
||||
|
||||
def test_create_node_with_chassis_uuid(self):
|
||||
ndict = test_api_utils.post_get_test_node(
|
||||
chassis_uuid=self.chassis.uuid)
|
||||
chassis_uuid=self.chassis.uuid)
|
||||
response = self.post_json('/nodes', ndict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(201, response.status_int)
|
||||
@ -1334,7 +1344,7 @@ class TestPost(test_api_base.FunctionalTest):
|
||||
|
||||
def test_create_node_chassis_uuid_not_found(self):
|
||||
ndict = test_api_utils.post_get_test_node(
|
||||
chassis_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
|
||||
chassis_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
|
||||
response = self.post_json('/nodes', ndict, expect_errors=True)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(400, response.status_int)
|
||||
@ -1358,7 +1368,7 @@ class TestPost(test_api_base.FunctionalTest):
|
||||
data = self.get_json(path)
|
||||
self.assertEqual(return_value, data)
|
||||
get_methods_mock.assert_called_once_with(mock.ANY, node.uuid,
|
||||
topic=mock.ANY)
|
||||
topic=mock.ANY)
|
||||
|
||||
# Now let's test the cache: Reset the mock
|
||||
get_methods_mock.reset_mock()
|
||||
@ -1396,7 +1406,7 @@ class TestDelete(test_api_base.FunctionalTest):
|
||||
def test_delete_node_by_name(self, mock_dn):
|
||||
node = obj_utils.create_test_node(self.context, name='foo')
|
||||
self.delete('/nodes/%s' % node.name,
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
mock_dn.assert_called_once_with(mock.ANY, node.uuid, 'test-topic')
|
||||
|
||||
@mock.patch.object(objects.Node, 'get_by_uuid')
|
||||
@ -1426,8 +1436,8 @@ class TestDelete(test_api_base.FunctionalTest):
|
||||
mock_gbn.side_effect = exception.NodeNotFound(node=node.name)
|
||||
|
||||
response = self.delete('/nodes/%s' % node.name,
|
||||
headers={api_base.Version.string: "1.5"},
|
||||
expect_errors=True)
|
||||
headers={api_base.Version.string: "1.5"},
|
||||
expect_errors=True)
|
||||
self.assertEqual(404, response.status_int)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertTrue(response.json['error_message'])
|
||||
@ -1442,10 +1452,10 @@ class TestDelete(test_api_base.FunctionalTest):
|
||||
@mock.patch.object(rpcapi.ConductorAPI, 'destroy_node')
|
||||
def test_delete_associated(self, mock_dn):
|
||||
node = obj_utils.create_test_node(
|
||||
self.context,
|
||||
instance_uuid='aaaaaaaa-1111-bbbb-2222-cccccccccccc')
|
||||
mock_dn.side_effect = exception.NodeAssociated(node=node.uuid,
|
||||
instance=node.instance_uuid)
|
||||
self.context,
|
||||
instance_uuid='aaaaaaaa-1111-bbbb-2222-cccccccccccc')
|
||||
mock_dn.side_effect = exception.NodeAssociated(
|
||||
node=node.uuid, instance=node.instance_uuid)
|
||||
|
||||
response = self.delete('/nodes/%s' % node.uuid, expect_errors=True)
|
||||
self.assertEqual(409, response.status_int)
|
||||
@ -1475,7 +1485,7 @@ class TestDelete(test_api_base.FunctionalTest):
|
||||
name='foo')
|
||||
mock_get.return_value = node
|
||||
response = self.delete('/nodes/%s/maintenance' % node.name,
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
headers={api_base.Version.string: "1.5"})
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertEqual(b'', response.body)
|
||||
self.assertEqual(False, node.maintenance)
|
||||
@ -1489,8 +1499,9 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
|
||||
def setUp(self):
|
||||
super(TestPut, self).setUp()
|
||||
self.node = obj_utils.create_test_node(self.context,
|
||||
provision_state=states.AVAILABLE, name='node-39')
|
||||
self.node = obj_utils.create_test_node(
|
||||
self.context,
|
||||
provision_state=states.AVAILABLE, name='node-39')
|
||||
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for')
|
||||
self.mock_gtf = p.start()
|
||||
self.mock_gtf.return_value = 'test-topic'
|
||||
@ -1568,7 +1579,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
self.assertEqual(202, ret.status_code)
|
||||
self.assertEqual(b'', ret.body)
|
||||
self.mock_dnd.assert_called_once_with(
|
||||
mock.ANY, self.node.uuid, False, None, 'test-topic')
|
||||
mock.ANY, self.node.uuid, False, None, 'test-topic')
|
||||
# Check location header
|
||||
self.assertIsNotNone(ret.location)
|
||||
expected_location = '/v1/nodes/%s/states' % self.node.uuid
|
||||
@ -1588,7 +1599,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
self.assertEqual(202, ret.status_code)
|
||||
self.assertEqual(b'', ret.body)
|
||||
self.mock_dnd.assert_called_once_with(
|
||||
mock.ANY, self.node.uuid, False, None, 'test-topic')
|
||||
mock.ANY, self.node.uuid, False, None, 'test-topic')
|
||||
|
||||
def test_provision_with_deploy_configdrive(self):
|
||||
ret = self.put_json('/nodes/%s/states/provision' % self.node.uuid,
|
||||
@ -1596,7 +1607,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
self.assertEqual(202, ret.status_code)
|
||||
self.assertEqual(b'', ret.body)
|
||||
self.mock_dnd.assert_called_once_with(
|
||||
mock.ANY, self.node.uuid, False, 'foo', 'test-topic')
|
||||
mock.ANY, self.node.uuid, False, 'foo', 'test-topic')
|
||||
# Check location header
|
||||
self.assertIsNotNone(ret.location)
|
||||
expected_location = '/v1/nodes/%s/states' % self.node.uuid
|
||||
@ -1619,7 +1630,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
self.assertEqual(202, ret.status_code)
|
||||
self.assertEqual(b'', ret.body)
|
||||
self.mock_dntd.assert_called_once_with(
|
||||
mock.ANY, node.uuid, 'test-topic')
|
||||
mock.ANY, node.uuid, 'test-topic')
|
||||
# Check location header
|
||||
self.assertIsNotNone(ret.location)
|
||||
expected_location = '/v1/nodes/%s/states' % node.uuid
|
||||
@ -1647,7 +1658,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
self.assertEqual(202, ret.status_code)
|
||||
self.assertEqual(b'', ret.body)
|
||||
self.mock_dntd.assert_called_once_with(
|
||||
mock.ANY, node.uuid, 'test-topic')
|
||||
mock.ANY, node.uuid, 'test-topic')
|
||||
# Check location header
|
||||
self.assertIsNotNone(ret.location)
|
||||
expected_location = '/v1/nodes/%s/states' % node.uuid
|
||||
@ -1669,7 +1680,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
self.assertEqual(202, ret.status_code)
|
||||
self.assertEqual(b'', ret.body)
|
||||
self.mock_dnd.assert_called_once_with(
|
||||
mock.ANY, node.uuid, False, None, 'test-topic')
|
||||
mock.ANY, node.uuid, False, None, 'test-topic')
|
||||
# Check location header
|
||||
self.assertIsNotNone(ret.location)
|
||||
expected_location = '/v1/nodes/%s/states' % node.uuid
|
||||
@ -1772,7 +1783,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
self.assertEqual(202, ret.status_code)
|
||||
self.assertEqual(b'', ret.body)
|
||||
mock_scm.assert_called_once_with(mock.ANY, self.node.uuid,
|
||||
True, 'test-topic')
|
||||
True, 'test-topic')
|
||||
|
||||
def test_set_console_mode_disabled(self):
|
||||
with mock.patch.object(rpcapi.ConductorAPI,
|
||||
@ -1812,7 +1823,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
with mock.patch.object(rpcapi.ConductorAPI,
|
||||
'set_console_mode') as mock_scm:
|
||||
mock_scm.side_effect = exception.UnsupportedDriverExtension(
|
||||
extension='console', driver='test-driver')
|
||||
extension='console', driver='test-driver')
|
||||
ret = self.put_json('/nodes/%s/states/console' % self.node.uuid,
|
||||
{'enabled': "true"}, expect_errors=True)
|
||||
self.assertEqual(400, ret.status_code)
|
||||
@ -1855,7 +1866,7 @@ class TestPut(test_api_base.FunctionalTest):
|
||||
@mock.patch.object(rpcapi.ConductorAPI, 'set_boot_device')
|
||||
def test_set_boot_device_not_supported(self, mock_sbd):
|
||||
mock_sbd.side_effect = exception.UnsupportedDriverExtension(
|
||||
extension='management', driver='test-driver')
|
||||
extension='management', driver='test-driver')
|
||||
device = boot_devices.PXE
|
||||
ret = self.put_json('/nodes/%s/management/boot_device'
|
||||
% self.node.uuid, {'boot_device': device},
|
||||
|
@ -103,10 +103,10 @@ class TestListPorts(api_base.FunctionalTest):
def test_many(self):
ports = []
for id_ in range(5):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
port = obj_utils.create_test_port(
self.context, node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports')
self.assertEqual(len(ports), len(data['ports']))
@ -130,10 +130,11 @@ class TestListPorts(api_base.FunctionalTest):
def test_collection_links(self):
ports = []
for id_ in range(5):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
port = obj_utils.create_test_port(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports/?limit=3')
self.assertEqual(3, len(data['ports']))
@ -145,10 +146,11 @@ class TestListPorts(api_base.FunctionalTest):
cfg.CONF.set_override('max_limit', 3, 'api')
ports = []
for id_ in range(5):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
port = obj_utils.create_test_port(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports')
self.assertEqual(3, len(data['ports']))
@ -186,10 +188,11 @@ class TestListPorts(api_base.FunctionalTest):
def test_sort_key(self):
ports = []
for id_ in range(3):
port = obj_utils.create_test_port(self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
port = obj_utils.create_test_port(
self.context,
node_id=self.node.id,
uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
ports.append(port.uuid)
data = self.get_json('/ports?sort_key=uuid')
uuids = [n['uuid'] for n in data['ports']]
@ -227,14 +230,14 @@ class TestListPorts(api_base.FunctionalTest):
mock_get_rpc_node.return_value = self.node
obj_utils.create_test_port(self.context, node_id=self.node.id)
self.get_json('/ports/detail?node_uuid=%s&node=%s' %
(self.node.uuid, 'node-name'))
(self.node.uuid, 'node-name'))
mock_get_rpc_node.assert_called_once_with(self.node.uuid)

@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_name_not_supported(self, mock_get_rpc_node):
# GET /v1/ports specifying node_name - name not supported
mock_get_rpc_node.side_effect = exception.InvalidUuidOrName(
name=self.node.uuid)
mock_get_rpc_node.side_effect = (
exception.InvalidUuidOrName(name=self.node.uuid))
for i in range(3):
obj_utils.create_test_port(self.context,
node_id=self.node.id,
@ -258,8 +261,8 @@ class TestListPorts(api_base.FunctionalTest):
@mock.patch.object(api_utils, 'get_rpc_node')
def test_detail_by_node_name_not_supported(self, mock_get_rpc_node):
# GET /v1/ports/detail specifying node_name - name not supported
mock_get_rpc_node.side_effect = exception.InvalidUuidOrName(
name=self.node.uuid)
mock_get_rpc_node.side_effect = (
exception.InvalidUuidOrName(name=self.node.uuid))
obj_utils.create_test_port(self.context, node_id=self.node.id)
data = self.get_json('/ports/detail?node=%s' % 'test-node',
expect_errors=True)
@ -271,7 +274,7 @@ class TestListPorts(api_base.FunctionalTest):
# GET /v1/ports/detail specifying node and node_uuid. In this case
# we expect the node_uuid interface to be used.
self.get_json('/ports/detail?node=%s&node_uuid=%s' %
('test-node', self.node.uuid))
('test-node', self.node.uuid))
mock_gpc.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY,
mock.ANY, mock.ANY, mock.ANY,
mock.ANY, mock.ANY)
@ -355,7 +358,7 @@ class TestPatch(api_base.FunctionalTest):
[{'path': '/address',
'value': address,
'op': 'replace'}],
expect_errors=True)
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_code)
self.assertTrue(response.json['error_message'])
@ -367,46 +370,46 @@ class TestPatch(api_base.FunctionalTest):
def test_replace_node_uuid(self, mock_upd):
mock_upd.return_value = self.port
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'replace'}])
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)

def test_add_node_uuid(self, mock_upd):
mock_upd.return_value = self.port
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'add'}])
[{'path': '/node_uuid',
'value': self.node.uuid,
'op': 'add'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)

def test_add_node_id(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'add'}],
expect_errors=True)
[{'path': '/node_id',
'value': '1',
'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertFalse(mock_upd.called)

def test_replace_node_id(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_id',
'value': '1',
'op': 'replace'}],
expect_errors=True)
[{'path': '/node_id',
'value': '1',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertFalse(mock_upd.called)

def test_remove_node_id(self, mock_upd):
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_id',
'op': 'remove'}],
expect_errors=True)
[{'path': '/node_id',
'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertFalse(mock_upd.called)
@ -414,10 +417,10 @@ class TestPatch(api_base.FunctionalTest):
def test_replace_non_existent_node_uuid(self, mock_upd):
node_uuid = '12506333-a81c-4d59-9987-889ed5f8687b'
response = self.patch_json('/ports/%s' % self.port.uuid,
[{'path': '/node_uuid',
'value': node_uuid,
'op': 'replace'}],
expect_errors=True)
[{'path': '/node_uuid',
'value': node_uuid,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertIn(node_uuid, response.json['error_message'])
@ -595,7 +598,7 @@ class TestPost(api_base.FunctionalTest):
self.assertEqual(pdict['uuid'], result['uuid'])
self.assertFalse(result['updated_at'])
return_created_at = timeutils.parse_isotime(
result['created_at']).replace(tzinfo=None)
result['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
# Check location header
self.assertIsNotNone(response.location)
@ -687,7 +690,7 @@ class TestPost(api_base.FunctionalTest):

def test_create_port_node_uuid_not_found(self):
pdict = post_get_test_port(
node_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
node_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e')
response = self.post_json('/ports', pdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
@ -728,8 +731,7 @@ class TestDelete(api_base.FunctionalTest):
self.assertIn(self.port.address, response.json['error_message'])

def test_delete_port_byid(self, mock_dpt):
self.delete('/ports/%s' % self.port.uuid,
expect_errors=True)
self.delete('/ports/%s' % self.port.uuid, expect_errors=True)
self.assertTrue(mock_dpt.called)

def test_delete_port_node_locked(self, mock_dpt):

@ -45,25 +45,25 @@ class TestCheckVersions(test_base.TestCase):
self.version.major = v1_api.BASE_VERSION + 1
self.version.minor = v1_api.MIN_VER.minor
self.assertRaises(
webob_exc.HTTPNotAcceptable,
v1_api.Controller()._check_version,
self.version)
webob_exc.HTTPNotAcceptable,
v1_api.Controller()._check_version,
self.version)

def test_check_version_too_low(self):
self.version.major = v1_api.BASE_VERSION
self.version.minor = v1_api.MIN_VER.minor - 1
self.assertRaises(
webob_exc.HTTPNotAcceptable,
v1_api.Controller()._check_version,
self.version)
webob_exc.HTTPNotAcceptable,
v1_api.Controller()._check_version,
self.version)

def test_check_version_too_high(self):
self.version.major = v1_api.BASE_VERSION
self.version.minor = v1_api.MAX_VER.minor + 1
e = self.assertRaises(
webob_exc.HTTPNotAcceptable,
v1_api.Controller()._check_version,
self.version, {'fake-headers': v1_api.MAX_VER.minor})
webob_exc.HTTPNotAcceptable,
v1_api.Controller()._check_version,
self.version, {'fake-headers': v1_api.MAX_VER.minor})
self.assertEqual(v1_api.MAX_VER.minor, e.headers['fake-headers'])

def test_check_version_ok(self):

@ -117,8 +117,8 @@ class TestJsonPatchType(base.TestCase):

def _patch_json(self, params, expect_errors=False):
return self.app.patch_json('/test', params=params,
headers={'Accept': 'application/json'},
expect_errors=expect_errors)
headers={'Accept': 'application/json'},
expect_errors=expect_errors)

def test_valid_patches(self):
valid_patches = [{'path': '/extra/foo', 'op': 'remove'},

@ -102,7 +102,7 @@ class TestCase(testtools.TestCase):
# registry
objects_base.IronicObject.indirection_api = None
self._base_test_obj_backup = copy.copy(
objects_base.IronicObject._obj_classes)
objects_base.IronicObject._obj_classes)
self.addCleanup(self._restore_obj_registry)

self.addCleanup(self._clear_attrs)

File diff suppressed because it is too large
@ -50,8 +50,7 @@ class RPCAPITestCase(base.DbTestCase):
super(RPCAPITestCase, self).setUp()
self.fake_node = dbutils.get_test_node(driver='fake-driver')
self.fake_node_obj = objects.Node._from_db_object(
objects.Node(self.context),
self.fake_node)
objects.Node(self.context), self.fake_node)

def test_serialized_instance_has_uuid(self):
self.assertTrue('uuid' in self.fake_node)
@ -73,16 +72,16 @@ class RPCAPITestCase(base.DbTestCase):

rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.NoValidHost,
rpcapi.get_topic_for,
self.fake_node_obj)
rpcapi.get_topic_for,
self.fake_node_obj)

def test_get_topic_doesnt_cache(self):
CONF.set_override('host', 'fake-host')

rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
self.assertRaises(exception.NoValidHost,
rpcapi.get_topic_for,
self.fake_node_obj)
rpcapi.get_topic_for,
self.fake_node_obj)

self.dbapi.register_conductor({'hostname': 'fake-host',
'drivers': ['fake-driver']})

@ -190,8 +190,8 @@ class TaskManagerTestCase(tests_db_base.DbTestCase):
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
reserve_mock.return_value = self.node
get_driver_mock.side_effect = exception.DriverNotFound(
driver_name='foo')
get_driver_mock.side_effect = (
exception.DriverNotFound(driver_name='foo'))

self.assertRaises(exception.DriverNotFound,
task_manager.TaskManager,
@ -282,8 +282,8 @@ class TaskManagerTestCase(tests_db_base.DbTestCase):
get_driver_mock, reserve_mock,
release_mock, node_get_mock):
node_get_mock.return_value = self.node
get_driver_mock.side_effect = exception.DriverNotFound(
driver_name='foo')
get_driver_mock.side_effect = (
exception.DriverNotFound(driver_name='foo'))

self.assertRaises(exception.DriverNotFound,
task_manager.TaskManager,
@ -310,7 +310,7 @@ class TaskManagerTestCase(tests_db_base.DbTestCase):

spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
thread_mock.link.assert_called_once_with(
task._thread_release_resources)
task._thread_release_resources)
self.assertFalse(thread_mock.cancel.called)
# Since we mocked link(), we're testing that __exit__ didn't
# release resources pending the finishing of the background
@ -423,9 +423,9 @@ class TaskManagerTestCase(tests_db_base.DbTestCase):
'fake-argument')

@mock.patch.object(states.machine, 'copy')
def test_init_prepares_fsm(self, copy_mock, get_ports_mock,
get_driver_mock, reserve_mock, release_mock,
node_get_mock):
def test_init_prepares_fsm(
self, copy_mock, get_ports_mock, get_driver_mock, reserve_mock,
release_mock, node_get_mock):
m = mock.Mock(spec=fsm.FSM)
reserve_mock.return_value = self.node
copy_mock.return_value = m
@ -462,9 +462,9 @@ class TaskManagerStateModelTestCases(tests_base.TestCase):
self.fsm.process_event.side_effect = exception.InvalidState('test')

self.assertRaises(
exception.InvalidState,
self.task.process_event,
self.task, 'fake')
exception.InvalidState,
self.task.process_event,
self.task, 'fake')
self.assertEqual(0, self.task.spawn_after.call_count)
self.assertFalse(self.task.node.save.called)

@ -473,8 +473,9 @@ class TaskManagerStateModelTestCases(tests_base.TestCase):
arg = mock.Mock()
kwarg = mock.Mock()
self.task.process_event = task_manager.TaskManager.process_event
self.task.process_event(self.task, 'fake',
callback=cb, call_args=[arg], call_kwargs={'mock': kwarg})
self.task.process_event(
self.task, 'fake', callback=cb, call_args=[arg],
call_kwargs={'mock': kwarg})
self.fsm.process_event.assert_called_once_with('fake')
self.task.spawn_after.assert_called_with(cb, arg, mock=kwarg)
self.assertEqual(1, self.task.node.save.call_count)
@ -491,19 +492,19 @@ class TaskManagerStateModelTestCases(tests_base.TestCase):
self.node.target_provision_state = target_provision_state
self.task.process_event = task_manager.TaskManager.process_event

self.task.process_event(self.task, 'fake',
callback=cb, call_args=[arg], call_kwargs={'mock': kwarg},
err_handler=er)
self.task.process_event(
self.task, 'fake', callback=cb, call_args=[arg],
call_kwargs={'mock': kwarg}, err_handler=er)

self.task.set_spawn_error_hook.assert_called_once_with(er,
self.node, provision_state, target_provision_state)
self.task.set_spawn_error_hook.assert_called_once_with(
er, self.node, provision_state, target_provision_state)
self.fsm.process_event.assert_called_once_with('fake')
self.task.spawn_after.assert_called_with(cb, arg, mock=kwarg)
self.assertEqual(1, self.task.node.save.call_count)
self.assertIsNone(self.node.last_error)
self.assertNotEqual(provision_state, self.node.provision_state)
self.assertNotEqual(target_provision_state,
self.node.target_provision_state)
self.node.target_provision_state)


@task_manager.require_exclusive_lock

@ -49,7 +49,7 @@ def mock_the_extension_manager(driver="fake", namespace="ironic.drivers"):
lambda x: True))
mock_ext_mgr = driver_factory.DriverFactory()
mock_ext = mock_ext_mgr._extension_manager._load_one_plugin(
entry_point, True, [], {}, False)
entry_point, True, [], {}, False)
mock_ext_mgr._extension_manager.extensions = [mock_ext]
mock_ext_mgr._extension_manager.by_name = dict((e.name, e)
for e in [mock_ext])

@ -37,7 +37,7 @@ _DB_CACHE = None
class Database(fixtures.Fixture):

def __init__(self, db_api, db_migrate, sql_connection,
sqlite_db, sqlite_clean_db):
sqlite_db, sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db

@ -44,8 +44,7 @@ class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
def test_JSONEncodedDict_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_chassis,
{'extra':
['this is not a dict']})
{'extra': ['this is not a dict']})

def test_JSONEncodedLict_default_value(self):
# Create conductor w/o extra specified.

@ -31,9 +31,9 @@ class DbConductorTestCase(base.DbTestCase):
c = utils.get_test_conductor()
self.dbapi.register_conductor(c)
self.assertRaises(
exception.ConductorAlreadyRegistered,
self.dbapi.register_conductor,
c)
exception.ConductorAlreadyRegistered,
self.dbapi.register_conductor,
c)

def test_register_conductor_override(self):
c = utils.get_test_conductor()
@ -52,17 +52,17 @@ class DbConductorTestCase(base.DbTestCase):
def test_get_conductor_not_found(self):
self._create_test_cdr()
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.get_conductor,
'bad-hostname')
exception.ConductorNotFound,
self.dbapi.get_conductor,
'bad-hostname')

def test_unregister_conductor(self):
c = self._create_test_cdr()
self.dbapi.unregister_conductor(c.hostname)
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.unregister_conductor,
c.hostname)
exception.ConductorNotFound,
self.dbapi.unregister_conductor,
c.hostname)

@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_touch_conductor(self, mock_utcnow):
@ -82,9 +82,9 @@ class DbConductorTestCase(base.DbTestCase):
# it will only update existing ones
self._create_test_cdr()
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.touch_conductor,
'bad-hostname')
exception.ConductorNotFound,
self.dbapi.touch_conductor,
'bad-hostname')

def test_touch_offline_conductor(self):
# Ensure that a conductor's periodic heartbeat task can make the
@ -92,9 +92,9 @@ class DbConductorTestCase(base.DbTestCase):
c = self._create_test_cdr()
self.dbapi.unregister_conductor(c.hostname)
self.assertRaises(
exception.ConductorNotFound,
self.dbapi.get_conductor,
c.hostname)
exception.ConductorNotFound,
self.dbapi.get_conductor,
c.hostname)
self.dbapi.touch_conductor(c.hostname)
self.dbapi.get_conductor(c.hostname)

@ -104,11 +104,13 @@ class DbNodeTestCase(base.DbTestCase):
self.assertEqual(uuids, dict((r[0], r[2]) for r in res))

def test_get_nodeinfo_list_with_filters(self):
node1 = utils.create_test_node(driver='driver-one',
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(driver='driver-two',
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
maintenance=True)

@ -151,7 +153,7 @@ class DbNodeTestCase(base.DbTestCase):
provision_state=states.DEPLOYWAIT)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=next)
provision_updated_at=next)

mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(filters={'provisioned_before': 300})
@ -180,7 +182,7 @@ class DbNodeTestCase(base.DbTestCase):

mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(
filters={'inspection_started_before': 300})
filters={'inspection_started_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])

res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
@ -200,12 +202,14 @@ class DbNodeTestCase(base.DbTestCase):
ch1 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
ch2 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())

node1 = utils.create_test_node(driver='driver-one',
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid(),
chassis_id=ch1['id'])
node2 = utils.create_test_node(driver='driver-two',
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
chassis_id=ch2['id'],
maintenance=True)
@ -247,14 +251,14 @@ class DbNodeTestCase(base.DbTestCase):

def test_get_node_by_instance(self):
node = utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
instance_uuid='12345678-9999-0000-aaaa-123456789012')

res = self.dbapi.get_node_by_instance(node.instance_uuid)
self.assertEqual(node.uuid, res.uuid)

def test_get_node_by_instance_wrong_uuid(self):
utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
instance_uuid='12345678-9999-0000-aaaa-123456789012')

self.assertRaises(exception.InstanceNotFound,
self.dbapi.get_node_by_instance,

@ -93,9 +93,9 @@ def get_test_seamicro_info():

def get_test_ilo_info():
return {
"ilo_address": "1.2.3.4",
"ilo_username": "admin",
"ilo_password": "fake",
"ilo_address": "1.2.3.4",
"ilo_username": "admin",
"ilo_password": "fake",
}


@ -163,9 +163,9 @@ class TestNeutron(db_base.DbTestCase):

api = dhcp_factory.DHCPFactory()
self.assertRaises(
exception.FailedToUpdateDHCPOptOnPort,
api.provider.update_port_dhcp_opts,
port_id, opts)
exception.FailedToUpdateDHCPOptOnPort,
api.provider.update_port_dhcp_opts,
port_id, opts)

@mock.patch.object(client.Client, 'update_port')
@mock.patch.object(client.Client, '__init__')
@ -264,7 +264,7 @@ class TestNeutron(db_base.DbTestCase):
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
}
fake_client = mock.Mock()
fake_client.show_port.return_value = {'port': port_data}
result = api._get_fixed_ip_address(port_id, fake_client)
@ -287,7 +287,7 @@ class TestNeutron(db_base.DbTestCase):
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
}
fake_client = mock.Mock()
fake_client.show_port.return_value = {'port': port_data}
self.assertRaises(exception.InvalidIPv4Address,
@ -301,7 +301,7 @@ class TestNeutron(db_base.DbTestCase):

fake_client = mock.Mock()
fake_client.show_port.side_effect = (
neutron_client_exc.NeutronClientException())
neutron_client_exc.NeutronClientException())
self.assertRaises(exception.FailedToGetIPAddressOnPort,
api._get_fixed_ip_address, port_id, fake_client)
fake_client.show_port.assert_called_once_with(port_id)

@ -49,15 +49,15 @@ class AMTManagementInteralMethodsTestCase(db_base.DbTestCase):
namespace = resource_uris.CIM_BootConfigSetting
device = boot_devices.PXE
result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
namespace)
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml

amt_mgmt._set_boot_device_order(self.node, device)

mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'ChangeBootOrder', mock.ANY)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'ChangeBootOrder', mock.ANY)

def test__set_boot_device_order_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootConfigSetting
@ -70,8 +70,8 @@ class AMTManagementInteralMethodsTestCase(db_base.DbTestCase):

self.assertRaises(exception.AMTFailure,
amt_mgmt._set_boot_device_order, self.node, device)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'ChangeBootOrder', mock.ANY)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'ChangeBootOrder', mock.ANY)

mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None
@ -89,8 +89,8 @@ class AMTManagementInteralMethodsTestCase(db_base.DbTestCase):

amt_mgmt._enable_boot_config(self.node)

mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'SetBootConfigRole', mock.ANY)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'SetBootConfigRole', mock.ANY)

def test__enable_boot_config_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootService
@ -102,8 +102,8 @@ class AMTManagementInteralMethodsTestCase(db_base.DbTestCase):

self.assertRaises(exception.AMTFailure,
amt_mgmt._enable_boot_config, self.node)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'SetBootConfigRole', mock.ANY)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, namespace, 'SetBootConfigRole', mock.ANY)

mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None

@ -54,8 +54,8 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase):
namespace = resource_uris.CIM_PowerManagementService
mock_client = mock_client_pywsman.return_value
amt_power._set_power_state(self.node, states.POWER_ON)
mock_client.wsman_invoke.assert_called_once_with(mock.ANY,
namespace, 'RequestPowerStateChange', mock.ANY)
mock_client.wsman_invoke.assert_called_once_with(
mock.ANY, namespace, 'RequestPowerStateChange', mock.ANY)

@mock.patch.object(amt_common, 'get_wsman_client', spec_set=True,
autospec=True)
@ -186,7 +186,7 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(states.POWER_ON,
amt_power._set_and_wait(task, target_state))
amt_power._set_and_wait(task, target_state))
mock_ps.assert_called_with(task.node)

@mock.patch.object(amt_power, '_power_status', spec_set=True,
@ -199,7 +199,7 @@ class AMTPowerInteralMethodsTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(states.POWER_OFF,
amt_power._set_and_wait(task, target_state))
amt_power._set_and_wait(task, target_state))
mock_sps.assert_called_once_with(task.node, states.POWER_OFF)
mock_ps.assert_called_with(task.node)

@ -33,8 +33,8 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(AMTPXEVendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_amt")
self.node = obj_utils.create_test_node(self.context,
driver='pxe_amt', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='pxe_amt', driver_info=INFO_DICT)

def test_vendor_routes(self):
expected = ['heartbeat', 'pass_deploy_info',
@ -66,8 +66,8 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase):
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
task.node.instance_info['capabilities'] = {
"boot_option": "netboot"
}
"boot_option": "netboot"
}
task.driver.vendor.pass_deploy_info(task, **kwargs)
mock_ensure.assert_called_with(
task.driver.management, task.node, boot_devices.PXE)
@ -105,8 +105,8 @@ class AMTPXEVendorPassthruTestCase(db_base.DbTestCase):
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
task.node.instance_info['capabilities'] = {
"boot_option": "netboot"
}
"boot_option": "netboot"
}
task.driver.vendor.continue_deploy(task, **kwargs)
mock_ensure.assert_called_with(
task.driver.management, task.node, boot_devices.PXE)

@ -49,8 +49,8 @@ class DracClientTestCase(base.TestCase):
|
||||
mock_options.set_flags.assert_called_once_with(
|
||||
mock_client_pywsman.FLAG_ENUMERATION_OPTIMIZATION)
|
||||
mock_options.set_max_elements.assert_called_once_with(100)
|
||||
mock_pywsman_client.enumerate.assert_called_once_with(mock_options,
|
||||
None, self.resource_uri)
|
||||
mock_pywsman_client.enumerate.assert_called_once_with(
|
mock_options, None, self.resource_uri)
mock_xml.context.assert_called_once_with()

@mock.patch.object(time, 'sleep', lambda seconds: None)
@ -74,10 +74,10 @@ class DracClientTestCase(base.TestCase):

def test_wsman_enumerate_with_additional_pull(self, mock_client_pywsman):
mock_root = mock.Mock(spec=['string'])
mock_root.string.side_effect = [test_utils.build_soap_xml(
[{'item1': 'test1'}]),
test_utils.build_soap_xml(
[{'item2': 'test2'}])]
mock_root.string.side_effect = [
test_utils.build_soap_xml([{'item1': 'test1'}]),
test_utils.build_soap_xml([{'item2': 'test2'}])
]
mock_xml = mock.Mock(spec=['context', 'root'])
mock_xml.root.return_value = mock_root
mock_xml.context.side_effect = [42, 42, None]
@ -98,8 +98,8 @@ class DracClientTestCase(base.TestCase):
mock_options.set_flags.assert_called_once_with(
mock_client_pywsman.FLAG_ENUMERATION_OPTIMIZATION)
mock_options.set_max_elements.assert_called_once_with(100)
mock_pywsman_client.enumerate.assert_called_once_with(mock_options,
None, self.resource_uri)
mock_pywsman_client.enumerate.assert_called_once_with(
mock_options, None, self.resource_uri)

def test_wsman_enumerate_filter_query(self, mock_client_pywsman):
mock_xml = test_utils.mock_wsman_root('<test></test>')
@ -113,8 +113,8 @@ class DracClientTestCase(base.TestCase):
mock_options = mock_client_pywsman.ClientOptions.return_value
mock_filter = mock_client_pywsman.Filter.return_value
mock_filter.simple.assert_called_once_with(mock.ANY, filter_query)
mock_pywsman_client.enumerate.assert_called_once_with(mock_options,
mock_filter, self.resource_uri)
mock_pywsman_client.enumerate.assert_called_once_with(
mock_options, mock_filter, self.resource_uri)
mock_xml.context.assert_called_once_with()

def test_wsman_enumerate_invalid_filter_dialect(self, mock_client_pywsman):
@ -136,8 +136,8 @@ class DracClientTestCase(base.TestCase):
client.wsman_invoke(self.resource_uri, method_name)

mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
mock_pywsman_client.invoke.assert_called_once_with(
mock_options, self.resource_uri, method_name, None)

@mock.patch.object(time, 'sleep', lambda seconds: None)
def test_wsman_invoke_retry(self, mock_client_pywsman):
@ -171,8 +171,8 @@ class DracClientTestCase(base.TestCase):
selectors=selectors)

mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
mock_pywsman_client.invoke.assert_called_once_with(
mock_options, self.resource_uri, method_name, None)
mock_options.add_selector.assert_called_once_with('foo', 'bar')

def test_wsman_invoke_with_properties(self, mock_client_pywsman):
@ -189,12 +189,12 @@ class DracClientTestCase(base.TestCase):
properties=properties)

mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
mock_pywsman_client.invoke.assert_called_once_with(
mock_options, self.resource_uri, method_name, None)
mock_options.add_property.assert_called_once_with('foo', 'bar')

def test_wsman_invoke_with_properties_including_a_list(self,
mock_client_pywsman):
def test_wsman_invoke_with_properties_including_a_list(
self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
@ -209,16 +209,16 @@ class DracClientTestCase(base.TestCase):
properties=properties)

mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, mock_request_xml)
mock_pywsman_client.invoke.assert_called_once_with(
mock_options, self.resource_uri, method_name, mock_request_xml)
mock_request_xml.root().add.assert_has_calls([
mock.call(self.resource_uri, 'foo', 'bar'),
mock.call(self.resource_uri, 'foo', 'baz')
])
self.assertEqual(2, mock_request_xml.root().add.call_count)

def test_wsman_invoke_receives_error_return_value(self,
mock_client_pywsman):
def test_wsman_invoke_receives_error_return_value(
self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_ERROR,
'Message': 'error message'}],
@ -230,14 +230,14 @@ class DracClientTestCase(base.TestCase):
method_name = 'method'
client = drac_client.Client(**INFO_DICT)
self.assertRaises(exception.DracOperationFailed,
client.wsman_invoke, self.resource_uri, method_name)
client.wsman_invoke, self.resource_uri, method_name)

mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
mock_pywsman_client.invoke.assert_called_once_with(
mock_options, self.resource_uri, method_name, None)

def test_wsman_invoke_receives_unexpected_return_value(self,
mock_client_pywsman):
def test_wsman_invoke_receives_unexpected_return_value(
self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
[{'ReturnValue': '42'}], self.resource_uri)
mock_xml = test_utils.mock_wsman_root(result_xml)
@ -247,8 +247,8 @@ class DracClientTestCase(base.TestCase):
method_name = 'method'
client = drac_client.Client(**INFO_DICT)
self.assertRaises(exception.DracUnexpectedReturnValue,
client.wsman_invoke, self.resource_uri, method_name)
client.wsman_invoke, self.resource_uri, method_name)

mock_options = mock_client_pywsman.ClientOptions.return_value
mock_pywsman_client.invoke.assert_called_once_with(mock_options,
self.resource_uri, method_name, None)
mock_pywsman_client.invoke.assert_called_once_with(
mock_options, self.resource_uri, method_name, None)
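
Every hunk in this file follows the same shape: a call whose continuation line pep8 flags as E127 or E128 (continuation line over- or under-indented for visual indent) is rebroken after the opening parenthesis with a hanging indent. A minimal, self-contained Python sketch of the pattern; the function and argument names below are illustrative, not taken from the diff:

    def enumerate_resource(options, filter_, resource_uri):
        # Stand-in for the pywsman client call in this sketch.
        return (options, filter_, resource_uri)

    # Before: the continuation does not line up under 'opts', the token
    # after the open paren, so pep8 reports E128.
    result = enumerate_resource('opts',
        None, 'resource-uri')

    # After: break directly after the open paren and indent one level,
    # the hanging-indent style applied throughout this commit.
    result = enumerate_resource(
        'opts', None, 'resource-uri')
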
@ -49,11 +49,10 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
driver_info=INFO_DICT)

def test__get_next_boot_mode(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml([{'DCIM_BootConfigSetting':
{'InstanceID': 'IPL',
'IsNext':
drac_mgmt.PERSISTENT}}],
resource_uris.DCIM_BootConfigSetting)
result_xml = test_utils.build_soap_xml(
[{'DCIM_BootConfigSetting': {'InstanceID': 'IPL',
'IsNext': drac_mgmt.PERSISTENT}}],
resource_uris.DCIM_BootConfigSetting)

mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
@ -63,19 +62,16 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
result = drac_mgmt._get_next_boot_mode(self.node)

self.assertEqual(expected, result)
mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_BootConfigSetting)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_BootConfigSetting)

def test__get_next_boot_mode_onetime(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml([{'DCIM_BootConfigSetting':
{'InstanceID': 'IPL',
'IsNext':
drac_mgmt.PERSISTENT}},
{'DCIM_BootConfigSetting':
{'InstanceID': 'OneTime',
'IsNext':
drac_mgmt.ONE_TIME_BOOT}}],
resource_uris.DCIM_BootConfigSetting)
result_xml = test_utils.build_soap_xml(
[{'DCIM_BootConfigSetting': {'InstanceID': 'IPL',
'IsNext': drac_mgmt.PERSISTENT}},
{'DCIM_BootConfigSetting': {'InstanceID': 'OneTime',
'IsNext': drac_mgmt.ONE_TIME_BOOT}}],
resource_uris.DCIM_BootConfigSetting)

mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
@ -86,13 +82,13 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
result = drac_mgmt._get_next_boot_mode(self.node)

self.assertEqual(expected, result)
mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_BootConfigSetting)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_BootConfigSetting)

def test__check_for_config_job(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml([{'DCIM_LifecycleJob':
{'Name': 'fake'}}],
resource_uris.DCIM_LifecycleJob)
result_xml = test_utils.build_soap_xml(
[{'DCIM_LifecycleJob': {'Name': 'fake'}}],
resource_uris.DCIM_LifecycleJob)

mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
@ -101,15 +97,15 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
result = drac_mgmt._check_for_config_job(self.node)

self.assertIsNone(result)
mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_LifecycleJob)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_LifecycleJob)

def test__check_for_config_job_already_exist(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml([{'DCIM_LifecycleJob':
{'Name': 'BIOS.Setup.1-1',
'JobStatus': 'scheduled',
'InstanceID': 'fake'}}],
resource_uris.DCIM_LifecycleJob)
result_xml = test_utils.build_soap_xml(
[{'DCIM_LifecycleJob': {'Name': 'BIOS.Setup.1-1',
'JobStatus': 'scheduled',
'InstanceID': 'fake'}}],
resource_uris.DCIM_LifecycleJob)

mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
@ -117,8 +113,8 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):

self.assertRaises(exception.DracPendingConfigJobExists,
drac_mgmt._check_for_config_job, self.node)
mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_LifecycleJob)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_LifecycleJob)

def test__create_config_job(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
@ -132,8 +128,9 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):
result = drac_mgmt._create_config_job(self.node)

self.assertIsNone(result)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
resource_uris.DCIM_BIOSService, 'CreateTargetedConfigJob', None)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, resource_uris.DCIM_BIOSService,
'CreateTargetedConfigJob', None)

def test__create_config_job_error(self, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
@ -147,8 +144,9 @@ class DracManagementInternalMethodsTestCase(db_base.DbTestCase):

self.assertRaises(exception.DracOperationFailed,
drac_mgmt._create_config_job, self.node)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
resource_uris.DCIM_BIOSService, 'CreateTargetedConfigJob', None)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, resource_uris.DCIM_BIOSService,
'CreateTargetedConfigJob', None)


@mock.patch.object(drac_client, 'pywsman', spec_set=mock_specs.PYWSMAN_SPEC)
@ -179,8 +177,8 @@ class DracManagementTestCase(db_base.DbTestCase):
mock_gnbm.return_value = {'instance_id': 'OneTime',
'is_next': drac_mgmt.ONE_TIME_BOOT}

result_xml = test_utils.build_soap_xml([{'InstanceID': 'HardDisk'}],
resource_uris.DCIM_BootSourceSetting)
result_xml = test_utils.build_soap_xml(
[{'InstanceID': 'HardDisk'}], resource_uris.DCIM_BootSourceSetting)

mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
@ -190,8 +188,8 @@ class DracManagementTestCase(db_base.DbTestCase):
expected = {'boot_device': boot_devices.DISK, 'persistent': False}

self.assertEqual(expected, result)
mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_BootSourceSetting)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_BootSourceSetting)

@mock.patch.object(drac_mgmt, '_get_next_boot_mode', spec_set=True,
autospec=True)
@ -199,8 +197,8 @@ class DracManagementTestCase(db_base.DbTestCase):
mock_gnbm.return_value = {'instance_id': 'IPL',
'is_next': drac_mgmt.PERSISTENT}

result_xml = test_utils.build_soap_xml([{'InstanceID': 'NIC'}],
resource_uris.DCIM_BootSourceSetting)
result_xml = test_utils.build_soap_xml(
[{'InstanceID': 'NIC'}], resource_uris.DCIM_BootSourceSetting)

mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
@ -210,8 +208,8 @@ class DracManagementTestCase(db_base.DbTestCase):
expected = {'boot_device': boot_devices.PXE, 'persistent': True}

self.assertEqual(expected, result)
mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_BootSourceSetting)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_BootSourceSetting)

@mock.patch.object(drac_client.Client, 'wsman_enumerate', spec_set=True,
autospec=True)
@ -239,8 +237,8 @@ class DracManagementTestCase(db_base.DbTestCase):
mock_client_pywsman):
mock_gbd.return_value = {'boot_device': boot_devices.PXE,
'persistent': True}
result_xml_enum = test_utils.build_soap_xml([{'InstanceID': 'NIC'}],
resource_uris.DCIM_BootSourceSetting)
result_xml_enum = test_utils.build_soap_xml(
[{'InstanceID': 'NIC'}], resource_uris.DCIM_BootSourceSetting)
result_xml_invk = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_SUCCESS}],
resource_uris.DCIM_BootConfigSetting)
@ -257,12 +255,11 @@ class DracManagementTestCase(db_base.DbTestCase):
result = self.driver.set_boot_device(task, boot_devices.PXE)

self.assertIsNone(result)
mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_BootSourceSetting)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
resource_uris.DCIM_BootConfigSetting,
'ChangeBootOrderByInstanceID',
None)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_BootSourceSetting)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, resource_uris.DCIM_BootConfigSetting,
'ChangeBootOrderByInstanceID', None)
mock_gbd.assert_called_once_with(self.driver, task)
mock_cfcj.assert_called_once_with(self.node)
mock_ccj.assert_called_once_with(self.node)
@ -277,8 +274,8 @@ class DracManagementTestCase(db_base.DbTestCase):
mock_client_pywsman):
mock_gbd.return_value = {'boot_device': boot_devices.PXE,
'persistent': True}
result_xml_enum = test_utils.build_soap_xml([{'InstanceID': 'NIC'}],
resource_uris.DCIM_BootSourceSetting)
result_xml_enum = test_utils.build_soap_xml(
[{'InstanceID': 'NIC'}], resource_uris.DCIM_BootSourceSetting)
result_xml_invk = test_utils.build_soap_xml(
[{'ReturnValue': drac_client.RET_ERROR, 'Message': 'E_FAKE'}],
resource_uris.DCIM_BootConfigSetting)
@ -295,12 +292,11 @@ class DracManagementTestCase(db_base.DbTestCase):
self.driver.set_boot_device, task,
boot_devices.PXE)

mock_pywsman.enumerate.assert_called_once_with(mock.ANY, mock.ANY,
resource_uris.DCIM_BootSourceSetting)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
resource_uris.DCIM_BootConfigSetting,
'ChangeBootOrderByInstanceID',
None)
mock_pywsman.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_BootSourceSetting)
mock_pywsman.invoke.assert_called_once_with(
mock.ANY, resource_uris.DCIM_BootConfigSetting,
'ChangeBootOrderByInstanceID', None)
mock_gbd.assert_called_once_with(self.driver, task)
mock_cfcj.assert_called_once_with(self.node)
self.assertFalse(mock_ccj.called)
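
The literal-heavy hunks above show the companion fix for E123 (closing bracket does not match indentation of opening line's bracket) and E126 (continuation line over-indented for hanging indent): nested dict arguments are reflowed to a single hanging-indent level with the closing bracket brought back to the opening line's column. A hedged sketch, borrowing values from the hunk above purely for illustration:

    # Before: hanging indent with the closing bracket left at the body's
    # indentation, which E123 rejects once it is enforced.
    job = {
        'Name': 'BIOS.Setup.1-1',
        'JobStatus': 'scheduled',
        }

    # After: same content, closing bracket aligned with the opening line.
    job = {
        'Name': 'BIOS.Setup.1-1',
        'JobStatus': 'scheduled',
    }
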
@ -46,8 +46,8 @@ class DracPowerInternalMethodsTestCase(base.DbTestCase):
instance_uuid='instance_uuid_123')

def test__get_power_state(self, mock_power_pywsman, mock_client_pywsman):
result_xml = test_utils.build_soap_xml([{'EnabledState': '2'}],
resource_uris.DCIM_ComputerSystem)
result_xml = test_utils.build_soap_xml(
[{'EnabledState': '2'}], resource_uris.DCIM_ComputerSystem)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman_client = mock_client_pywsman.Client.return_value
mock_pywsman_client.enumerate.return_value = mock_xml
@ -55,8 +55,8 @@ class DracPowerInternalMethodsTestCase(base.DbTestCase):
self.assertEqual(states.POWER_ON,
drac_power._get_power_state(self.node))

mock_pywsman_client.enumerate.assert_called_once_with(mock.ANY,
mock.ANY, resource_uris.DCIM_ComputerSystem)
mock_pywsman_client.enumerate.assert_called_once_with(
mock.ANY, mock.ANY, resource_uris.DCIM_ComputerSystem)

def test__set_power_state(self, mock_power_pywsman, mock_client_pywsman):
result_xml = test_utils.build_soap_xml(
@ -78,8 +78,9 @@ class DracPowerInternalMethodsTestCase(base.DbTestCase):
mock_pywsman_clientopts.add_property.assert_called_once_with(
'RequestedState', '2')

mock_pywsman_client.invoke.assert_called_once_with(mock.ANY,
resource_uris.DCIM_ComputerSystem, 'RequestStateChange', None)
mock_pywsman_client.invoke.assert_called_once_with(
mock.ANY, resource_uris.DCIM_ComputerSystem,
'RequestStateChange', None)

def test__set_power_state_fail(self, mock_power_pywsman,
mock_client_pywsman):
@ -106,8 +107,9 @@ class DracPowerInternalMethodsTestCase(base.DbTestCase):
mock_pywsman_clientopts.add_property.assert_called_once_with(
'RequestedState', '2')

mock_pywsman_client.invoke.assert_called_once_with(mock.ANY,
resource_uris.DCIM_ComputerSystem, 'RequestStateChange', None)
mock_pywsman_client.invoke.assert_called_once_with(
mock.ANY, resource_uris.DCIM_ComputerSystem,
'RequestStateChange', None)


class DracPowerTestCase(base.DbTestCase):
@ -163,7 +165,7 @@ class DracPowerTestCase(base.DbTestCase):
@mock.patch.object(drac_power, '_get_power_state', spec_set=True,
autospec=True)
def test_reboot_in_power_off(self, mock_get_power_state,
mock_set_power_state):
mock_set_power_state):
mock_get_power_state.return_value = states.POWER_OFF
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@ -48,8 +48,9 @@ class IloValidateParametersTestCase(db_base.DbTestCase):

def setUp(self):
super(IloValidateParametersTestCase, self).setUp()
self.node = obj_utils.create_test_node(self.context,
driver='fake_ilo', driver_info=db_utils.get_test_ilo_info())
self.node = obj_utils.create_test_node(
self.context, driver='fake_ilo',
driver_info=db_utils.get_test_ilo_info())

def test_parse_driver_info(self):
info = ilo_common.parse_driver_info(self.node)
@ -112,8 +113,8 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
super(IloCommonMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.info = db_utils.get_test_ilo_info()
self.node = obj_utils.create_test_node(self.context,
driver='fake_ilo', driver_info=self.info)
self.node = obj_utils.create_test_node(
self.context, driver='fake_ilo', driver_info=self.info)

@mock.patch.object(ilo_client, 'IloClient', spec_set=True,
autospec=True)
@ -160,10 +161,10 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ipmi_info = {
"ipmi_address": "1.2.3.4",
"ipmi_username": "admin",
"ipmi_password": "fake",
"ipmi_terminal_port": 60
"ipmi_address": "1.2.3.4",
"ipmi_username": "admin",
"ipmi_password": "fake",
"ipmi_terminal_port": 60
}
self.info['console_port'] = 60
task.node.driver_info = self.info
@ -221,10 +222,11 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
files_info=files_info,
parameters=deploy_args)

swift_obj_mock.create_object.assert_called_once_with('ilo_cont',
object_name, 'image-tmp-file', object_headers=object_headers)
swift_obj_mock.get_temp_url.assert_called_once_with('ilo_cont',
object_name, timeout)
swift_obj_mock.create_object.assert_called_once_with(
'ilo_cont', object_name, 'image-tmp-file',
object_headers=object_headers)
swift_obj_mock.get_temp_url.assert_called_once_with(
'ilo_cont', object_name, timeout)
self.assertEqual('temp-url', temp_url)

@mock.patch.object(swift, 'SwiftAPI', spec_set=True, autospec=True)
@ -264,8 +266,8 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):

ilo_common.attach_vmedia(self.node, 'FLOPPY', 'url')
insert_media_mock.assert_called_once_with('url', device='FLOPPY')
set_status_mock.assert_called_once_with(device='FLOPPY',
boot_option='CONNECT', write_protect='YES')
set_status_mock.assert_called_once_with(
device='FLOPPY', boot_option='CONNECT', write_protect='YES')

@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
@ -320,7 +322,7 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
@mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
autospec=True)
def test_update_boot_mode_instance_info_exists(self,
set_boot_mode_mock):
set_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['deploy_boot_mode'] = 'bios'
@ -349,7 +351,7 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
self.assertEqual('bios',
task.node.instance_info['deploy_boot_mode'])
task.node.instance_info['deploy_boot_mode'])

@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
@ -366,7 +368,7 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
set_pending_boot_mode_mock.assert_called_once_with('UEFI')
self.assertEqual('uefi',
task.node.instance_info['deploy_boot_mode'])
task.node.instance_info['deploy_boot_mode'])

@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
@ -381,7 +383,7 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IloOperationError,
ilo_common.update_boot_mode, task)
ilo_common.update_boot_mode, task)
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()

@ -399,7 +401,7 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_mock_obj.get_pending_boot_mode.assert_called_once_with()
self.assertEqual('bios',
task.node.instance_info['deploy_boot_mode'])
task.node.instance_info['deploy_boot_mode'])

@mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
autospec=True)
@ -419,8 +421,8 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
autospec=True)
@mock.patch.object(ilo_common, '_prepare_floppy_image', spec_set=True,
autospec=True)
def test_setup_vmedia_for_boot_with_parameters(self, prepare_image_mock,
attach_vmedia_mock, temp_url_mock):
def test_setup_vmedia_for_boot_with_parameters(
self, prepare_image_mock, attach_vmedia_mock, temp_url_mock):
parameters = {'a': 'b'}
boot_iso = '733d1c44-a2ea-414b-aca7-69decf20d810'
prepare_image_mock.return_value = 'floppy_url'
@ -441,7 +443,7 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
autospec=True)
def test_setup_vmedia_for_boot_with_swift(self, attach_vmedia_mock,
swift_api_mock):
swift_api_mock):
swift_obj_mock = swift_api_mock.return_value
boot_iso = 'swift:object-name'
swift_obj_mock.get_temp_url.return_value = 'image_url'
@ -451,10 +453,10 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_common.setup_vmedia_for_boot(task, boot_iso)
swift_obj_mock.get_temp_url.assert_called_once_with('ilo_cont',
'object-name', 1)
attach_vmedia_mock.assert_called_once_with(task.node, 'CDROM',
'image_url')
swift_obj_mock.get_temp_url.assert_called_once_with(
'ilo_cont', 'object-name', 1)
attach_vmedia_mock.assert_called_once_with(
task.node, 'CDROM', 'image_url')

@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
autospec=True)
@ -472,7 +474,7 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
@mock.patch.object(ilo_common, '_get_floppy_image_name', spec_set=True,
autospec=True)
def test_cleanup_vmedia_boot(self, get_name_mock, swift_api_mock,
get_ilo_object_mock):
get_ilo_object_mock):
swift_obj_mock = swift_api_mock.return_value
CONF.ilo.swift_ilo_container = 'ilo_cont'

@ -483,8 +485,8 @@ class IloCommonMethodsTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_common.cleanup_vmedia_boot(task)
swift_obj_mock.delete_object.assert_called_once_with('ilo_cont',
'image-node-uuid')
swift_obj_mock.delete_object.assert_called_once_with(
'ilo_cont', 'image-node-uuid')
ilo_object_mock.eject_virtual_media.assert_any_call('CDROM')
ilo_object_mock.eject_virtual_media.assert_any_call('FLOPPY')
@ -57,8 +57,8 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloDeployPrivateMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)

def test__get_boot_iso_object_name(self):
boot_iso_actual = ilo_deploy._get_boot_iso_object_name(self.node)
@ -116,7 +116,7 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
@mock.patch.object(ilo_deploy, '_parse_deploy_info', spec_set=True,
autospec=True)
def test__get_boot_iso_glance_image(self, deploy_info_mock,
image_props_mock):
image_props_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': 'boot-iso-uuid',
@ -201,8 +201,9 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(task.context,
'image-uuid', ['boot_iso', 'kernel_id', 'ramdisk_id'])
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
@ -446,10 +447,9 @@ class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
def test__prepare_node_for_deploy_sec_boot_on_inst_info(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
def test__prepare_node_for_deploy_sec_boot_on_inst_info(
self, func_node_power_action, func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@ -471,8 +471,8 @@ class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaIscsiDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='iscsi_ilo', driver_info=INFO_DICT)

@mock.patch.object(driver_utils, 'validate_secure_boot_capability',
spec_set=True, autospec=True)
@ -496,8 +496,8 @@ class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
task.driver.deploy.validate(task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(task.context,
d_info, props_expected)
validate_prop_mock.assert_called_once_with(
task.context, d_info, props_expected)
validate_boot_mode_mock.assert_called_once_with(task.node)
validate_secure_boot_mock.assert_called_once_with(task.node)

@ -599,7 +599,7 @@ class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
returned_state = task.driver.deploy.deploy(task)

cache_instance_image_mock.assert_called_once_with(task.context,
task.node)
task.node)
check_image_size_mock.assert_called_once_with(task)
expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab',
'ipa-api-url': 'http://1.2.3.4:6385'}
@ -621,7 +621,7 @@ class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertEqual(states.DELETED, returned_state)

@ -642,7 +642,7 @@ class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
update_secure_boot_mode_mock.side_effect = Exception
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertTrue(mock_log.called)
self.assertEqual(states.DELETED, returned_state)
@ -672,8 +672,8 @@ class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='agent_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='agent_ilo', driver_info=INFO_DICT)

@mock.patch.object(driver_utils, 'validate_secure_boot_capability',
spec_set=True, autospec=True)
@ -712,7 +712,7 @@ class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertEqual(states.DELETED, returned_state)

@ -793,11 +793,11 @@ class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
def test_get_clean_steps_with_conf_option(self, get_clean_step_mock):
self.config(clean_priority_erase_devices=20, group='ilo')
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
@ -810,11 +810,11 @@ class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
def test_get_clean_steps_erase_devices_disable(self, get_clean_step_mock):
self.config(clean_priority_erase_devices=0, group='ilo')
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
@ -826,11 +826,11 @@ class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
autospec=True)
def test_get_clean_steps_without_conf_option(self, get_clean_step_mock):
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
@ -860,7 +860,7 @@ class VendorPassthruTestCase(db_base.DbTestCase):
@mock.patch.object(iscsi_deploy, 'validate_pass_bootloader_info_input',
spec_set=True, autospec=True)
def test_validate_pass_bootloader_install_info(self,
validate_mock):
validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
kwargs = {'address': '1.2.3.4', 'key': 'fake-key',
@ -973,9 +973,10 @@ class VendorPassthruTestCase(db_base.DbTestCase):
autospec=True)
@mock.patch.object(ilo_deploy, '_get_boot_iso', spec_set=True,
autospec=True)
def test_pass_deploy_info_create_boot_iso_fail(self, get_iso_mock,
cleanup_vmedia_boot_mock, continue_deploy_mock, node_power_mock,
update_boot_mode_mock, update_secure_boot_mode_mock):
def test_pass_deploy_info_create_boot_iso_fail(
self, get_iso_mock, cleanup_vmedia_boot_mock, continue_deploy_mock,
node_power_mock, update_boot_mode_mock,
update_secure_boot_mode_mock):
kwargs = {'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': 'root-uuid'}
get_iso_mock.side_effect = exception.ImageCreationFailed(
@ -1244,8 +1245,8 @@ class IloPXEDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='pxe_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='pxe_ilo', driver_info=INFO_DICT)

@mock.patch.object(pxe.PXEDeploy, 'validate', spec_set=True, autospec=True)
def test_validate(self, pxe_validate_mock):
@ -1299,8 +1300,8 @@ class IloPXEVendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEVendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='pxe_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='pxe_ilo', driver_info=INFO_DICT)

def test_vendor_routes(self):
expected = ['heartbeat', 'pass_deploy_info',
@ -1342,8 +1343,8 @@ class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentVendorInterfaceTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='agent_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='agent_ilo', driver_info=INFO_DICT)

@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
spec_set=True, autospec=True)
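
The signature rewrites that recur through these test files address the same checks: when a long def cannot align its parameters under the opening parenthesis without tripping E127/E128 or the line-length limit, the commit breaks immediately after the paren and gives the parameters an eight-space hanging indent so they stay visually distinct from the body. A sketch under those assumptions, with an invented, trimmed parameter list:

    # Before: the continuation is under-indented for the visual indent set
    # by 'self' (E128); aligning it fully would overrun 79 columns.
    def test_pass_deploy_info_create_boot_iso_fail(self, get_iso_mock,
        cleanup_vmedia_boot_mock):
        pass

    # After: hanging indent, parameters grouped on their own lines.
    def test_pass_deploy_info_create_boot_iso_fail(
            self, get_iso_mock, cleanup_vmedia_boot_mock):
        pass
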
@ -41,8 +41,8 @@ class IloInspectTestCase(db_base.DbTestCase):
def setUp(self):
super(IloInspectTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='fake_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='fake_ilo', driver_info=INFO_DICT)

def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
@ -224,8 +224,8 @@ class TestInspectPrivateMethods(db_base.DbTestCase):
def setUp(self):
super(TestInspectPrivateMethods, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='fake_ilo', driver_info=INFO_DICT)
self.node = obj_utils.create_test_node(
self.context, driver='fake_ilo', driver_info=INFO_DICT)

@mock.patch.object(ilo_inspect.LOG, 'info', spec_set=True, autospec=True)
@mock.patch.object(dbapi, 'get_instance', spec_set=True, autospec=True)
Some files were not shown because too many files have changed in this diff.