Retire TripleO: remove repo content

The TripleO project is retiring:
- https://review.opendev.org/c/openstack/governance/+/905145

This commit removes the content of this project's repository.

Change-Id: Ie970f3f04c78e9bfcd9212bfa97a9cc9ee376b0c
Ghanshyam Mann 2024-02-24 11:33:01 -08:00
parent 09e3ef837b
commit d67a6ebe02
237 changed files with 8 additions and 47319 deletions
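
Since this change removes everything in-tree, the pre-retirement content remains reachable only through Git history. A minimal sketch of how to recover it, assuming a local clone of the retired repository and using the commit and parent hashes shown above (d67a6ebe02, parent 09e3ef837b):

    # Check out the tree as it was before the retirement commit,
    # as the new README.rst advises with "git checkout HEAD^1".
    git checkout d67a6ebe02^1

    # Or inspect a single removed file without moving HEAD, for example the old README:
    git show d67a6ebe02^1:README.rst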

.gitignore

@@ -1,17 +0,0 @@
*.swp
*~
*.qcow2
.DS_Store
*.egg*
*.pyc
.tox
doc/build
deploy-guide/source/_build
build
# pbr generates these
AUTHORS
ChangeLog

@@ -1,4 +0,0 @@
- project:
templates:
- publish-openstack-docs-pti
- deploy-guide-jobs

@@ -1,51 +1,10 @@
-========================
-Team and repository tags
-========================
+This project is no longer maintained.
-.. image:: https://governance.openstack.org/tc/badges/tripleo-docs.svg
-:target: https://governance.openstack.org/tc/reference/tags/index.html
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
-.. Change things from this point on
-TripleO Documentation
-=====================
-This is the documentation source for the TripleO project. You can read
-the generated documentation at `TripleO
-Docs <https://docs.openstack.org/tripleo-docs/latest/>`__.
-You can find out more about TripleO at the `TripleO
-Wiki <https://wiki.openstack.org/wiki/TripleO>`__.
-Getting Started
----------------
-Documentation for the TripleO project is hosted on the OpenStack Gerrit
-site. You can view all open and resolved issues in the
-``openstack/tripleo-docs`` project at `TripleO
-Reviews <https://review.opendev.org/#/q/project:openstack/tripleo-docs>`__.
-General information about contributing to the OpenStack documentation
-available at `OpenStack Documentation Contributor
-Guide <https://docs.openstack.org/doc-contrib-guide/>`__
-Quick Start
------------
-The following is a quick set of instructions to get you up and running
-by building the TripleO documentation locally. The first step is to get
-your Python environment configured. Information on configuring is
-available at `Python Project
-Guide <https://docs.openstack.org/project-team-guide/project-setup/python.html>`__
-Next you can generate the documentation using the following command. Be
-sure to run all the commands from within the recently checked out
-repository.
-::
-tox -edocs,pdf-docs,deploy-guide
-Now you have the documentation generated for the various available
-formats from the local source. The resulting documentation will be
-available within the ``doc/build/`` directory.
+For any further questions, please email
+openstack-discuss@lists.openstack.org or join #openstack-dev on
+OFTC.

@@ -1,58 +0,0 @@
/*
This function will search for all classes matching all IDs which are under
#admonition_selector element and display/hide their content.
State is saved in cookies so user doesn't lose his settings after page
reload or changing pages.
To make this feature work, you need to:
- add checkbox to _templates/layout.html file with proper ID
- in admonitions use proper class which matches above mentioned ID
*/
// after document is loaded
$(document).ready(function() {
// for each checkbox in #admonition_selector do
$('#admonition_selector :checkbox').each(function() {
// check value of cookies and set state to the related element
if ($.cookie($(this).attr("id")) == "true") {
$(this).prop("checked", true);
} else {
$(this).prop("checked", false);
}
// show/hide elements after page loaded
toggle_admonition($(this).attr("id"));
});
// when user clicks on the checkbox, react
$('#admonition_selector :checkbox').change(function() {
// show/hide related elements
toggle_admonition($(this).attr("id"));
// save the state in the cookies
$.cookie($(this).attr("id"), $(this).is(':checked'), { path: '/' });
});
});
// function to show/hide elements based on checkbox state
// checkbox has ID and it toggles elements having class named same way as the ID
function toggle_admonition(admonition) {
// for each element having class as the checkbox's ID
$(".admonition." + admonition).each(function() {
// set show/hide
if($("#" + admonition).is(':checked')) {
$(this).show();
} else {
$(this).hide();
}
});
}

@@ -1,117 +0,0 @@
/*!
* jQuery Cookie Plugin v1.4.1
* https://github.com/carhartl/jquery-cookie
*
* Copyright 2013 Klaus Hartl
* Released under the MIT license
*/
(function (factory) {
if (typeof define === 'function' && define.amd) {
// AMD
define(['jquery'], factory);
} else if (typeof exports === 'object') {
// CommonJS
factory(require('jquery'));
} else {
// Browser globals
factory(jQuery);
}
}(function ($) {
var pluses = /\+/g;
function encode(s) {
return config.raw ? s : encodeURIComponent(s);
}
function decode(s) {
return config.raw ? s : decodeURIComponent(s);
}
function stringifyCookieValue(value) {
return encode(config.json ? JSON.stringify(value) : String(value));
}
function parseCookieValue(s) {
if (s.indexOf('"') === 0) {
// This is a quoted cookie as according to RFC2068, unescape...
s = s.slice(1, -1).replace(/\\"/g, '"').replace(/\\\\/g, '\\');
}
try {
// Replace server-side written pluses with spaces.
// If we can't decode the cookie, ignore it, it's unusable.
// If we can't parse the cookie, ignore it, it's unusable.
s = decodeURIComponent(s.replace(pluses, ' '));
return config.json ? JSON.parse(s) : s;
} catch(e) {}
}
function read(s, converter) {
var value = config.raw ? s : parseCookieValue(s);
return $.isFunction(converter) ? converter(value) : value;
}
var config = $.cookie = function (key, value, options) {
// Write
if (value !== undefined && !$.isFunction(value)) {
options = $.extend({}, config.defaults, options);
if (typeof options.expires === 'number') {
var days = options.expires, t = options.expires = new Date();
t.setTime(+t + days * 864e+5);
}
return (document.cookie = [
encode(key), '=', stringifyCookieValue(value),
options.expires ? '; expires=' + options.expires.toUTCString() : '', // use expires attribute, max-age is not supported by IE
options.path ? '; path=' + options.path : '',
options.domain ? '; domain=' + options.domain : '',
options.secure ? '; secure' : ''
].join(''));
}
// Read
var result = key ? undefined : {};
// To prevent the for loop in the first place assign an empty array
// in case there are no cookies at all. Also prevents odd result when
// calling $.cookie().
var cookies = document.cookie ? document.cookie.split('; ') : [];
for (var i = 0, l = cookies.length; i < l; i++) {
var parts = cookies[i].split('=');
var name = decode(parts.shift());
var cookie = parts.join('=');
if (key && key === name) {
// If second argument (value) is a function it's a converter...
result = read(cookie, value);
break;
}
// Prevent storing a cookie that we couldn't decode.
if (!key && (cookie = read(cookie)) !== undefined) {
result[name] = cookie;
}
}
return result;
};
config.defaults = {};
$.removeCookie = function (key, options) {
if ($.cookie(key) === undefined) {
return false;
}
// Must not alter options, thus extending a fresh object...
$.cookie(key, '', $.extend({}, options, { expires: -1 }));
return !$.cookie(key);
};
}));

@@ -1,146 +0,0 @@
/* CUSTOM CSS OVERRIDES GO HERE */
/* ============================ */
/* remove backgrounds */
#admonition_selector {
background: none !important;
color: black !important;
}
/* admonition selector */
#admonition_selector {
border-top: 0 none !important;
}
#admonition_selector .title {
color: rgba(0, 0, 0, 0.6) !important;
}
.trigger {
color: rgba(0, 0, 0, 0.7) !important;
border-top: 1px solid rgba(0, 0, 0, 0.2);
border-bottom: 1px solid rgba(0, 0, 0, 0.2);
background: rgba(0, 0, 0, 0.05);
}
.trigger:hover {
color: rgba(0, 0, 0, 0.9) !important;
}
/* NOTES, ADMONITIONS AND TAGS */
.admonition {
font-size: 85%; /* match code size */
background: rgb(240, 240, 240);
color: rgba(0, 0, 0, 0.55);
border: 1px solid rgba(0, 0, 0, 0.1);
padding: 0.5em 1em 0.75em 1em;
margin-bottom: 24px;
}
.admonition .admonition {
/* Don't keep shrinking the font for nested admonitions. */
font-size: 100%;
}
.admonition p {
font-size: inherit;
}
.admonition p.last {
margin-bottom: 0;
}
.admonition p.first.admonition-title {
display: inline;
background: none;
font-weight: bold;
color: rgba(0, 0, 0, 0.75);
}
/* notes */
.rst-content .note {
background: rgb(240, 240, 240);
}
/* tags */
.fedora28 {background: #aee;}
.centos7 {background: #cea;}
.centos8 {background: #cae;}
.rhel {background: #fee;}
.portal {background-color: #ded;}
.satellite {background-color: #dee;}
.stable {background: #eed;}
.newton {background: #ede;}
.ocata {background: #edd;}
.pike {background: #dfb;}
.queens {background: #afd;}
.rocky {background: #aee;}
.stein {background: #ade;}
.centos {background: #fef;}
.baremetal {background: #eef;}
.virtual {background: #efe;}
.ceph {background: #eff;}
.mton {background: #ded;}
.ntoo {background: #edd;}
.otop {background: #dfb;}
.ptoq {background: #afd;}
.qtor {background: #aee;}
.rtos {background: #ade;}
.validations {background: #fdd;}
.optional {background: #ffe;}
.tls {background: #ded;}
/* admonition selector */
#admonition_selector {
color: white;
font-size: 85%;
line-height: 1.4;
background: #2980b9;
border-top: 1px solid rgba(255, 255, 255, 0.4);
}
.trigger {
color: rgba(255, 255, 255, 0.75);
line-height: 2.5;
position: relative;
cursor: pointer;
padding: 0 1.618em;
}
.trigger:after {
content: '▾';
font-family: FontAwesome;
}
.trigger:hover {
color: white;
}
.content {
display: none;
border-top: 1px solid rgba(255, 255, 255, 0.1);
background: rgba(255, 255, 255, 0.1);
padding: 0.5em 1.618em;
}
.displayed .trigger:after {
content: '▴';
}
#admonition_selector .title {
color: rgba(255, 255, 255, 0.45);
}
#admonition_selector ul {
margin-bottom: 0.75em;
}
#admonition_selector ul li {
display: block;
}
#admonition_selector label {
display: inline;
color: inherit;
text-decoration: underline dotted;
}

@@ -1,31 +0,0 @@
$(document).ready(function() {
// for each trigger
$('.trigger').each(function() {
// check if cookie has value on true
if ($.cookie($(this).parent().prop('id')) == "true") {
// add displayed class and show the content
$(this).parent().addClass("displayed");
$(this).next('.content').show();
} else {
// remove displayed class and hide the content
$(this).parent().removeClass("displayed");
$(this).next('.content').hide();
}
});
// if user clicked trigger element
$('.trigger').click(function() {
// toggle parent's class and animate the content
$(this).parent().toggleClass('displayed');
$(this).next('.content').slideToggle("fast");
// save the state to cookies
$.cookie($(this).parent().prop('id'),
$(this).parent().hasClass('displayed'),
{ path: '/' });
});
});

@@ -1,223 +0,0 @@
/*
* jQuery One Page Nav Plugin
* http://github.com/davist11/jQuery-One-Page-Nav
*
* Copyright (c) 2010 Trevor Davis (http://trevordavis.net)
* Dual licensed under the MIT and GPL licenses.
* Uses the same license as jQuery, see:
* http://jquery.org/license
*
* @version 3.0.0
*
* Example usage:
* $('#nav').onePageNav({
* currentClass: 'current',
* changeHash: false,
* scrollSpeed: 750
* });
*/
;(function($, window, document, undefined){
// our plugin constructor
var OnePageNav = function(elem, options){
this.elem = elem;
this.$elem = $(elem);
this.options = options;
this.metadata = this.$elem.data('plugin-options');
this.$win = $(window);
this.sections = {};
this.didScroll = false;
this.$doc = $(document);
this.docHeight = this.$doc.height();
};
// the plugin prototype
OnePageNav.prototype = {
defaults: {
navItems: 'a',
currentClass: 'active',
changeHash: false,
easing: 'swing',
filter: '',
scrollSpeed: 750,
scrollThreshold: 0.2,
begin: false,
end: false,
scrollChange: false
},
init: function() {
// Introduce defaults that can be extended either
// globally or using an object literal.
this.config = $.extend({}, this.defaults, this.options, this.metadata);
this.$nav = this.$elem.find(this.config.navItems);
//Filter any links out of the nav
if(this.config.filter !== '') {
this.$nav = this.$nav.filter(this.config.filter);
}
//Handle clicks on the nav
this.$nav.on('click.onePageNav', $.proxy(this.handleClick, this));
//Get the section positions
this.getPositions();
//Handle scroll changes
this.bindInterval();
//Update the positions on resize too
this.$win.on('resize.onePageNav', $.proxy(this.getPositions, this));
return this;
},
adjustNav: function(self, $parent) {
self.$elem.find('.' + self.config.currentClass).removeClass(self.config.currentClass);
$parent.addClass(self.config.currentClass);
},
bindInterval: function() {
var self = this;
var docHeight;
self.$win.on('scroll.onePageNav', function() {
self.didScroll = true;
});
self.t = setInterval(function() {
docHeight = self.$doc.height();
//If it was scrolled
if(self.didScroll) {
self.didScroll = false;
self.scrollChange();
}
//If the document height changes
if(docHeight !== self.docHeight) {
self.docHeight = docHeight;
self.getPositions();
}
}, 250);
},
getHash: function($link) {
return $link.attr('href').split('#')[1];
},
getPositions: function() {
var self = this;
var linkHref;
var topPos;
var $target;
self.$nav.each(function() {
linkHref = self.getHash($(this));
$target = $('#' + linkHref);
if($target.length) {
topPos = $target.offset().top;
self.sections[linkHref] = Math.round(topPos);
}
});
},
getSection: function(windowPos) {
var returnValue = null;
var windowHeight = Math.round(this.$win.height() * this.config.scrollThreshold);
for(var section in this.sections) {
if((this.sections[section] - windowHeight) < windowPos) {
returnValue = section;
}
}
return returnValue;
},
handleClick: function(e) {
var self = this;
var $link = $(e.currentTarget);
var $parent = $link.parent();
var newLoc = '#' + self.getHash($link);
if(!$parent.hasClass(self.config.currentClass)) {
//Start callback
if(self.config.begin) {
self.config.begin();
}
//Change the highlighted nav item
self.adjustNav(self, $parent);
//Removing the auto-adjust on scroll
self.unbindInterval();
//Scroll to the correct position
self.scrollTo(newLoc, function() {
//Do we need to change the hash?
if(self.config.changeHash) {
window.location.hash = newLoc;
}
//Add the auto-adjust on scroll back in
self.bindInterval();
//End callback
if(self.config.end) {
self.config.end();
}
});
}
e.preventDefault();
},
scrollChange: function() {
var windowTop = this.$win.scrollTop();
var position = this.getSection(windowTop);
var $parent;
//If the position is set
if(position !== null) {
$parent = this.$elem.find('a[href$="#' + position + '"]').parent();
//If it's not already the current section
if(!$parent.hasClass(this.config.currentClass)) {
//Change the highlighted nav item
this.adjustNav(this, $parent);
//If there is a scrollChange callback
if(this.config.scrollChange) {
this.config.scrollChange($parent);
}
}
}
},
scrollTo: function(target, callback) {
var offset = $(target).offset().top;
$('html, body').animate({
scrollTop: offset
}, this.config.scrollSpeed, this.config.easing, callback);
},
unbindInterval: function() {
clearInterval(this.t);
this.$win.unbind('scroll.onePageNav');
}
};
OnePageNav.defaults = OnePageNav.prototype.defaults;
$.fn.onePageNav = function(options) {
return this.each(function() {
new OnePageNav(this, options).init();
});
};
})( jQuery, window , document );

@@ -1,208 +0,0 @@
/*!
* jQuery.scrollTo
* Copyright (c) 2007-2015 Ariel Flesler - aflesler<a>gmail<d>com | http://flesler.blogspot.com
* Licensed under MIT
* http://flesler.blogspot.com/2007/10/jqueryscrollto.html
* @projectDescription Easy element scrolling using jQuery.
* @author Ariel Flesler
* @version 2.1.0
*/
;(function(define) {
'use strict';
define(['jquery'], function($) {
var $scrollTo = $.scrollTo = function(target, duration, settings) {
return $(window).scrollTo(target, duration, settings);
};
$scrollTo.defaults = {
axis:'xy',
duration: 0,
limit:true
};
function isWin(elem) {
return !elem.nodeName ||
$.inArray(elem.nodeName.toLowerCase(), ['iframe','#document','html','body']) !== -1;
}
$.fn.scrollTo = function(target, duration, settings) {
if (typeof duration === 'object') {
settings = duration;
duration = 0;
}
if (typeof settings === 'function') {
settings = { onAfter:settings };
}
if (target === 'max') {
target = 9e9;
}
settings = $.extend({}, $scrollTo.defaults, settings);
// Speed is still recognized for backwards compatibility
duration = duration || settings.duration;
// Make sure the settings are given right
var queue = settings.queue && settings.axis.length > 1;
if (queue) {
// Let's keep the overall duration
duration /= 2;
}
settings.offset = both(settings.offset);
settings.over = both(settings.over);
return this.each(function() {
// Null target yields nothing, just like jQuery does
if (target === null) return;
var win = isWin(this),
elem = win ? this.contentWindow || window : this,
$elem = $(elem),
targ = target,
attr = {},
toff;
switch (typeof targ) {
// A number will pass the regex
case 'number':
case 'string':
if (/^([+-]=?)?\d+(\.\d+)?(px|%)?$/.test(targ)) {
targ = both(targ);
// We are done
break;
}
// Relative/Absolute selector
targ = win ? $(targ) : $(targ, elem);
if (!targ.length) return;
/* falls through */
case 'object':
// DOMElement / jQuery
if (targ.is || targ.style) {
// Get the real position of the target
toff = (targ = $(targ)).offset();
}
}
var offset = $.isFunction(settings.offset) && settings.offset(elem, targ) || settings.offset;
$.each(settings.axis.split(''), function(i, axis) {
var Pos = axis === 'x' ? 'Left' : 'Top',
pos = Pos.toLowerCase(),
key = 'scroll' + Pos,
prev = $elem[key](),
max = $scrollTo.max(elem, axis);
if (toff) {// jQuery / DOMElement
attr[key] = toff[pos] + (win ? 0 : prev - $elem.offset()[pos]);
// If it's a dom element, reduce the margin
if (settings.margin) {
attr[key] -= parseInt(targ.css('margin'+Pos), 10) || 0;
attr[key] -= parseInt(targ.css('border'+Pos+'Width'), 10) || 0;
}
attr[key] += offset[pos] || 0;
if (settings.over[pos]) {
// Scroll to a fraction of its width/height
attr[key] += targ[axis === 'x'?'width':'height']() * settings.over[pos];
}
} else {
var val = targ[pos];
// Handle percentage values
attr[key] = val.slice && val.slice(-1) === '%' ?
parseFloat(val) / 100 * max
: val;
}
// Number or 'number'
if (settings.limit && /^\d+$/.test(attr[key])) {
// Check the limits
attr[key] = attr[key] <= 0 ? 0 : Math.min(attr[key], max);
}
// Don't waste time animating, if there's no need.
if (!i && settings.axis.length > 1) {
if (prev === attr[key]) {
// No animation needed
attr = {};
} else if (queue) {
// Intermediate animation
animate(settings.onAfterFirst);
// Don't animate this axis again in the next iteration.
attr = {};
}
}
});
animate(settings.onAfter);
function animate(callback) {
var opts = $.extend({}, settings, {
// The queue setting conflicts with animate()
// Force it to always be true
queue: true,
duration: duration,
complete: callback && function() {
callback.call(elem, targ, settings);
}
});
$elem.animate(attr, opts);
}
});
};
// Max scrolling position, works on quirks mode
// It only fails (not too badly) on IE, quirks mode.
$scrollTo.max = function(elem, axis) {
var Dim = axis === 'x' ? 'Width' : 'Height',
scroll = 'scroll'+Dim;
if (!isWin(elem))
return elem[scroll] - $(elem)[Dim.toLowerCase()]();
var size = 'client' + Dim,
doc = elem.ownerDocument || elem.document,
html = doc.documentElement,
body = doc.body;
return Math.max(html[scroll], body[scroll]) - Math.min(html[size], body[size]);
};
function both(val) {
return $.isFunction(val) || $.isPlainObject(val) ? val : { top:val, left:val };
}
// Add special hooks so that window scroll properties can be animated
$.Tween.propHooks.scrollLeft =
$.Tween.propHooks.scrollTop = {
get: function(t) {
return $(t.elem)[t.prop]();
},
set: function(t) {
var curr = this.get(t);
// If interrupt is true and user scrolled, stop animating
if (t.options.interrupt && t._last && t._last !== curr) {
return $(t.elem).stop();
}
var next = Math.round(t.now);
// Don't waste CPU
// Browsers don't render floating point scroll
if (curr !== next) {
$(t.elem)[t.prop](next);
t._last = this.get(t);
}
}
};
// AMD requirement
return $scrollTo;
});
}(typeof define === 'function' && define.amd ? define : function(deps, factory) {
'use strict';
if (typeof module !== 'undefined' && module.exports) {
// Node
module.exports = factory(require('jquery'));
} else {
factory(jQuery);
}
}));

@@ -1,3 +0,0 @@
$(document).ready(function() {
$('.wy-menu').onePageNav();
});

@@ -1,65 +0,0 @@
{% extends "!layout.html" %}
{% set script_files = script_files + ["_static/cookies.js"] %}
{% set script_files = script_files + ["_static/expandable.js"] %}
{% set script_files = script_files + ["_static/admonition_selector.js"] %}
{% set script_files = script_files + ["_static/jquery.scrollTo.js"] %}
{% set script_files = script_files + ["_static/jquery.nav.js"] %}
{% set script_files = script_files + ["_static/menu.js"] %}
{% set css_files = css_files + ['_static/custom.css'] %}
{% block otherversions %}
<div id="admonition_selector">
<span class="trigger">Limit Environment Specific Content</span>
<div class="content">
<span class="title">Operating Systems</span>
<ul>
<li><input type="checkbox" id="centos" checked="checked"><label for="centos" title="Step that should only be run when using CentOS.">CentOS</label></li>
<li><input type="checkbox" id="rhel" checked="checked"><label for="rhel" title="Step that should only be run when using RHEL.">RHEL</label></li>
</ul>
<span class="title">Branches</span>
<ul>
<li><input type="checkbox" id="stable" checked=""><label for="stable" title="Step that should only be run when choosing to use components from their stable branches rather than using packages/source based on current master.">Install from stable branch</label></li>
<li><input type="checkbox" id="newton" checked=""><label for="newton" title="Step that should only be run when installing from the Newton stable branch.">Install from Newton branch</label></li>
<li><input type="checkbox" id="ocata" checked=""><label for="ocata" title="Step that should only be run when installing from the Ocata stable branch.">Install from Ocata branch</label></li>
</ul>
<span class="title">RHEL Registration Types</span>
<ul>
<li><input type="checkbox" id="portal" checked="checked"><label for="portal" title="Step that should only be run when registering to the Red Hat Portal.">Portal</label></li>
<li><input type="checkbox" id="satellite" checked="checked"><label for="satellite" title="Step that should only be run when registering to Red Hat Satellite.">Satellite</label></li>
</ul>
<span class="title">Environments</span>
<ul>
<li><input type="checkbox" id="baremetal" checked="checked"><label for="baremetal" title="Step that should only be run when deploying to baremetal.">Baremetal</label></li>
<li><input type="checkbox" id="virtual" checked="checked"><label for="virtual" title="Step that should only be run when deploying to virtual machines.">Virtual</label></li>
</ul>
<span class="title">Features</span>
<ul>
<li><input type="checkbox" id="validations" checked="checked"><label for="validations" title="Step that should only be run when deploying with validations.">Validations</label></li>
<li><input type="checkbox" id="optional" checked="checked"><label for="optional" title="Step that is optional. A deployment can be done without these steps, but they may provide useful additional functionality.">Optional</label></li>
</ul>
<span class="title">Additional Overcloud Roles</span>
<ul>
<li><input type="checkbox" id="ceph" checked="checked"><label for="ceph" title="Step that should only be run when deploying Ceph for use by the Overcloud.">Ceph</label></li>
</ul>
<span class="title">Upgrade Version</span>
<ul>
<li><input type="checkbox" id="mton" checked="checked"><label for="mton" title="Step that should only be run for upgrading from Mitaka to Newton">Upgrading Mitaka to Newton</label></li>
<li><input type="checkbox" id="ntoo" checked="checked"><label for="ntoo" title="Step that should only be run for upgrading from Newton to Ocata">Upgrading Newton to Ocata</label></li>
<li><input type="checkbox" id="otop" checked="checked"><label for="otop" title="Step that should only be run for upgrading from Ocata to Pike">Upgrading Ocata to Pike</label></li>
</ul>
</div>
</div>
{{ super() }}
{% endblock %}

@@ -1,2 +0,0 @@
librsvg2-tools [doc platform:rpm]
librsvg2-bin [doc platform:dpkg]

Binary file not shown (image, 92 KiB).

@@ -1,938 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
version="1.2"
width="221.28786mm"
height="95.618492mm"
viewBox="0 0 22128.785 9561.849"
preserveAspectRatio="xMidYMid"
clip-path="url(#presentation_clip_path)"
xml:space="preserve"
id="svg2"
inkscape:version="0.92.2 (5c3e80d, 2017-08-06)"
sodipodi:docname="spine_and_leaf.svg"
inkscape:export-filename="/home/remote/hjensas/Documents/Projects/FKassan/spine_and_leaf_grey.png"
inkscape:export-xdpi="90"
inkscape:export-ydpi="90"
style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
id="metadata2202"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
objecttolerance="10"
gridtolerance="10"
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
inkscape:window-width="1920"
inkscape:window-height="1016"
id="namedview2200"
showgrid="true"
inkscape:zoom="1"
inkscape:cx="325.01545"
inkscape:cy="362.39456"
inkscape:window-x="0"
inkscape:window-y="27"
inkscape:window-maximized="1"
inkscape:current-layer="svg2"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
showguides="false"
inkscape:snap-grids="true"
inkscape:snap-bbox="true"><inkscape:grid
type="xygrid"
id="grid2162"
originx="-3557.8186"
originy="-465.85891" /></sodipodi:namedview><defs
class="ClipPathGroup"
id="defs4"><clipPath
id="presentation_clip_path"
clipPathUnits="userSpaceOnUse"><rect
x="0"
y="0"
width="21000"
height="14800"
id="rect7" /></clipPath></defs><defs
class="TextShapeIndex"
id="defs9" /><defs
class="EmbeddedBulletChars"
id="defs13" /><defs
class="TextEmbeddedBitmaps"
id="defs42" /><rect
style="fill:#000000;fill-opacity:0.26666667;fill-rule:evenodd;stroke:none;stroke-width:99.59664917;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
id="rect7472-1-4"
width="21068.076"
height="3327.2278"
x="1060.71"
y="5783.6973"
ry="493.24051" /><rect
style="fill:#000000;fill-opacity:0.26666667;stroke:none;stroke-width:70.56232452;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
id="rect7472"
width="21172.889"
height="1661.8187"
x="955.89368"
y="0"
ry="246.35416" /><rect
style="fill:#000000;fill-opacity:0.26666667;stroke-width:23.36642456;stroke-miterlimit:4;stroke-dasharray:186.93141099, 93.46570549;stroke-dashoffset:0"
id="rect7187"
width="4004.1733"
height="5368.9526"
x="1060.71"
y="4192.897"
ry="244.64894" /><g
id="g44"
transform="translate(-3557.8188,-1102.0474)"><g
id="id2"
class="Master_Slide"><g
id="bg-id2"
class="Background" /><g
id="bo-id2"
class="BackgroundObjects" /></g></g><path
style="fill:none;stroke:#000000;stroke-width:13.56770134;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:108.54160421, 54.2708021;stroke-dashoffset:0;stroke-opacity:1"
d="m 925.65479,5022.3992 c -60.51808,14.927 -121.17892,9.0977 -181.76702,9.0977 -172.42701,0 -344.8541,0 -517.28111,0 -45.48585,0 -53.55151,0 -91.5542,0 -9.15541,0 -18.31082,0 -27.46622,0 -3.05182,0 -6.10366,0 -9.155409,0 -1.525908,0 -6.103651,0 -4.577737,0 3.051829,0 12.207226,0 9.155406,0 -6.103577,0 -12.207234,0 -18.31081,0 -19.836723,0 -39.673445,0 -59.510248,0 -4.577662,0 -9.155405,0 -13.733067,0 -1.5259141,0 -5.2813943,5.859 -4.5777418,0 0.9296198,-7.7403 3.7802008,-6.065 5.6703408,-9.0977"
id="path6251"
inkscape:connector-curvature="0" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:329.94973755px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:232.79600525"
x="227.58598"
y="4397.6118"
id="text6255"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan6253"
x="227.58598"
y="4397.6118"
style="stroke-width:232.79600525">L3</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:367.5909729px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:259.3538208"
x="211.54889"
y="4957.1426"
id="text6259"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan6257"
x="211.54889"
y="4957.1426"
style="stroke-width:259.3538208">L2</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:386.90072632px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:272.9777832"
x="2280.334"
y="5325.6221"
id="text7127"><tspan
sodipodi:role="line"
id="tspan7125"
x="2280.334"
y="5667.9385"
style="stroke-width:272.9777832" /></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:386.90075684px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:272.97781372;stroke-linejoin:round"
x="4539.1108"
y="5329.9927"
id="text7127-0"><tspan
sodipodi:role="line"
id="tspan7125-3"
x="4539.1108"
y="5672.3091"
style="stroke-width:272.97781372" /></text>
<rect
id="rect2109-6-5-9-0"
width="5118.355"
height="946.78052"
x="1670.5643"
y="305.97485"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:66.52793121;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="240.99867" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:1058.33337402px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:746.70709229"
x="3481.3599"
y="1986.9637"
id="text7185"><tspan
sodipodi:role="line"
id="tspan7183"
x="3481.3599"
y="2923.3408"
style="stroke-width:746.70709229" /></text>
<path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 1675.6302,4832.0574 2442.6235,1252.7555"
id="path7189"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 2212.0703,4727.9807 15737.173,1252.7555"
id="path7191"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 3465.2812,4832.0574 2442.6235,1252.7555"
id="path7193"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 3899.7681,4727.9805 15737.173,1252.7555"
id="path7195"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><rect
style="fill:#000000;fill-opacity:0.26666667;fill-rule:evenodd;stroke-width:23.40653419;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:187.25226202, 93.62613113;stroke-dashoffset:0"
id="rect7187-4"
width="3990.1204"
height="5406.373"
x="5995.4189"
y="4155.4761"
ry="246.35411" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 16759.831,1252.7555 6825.6059,4704.2258 2953.9523,1252.7555"
id="path7446"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 2953.9523,1252.7555 5852.4337,3475.225 7953.445,-3475.225"
id="path7448"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccc" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:304.87020874px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:215.10116577"
x="2737.1526"
y="8551.4111"
id="text7452"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7450"
x="2737.1526"
y="8551.4111"
style="stroke-width:215.10116577">Rack A</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:304.87023926px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:215.10118103;stroke-linejoin:round"
x="8249.8623"
y="8547.1641"
id="text7452-0"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7450-5"
x="8249.8623"
y="8547.1641"
style="stroke-width:215.10118103">Rack B</tspan></text>
<rect
id="rect2109-6-5-9-8-0-5"
width="3458.9375"
height="639.16052"
x="1270.3427"
y="6059.2471"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14209747;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69539" /><rect
id="rect2109-6-5-9-8-0-5-0"
width="3458.9377"
height="639.16058"
x="1270.3418"
y="6877.374"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210129;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.6954" /><rect
id="rect2109-6-5-9-8-0-5-6"
width="3458.9377"
height="639.16058"
x="1270.3427"
y="7644.3667"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210129;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.6954" /><rect
id="rect2109-6-5-9-8-0-5-4"
width="3458.9377"
height="639.16058"
x="1270.3427"
y="8411.3604"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210129;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.6954" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00549316px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;stroke:none;stroke-width:259.64627075"
x="2238.1521"
y="5906.1802"
id="text7569"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567"
x="2238.1521"
y="5906.1802"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64627075">Undercloud</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00552368px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64630127;stroke-linejoin:round"
x="2230.365"
y="6600.7021"
id="text7569-6"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-2"
x="2230.365"
y="6600.7021"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64630127">Controller-0</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00552368px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64630127;stroke-linejoin:round"
x="2283.1382"
y="7295.2236"
id="text7569-5"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-8"
x="2283.1382"
y="7295.2236"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64630127">Controller-1</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00552368px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64630127;stroke-linejoin:round"
x="2293.3462"
y="7989.7456"
id="text7569-62"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-84"
x="2293.3462"
y="7989.7456"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64630127">Controller-2</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 6993.313,5343.3869 v 766.9932"
id="path7762"
inkscape:connector-curvature="0" /><rect
id="rect2109-6-5-9-8-0-5-3"
width="3458.9377"
height="639.16058"
x="6301.5244"
y="6033.6812"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210129;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.6954" /><rect
id="rect2109-6-5-9-8-0-5-0-1"
width="3458.938"
height="639.16064"
x="6301.5239"
y="6851.8071"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.1421051;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69542" /><rect
id="rect2109-6-5-9-8-0-5-6-1"
width="3458.938"
height="639.16064"
x="6301.5244"
y="7618.8003"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.1421051;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69542" /><rect
id="rect2109-6-5-9-8-0-5-4-0"
width="3458.938"
height="639.16064"
x="6301.5244"
y="8385.7939"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.1421051;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69542" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00552368px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64630127;stroke-linejoin:round"
x="7794.3252"
y="5883.0293"
id="text7569-3"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-4"
x="7794.3252"
y="5883.0293"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64630127">Compute-1</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.0055542px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64633179;stroke-linejoin:round"
x="7786.5371"
y="6577.5518"
id="text7569-6-0"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-2-3"
x="7786.5371"
y="6577.5518"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64633179">Compute-2</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.0055542px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64633179;stroke-linejoin:round"
x="7839.311"
y="7272.0737"
id="text7569-5-9"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-8-1"
x="7839.311"
y="7272.0737"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64633179">Compute-3</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.0055542px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64633179;stroke-linejoin:round"
x="7849.5195"
y="7966.5952"
id="text7569-62-9"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-84-6"
x="7849.5195"
y="7966.5952"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64633179">Compute-4</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:479.77911377px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;stroke:none;stroke-width:338.50814819"
x="3706.873"
y="825.29425"
id="text7700"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7698"
x="3706.873"
y="825.29425"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:338.50814819">Spine 1</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 1899.2403,5343.3869 v 766.9932"
id="path7758"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 3869.7864,5343.3869 v 766.9932"
id="path7760"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 9005.7841,5343.3869 v 766.9932"
id="path7764"
inkscape:connector-curvature="0" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:347.70367432px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:231.50720215;stroke-miterlimit:4;stroke-dasharray:none"
x="21719.07"
y="849.41766"
id="text7772"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7770"
x="21719.07"
y="849.41766"
style="stroke-width:231.50720215;stroke-miterlimit:4;stroke-dasharray:none">Spine Switches</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:350.77200317px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:247.48719788;stroke-linejoin:round"
x="21927.396"
y="6772.7363"
id="text7776-0"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7774-0"
x="21927.396"
y="6772.7363"
style="stroke-width:247.48719788">Servers</tspan></text>
<rect
style="fill:#000000;fill-opacity:0.26666667;fill-rule:evenodd;stroke-width:23.40653419;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:187.25228269, 93.62614135;stroke-dashoffset:0"
id="rect7187-4-2"
width="3990.1208"
height="5406.3735"
x="10816.969"
y="4129.9087"
ry="246.35414" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:304.87026978px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:215.10121155;stroke-linejoin:round"
x="13574.528"
y="8524.0117"
id="text7452-0-3"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7450-5-7"
x="13574.528"
y="8524.0117"
style="stroke-width:215.10121155">Rack C</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:23.15072632px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 11814.863,5317.8196 v 766.9932"
id="path7762-9"
inkscape:connector-curvature="0" /><rect
id="rect2109-6-5-9-8-0-5-3-2"
width="3458.938"
height="639.16064"
x="11123.074"
y="6008.1138"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.1421051;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69542" /><rect
id="rect2109-6-5-9-8-0-5-0-1-2"
width="3458.9385"
height="639.16071"
x="11123.072"
y="6826.2402"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210892;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69543" /><rect
id="rect2109-6-5-9-8-0-5-6-1-8"
width="3458.9385"
height="639.16071"
x="11123.074"
y="7593.2339"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210892;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69543" /><rect
id="rect2109-6-5-9-8-0-5-4-0-9"
width="3458.9385"
height="639.16071"
x="11123.074"
y="8360.2266"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210892;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69543" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00558472px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64633179;stroke-linejoin:round"
x="13118.99"
y="5859.8784"
id="text7569-3-7"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-4-3"
x="13118.99"
y="5859.8784"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64633179">Compute-5</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00558472px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.6463623;stroke-linejoin:round"
x="13111.204"
y="6554.4009"
id="text7569-6-0-6"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-2-3-1"
x="13111.204"
y="6554.4009"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.6463623">Compute-6</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00558472px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.6463623;stroke-linejoin:round"
x="13163.978"
y="7248.9224"
id="text7569-5-9-2"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-8-1-9"
x="13163.978"
y="7248.9224"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.6463623">Compute-7</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00558472px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.6463623;stroke-linejoin:round"
x="13174.185"
y="7943.4443"
id="text7569-62-9-3"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-84-6-1"
x="13174.185"
y="7943.4443"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.6463623">Compute-8</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:23.15072632px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 13827.334,5317.8196 v 766.9932"
id="path7764-4"
inkscape:connector-curvature="0" /><rect
style="fill:#000000;fill-opacity:0.26666667;fill-rule:evenodd;stroke-width:23.4065361;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:187.2523033, 93.62615153;stroke-dashoffset:0"
id="rect7187-4-2-0"
width="3990.1216"
height="5406.374"
x="15470.81"
y="4155.4756"
ry="246.35416" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:304.87030029px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:215.10124207;stroke-linejoin:round"
x="18713.99"
y="8547.1641"
id="text7452-0-3-6"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7450-5-7-1"
x="18713.99"
y="8547.1641"
style="stroke-width:215.10124207">Rack D</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:23.15072823px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 16468.705,5343.3865 v 766.9938"
id="path7762-9-6"
inkscape:connector-curvature="0" /><rect
id="rect2109-6-5-9-8-0-5-3-2-3"
width="3458.9385"
height="639.16071"
x="15776.916"
y="6033.6812"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14210892;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69543" /><rect
id="rect2109-6-5-9-8-0-5-0-1-2-2"
width="3458.939"
height="639.16077"
x="15776.915"
y="6851.8071"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14211273;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69545" /><rect
id="rect2109-6-5-9-8-0-5-6-1-8-0"
width="3458.939"
height="639.16077"
x="15776.916"
y="7618.8003"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14211273;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69545" /><rect
id="rect2109-6-5-9-8-0-5-4-0-9-6"
width="3458.939"
height="639.16077"
x="15776.916"
y="8385.7939"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:35.14211273;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69545" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00561523px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.6463623;stroke-linejoin:round"
x="18258.451"
y="5883.0293"
id="text7569-3-7-1"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-4-3-5"
x="18258.451"
y="5883.0293"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.6463623">Compute-9</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00561523px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64639282;stroke-linejoin:round"
x="18250.664"
y="6577.5522"
id="text7569-6-0-6-5"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-2-3-1-4"
x="18250.664"
y="6577.5522"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64639282">Compute-10</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00561523px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64639282;stroke-linejoin:round"
x="18303.438"
y="7272.0742"
id="text7569-5-9-2-7"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-8-1-9-6"
x="18303.438"
y="7272.0742"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64639282">Compute-11</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:368.00561523px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e6e6e6;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:259.64639282;stroke-linejoin:round"
x="18313.645"
y="7966.5952"
id="text7569-62-9-3-5"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7567-84-6-1-6"
x="18313.645"
y="7966.5952"
style="fill:#e6e6e6;fill-opacity:1;stroke-width:259.64639282">Compute-12</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:23.15072823px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 18481.177,5343.3865 v 766.9938"
id="path7764-4-5"
inkscape:connector-curvature="0" /><rect
id="rect2109-6-5-9-0-5"
width="5118.356"
height="946.78058"
x="7806.5093"
y="281.23059"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:66.52793121;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="240.99869" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:479.77914429px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:338.50820923;stroke-linejoin:round"
x="10483.087"
y="802.88812"
id="text7700-4"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7698-7"
x="10483.087"
y="802.88812"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:338.50820923">Spine 2</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 11856.789,4832.0579 4232.2745,1252.7555"
id="path1177"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 13533.849,4832.0579 4232.2745,1252.7555"
id="path1179"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 16259.072,4832.0579 5254.9322,1252.7555 v 0"
id="path1181"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 17936.133,4832.0579 5254.9322,1252.7555"
id="path1183"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 18565.031,4832.0579 18293.819,1252.7555"
id="path1185"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 16248.503,4832.0574 18293.819,1252.7555"
id="path1187"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 11902.207,4832.0574 17526.825,1252.7555"
id="path1189"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 13947.523,4832.0574 17526.825,1252.7555"
id="path1191"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><rect
style="fill:#000000;fill-opacity:0.26666667;fill-rule:evenodd;stroke:none;stroke-width:57.1015358;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
id="rect7472-1"
width="21068.076"
height="1093.6759"
x="1060.71"
y="4521.9468"
ry="162.13054" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:350.77197266px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:247.48716736"
x="21855.145"
y="4661.4409"
id="text7776"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7774"
x="21855.145"
y="4661.4409"
style="stroke-width:247.48716736">Leaf Switches</tspan></text>
<rect
id="rect2109-6-5-9-8"
width="1585.0544"
height="639.1604"
x="6283.0039"
y="4704.2261"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78912354;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69536" /><rect
id="rect2109-6-5-9-8-1"
width="1585.0546"
height="639.16046"
x="3144.2256"
y="4704.2266"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78912354;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69537" /><rect
id="rect2109-6-5-9-8-0"
width="1585.0546"
height="639.16046"
x="1257.5322"
y="4704.2261"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78912354;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69537" /><rect
id="rect2109-6-5-9-8-8"
width="1585.0546"
height="639.16046"
x="8175.4072"
y="4704.2266"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78912354;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69537" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.13885498px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;stroke:none;stroke-width:235.04612732"
x="1715.7467"
y="4661.6909"
id="text7722"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720"
x="1715.7467"
y="4661.6909"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.04612732">Leaf 1</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.13885498px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:235.04614258;stroke-linejoin:round"
x="3763.0071"
y="4643.5928"
id="text7722-3"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720-8"
x="3763.0071"
y="4643.5928"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.04614258">Leaf 1</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.13885498px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:235.04614258;stroke-linejoin:round"
x="7318.2295"
y="4638.5405"
id="text7722-0"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720-5"
x="7318.2295"
y="4638.5405"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.04614258">Leaf 2</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.13885498px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:235.04614258;stroke-linejoin:round"
x="9424.9385"
y="4661.6909"
id="text7722-6"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720-6"
x="9424.9385"
y="4661.6909"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.04614258">Leaf 2</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 3157.0358,5087.7224 H 2842.5869"
id="path7766"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:23.1507225px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 8188.2177,5087.7224 H 7768.9525"
id="path7768"
inkscape:connector-curvature="0" /><rect
id="rect2109-6-5-9-8-02"
width="1585.0546"
height="639.16046"
x="11104.554"
y="4678.6587"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78912544;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69537" /><rect
id="rect2109-6-5-9-8-8-5"
width="1585.0548"
height="639.16052"
x="12996.957"
y="4678.6592"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78912735;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69539" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.1388855px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:235.0461731;stroke-linejoin:round"
x="12642.896"
y="4615.3896"
id="text7722-0-9"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720-5-4"
x="12642.896"
y="4615.3896"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.0461731">Leaf 3</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.1388855px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:235.0461731;stroke-linejoin:round"
x="14749.604"
y="4638.54"
id="text7722-6-7"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720-6-8"
x="14749.604"
y="4638.54"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.0461731">Leaf 3</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:23.15072632px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 13009.768,5062.155 h -419.265"
id="path7768-5"
inkscape:connector-curvature="0" /><rect
id="rect2109-6-5-9-8-02-3"
width="1585.0548"
height="639.16052"
x="15758.396"
y="4704.2256"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78912735;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.69539" /><rect
id="rect2109-6-5-9-8-8-5-0"
width="1585.0551"
height="639.16058"
x="17650.801"
y="4704.2266"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:23.78913116;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="162.6954" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.13891602px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:235.04620361;stroke-linejoin:round"
x="17782.357"
y="4638.541"
id="text7722-0-9-9"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720-5-4-3"
x="17782.357"
y="4638.541"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.04620361">Leaf 4</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:333.13891602px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:235.04620361;stroke-linejoin:round"
x="19889.064"
y="4661.6914"
id="text7722-6-7-7"
transform="scale(0.90551207,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7720-6-8-4"
x="19889.064"
y="4661.6914"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:235.04620361">Leaf 4</tspan></text>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:23.15072823px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 17663.609,5087.7218 h -419.265"
id="path7768-5-2"
inkscape:connector-curvature="0" /><rect
id="rect2109-6-5-9-0-5-4"
width="5118.3569"
height="946.7807"
x="13942.457"
y="305.9747"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:66.52793884;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;image-rendering:auto"
ry="240.99872" /><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:479.77920532px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#e9e9e9;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:338.50827026;stroke-linejoin:round"
x="17259.299"
y="871.5957"
id="text7700-4-3"
transform="scale(0.90551208,1.1043475)"><tspan
sodipodi:role="line"
id="tspan7698-7-0"
x="17259.299"
y="871.5957"
style="fill:#e9e9e9;fill-opacity:1;stroke-width:338.50827026">Spine 3</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 1931.2946,4832.0575 C 8578.57,1252.7555 8578.57,1252.7555 8578.57,1252.7555"
id="path1229"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 3720.9458,4832.0575 C 8578.57,1252.7555 8578.57,1252.7555 8578.57,1252.7555"
id="path1231"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 7300.2478,4832.0575 C 8834.2345,1252.7555 8834.2345,1252.7555 8834.2345,1252.7555"
id="path1233"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 9345.5633,4832.0575 C 8834.2347,1252.7555 8834.2347,1252.7555 8834.2347,1252.7555"
id="path1235"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 11646.543,4832.0574 9856.8923,1252.7555"
id="path1237"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 13691.859,4832.0574 9856.8923,1252.7555"
id="path1239"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cc" /><path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 16248.503,4832.0575 c -4601.96,-3579.302 -4601.96,-3579.302 -4601.96,-3579.302"
id="path1241"
inkscape:connector-curvature="0" /><path
style="fill:none;stroke:#000000;stroke-width:25.56644249px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="M 18038.154,4832.0575 C 11646.543,1252.7555 11646.543,1252.7555 11646.543,1252.7555"
id="path1243"
inkscape:connector-curvature="0" /></svg>

Before: 45 KiB

Binary file not shown.

Before: 165 KiB

View File

@ -1,131 +0,0 @@
# instack-undercloud documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 25 10:56:57 2015.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import os
# import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
'openstackdocstheme'
]
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TripleO'
copyright = u'2015, OpenStack Foundation'
bug_tracker = u'Launchpad'
bug_tracker_url = u'https://launchpad.net/tripleo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
html_static_path = ['../../_custom']
# html_style = 'custom.css'
templates_path = ['../../_templates']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
rst_prolog = """
.. |project| replace:: %s
.. |bug_tracker| replace:: %s
.. |bug_tracker_url| replace:: %s
""" % (project, bug_tracker, bug_tracker_url)
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/tripleo-docs'
openstackdocs_auto_name = False
openstackdocs_auto_version = False
openstackdocs_bug_project = 'tripleo'
openstackdocs_bug_tag = 'documentation'

View File

@ -1,447 +0,0 @@
Integrating 3rd Party Containers in TripleO
===========================================
.. _build_container_images:
One of the following methods can be used to extend or build from scratch
custom 3rd party containers.
Extend TripleO Containers
-------------------------
Any extra RPMs required by 3rd party drivers may need to be post-installed into
our stock TripleO containers. In this case the 3rd party vendor may opt to add
a layer to an existing container in order to deploy their software.
Adding layers to existing containers using TripleO tooling
..........................................................
The example below demonstrates how to extend a container image, where the goal
is to create a layer on top of the cinder-volume image that will be named
"cinder-cooldriver".
* Make sure python-tripleoclient and the dependencies are installed:
.. code-block:: shell
sudo dnf install -y python-tripleoclient
* Create a vendor directory (which later can be pushed into a git
repository):
.. code-block:: shell
mkdir ~/vendor
* Create a tcib directory under the vendor folder. All container build
yaml needs to live in a tcib folder as a root directory.
.. code-block:: shell
mkdir ~/vendor/tcib
* Create the `~/vendor/containers.yaml` which contains the list
of images that we want to build:
.. code-block:: yaml
container_images:
- image_source: tripleo
imagename: localhost/tripleomaster/openstack-cinder-cooldriver:latest
* Create `~/vendor/tcib/cinder-cooldriver` to hold our container image
configuration.
.. code-block:: shell
mkdir ~/vendor/tcib/cinder-cooldriver
* Optionally, add custom files into the build environment.
.. code-block:: shell
mkdir ~/vendor/tcib/cinder-cooldriver/files
cp custom-package.rpm ~/vendor/tcib/cinder-cooldriver/files
* Create `~/vendor/tcib/cinder-cooldriver/cinder-cooldriver.yaml` file which
contains the container image configuration:
.. code-block:: yaml
---
# that's the parent layer, here cinder-volume
tcib_from: localhost/tripleomaster/openstack-cinder-volume:latest
tcib_actions:
- user: root
- run: mkdir /tmp/cooldriver/example.py
- run: mkdir -p /rpms
- run: dnf install -y cooldriver_package
tcib_copies:
- '{{lookup(''env'',''HOME'')}}/vendor/tcib/cinder-cooldriver/files/custom-package.rpm /rpms'
tcib_gather_files: >
{{ lookup('fileglob', '~/vendor/tcib/cinder-cooldriver/files/*', wantlist=True) }}
tcib_runs:
- dnf install -y /rpms/*.rpm
tcib_user: cinder
.. note:: Here `tcib_runs` provides a shortcut to `tcib_actions:run`. See more tcib parameters documented in the `tcib`_ role.
.. _tcib: https://docs.openstack.org/tripleo-ansible/latest/roles/role-tripleo_container_image_build.html#r-o-l-e-d-e-f-a-u-l-t-s
* The result file structure should look something like:
.. code-block:: shell
$ tree vendor
vendor
├── containers.yaml
└── tcib
└── cinder-cooldriver
└── cinder-cooldriver.yaml
└── files
└── custom-package.rpm
* Build the vendor container image:
.. code-block:: shell
openstack tripleo container image build \
--config-file ~/vendor/containers.yaml \
--config-path ~/vendor
* Use `sudo buildah images` command to check if the image was built:
.. code-block:: shell
localhost/tripleomaster/openstack-cinder-cooldriver latest 257592a90133 1 minute ago 1.22 GB
.. note:: If you want to push the image into a Docker Registry, you can use
`--push` with `--registry`. Use
`openstack tripleo container image build --help` for more details.
* Push the image into the TripleO Container registry:
.. code-block:: shell
sudo openstack tripleo container image push \
--local --registry-url 192.168.24.1:8787 \
localhost/tripleomaster/openstack-cinder-cooldriver:latest
* Use `openstack tripleo container image list` to check if the image was pushed:
.. code-block:: shell
+--------------------------------------------------------------------------------------------------+
| Image Name |
+--------------------------------------------------------------------------------------------------+
| docker://undercloud.ctlplane.localdomain:8787/tripleomaster/openstack-cinder-vendor:latest |
+--------------------------------------------------------------------------------------------------+
Adding layers to existing containers using Docker
.................................................
.. note:: Note that this method has been simplified in Victoria and backported
down to train, with the new `openstack tripleo container image build`
command.
The example below demonstrates how to extend a container on the Undercloud host
machine. It assumes you are running a local docker registry on the undercloud.
We recommend that you create a Dockerfile to extend the existing container.
Here is an example extending the cinder-volume container::
FROM 127.0.0.1:8787/tripleo/centos-binary-cinder-volume
MAINTAINER Vendor X
LABEL name="tripleo/centos-binary-cinder-volume-vendorx" vendor="Vendor X" version="2.1" release="1"
# switch to root and install a custom RPM, etc.
USER root
COPY vendor_x.rpm /tmp
RUN rpm -ivh /tmp/vendor_x.rpm
# switch the container back to the default user
USER cinder
Build the container above using `docker build` on the command line. This
will output a container image <ID> (used below to tag it). Create a docker tag
and push it into the local registry::
docker tag <ID> 127.0.0.1:8787/tripleo/centos-binary-cinder-volume-vendorx:rev1
docker push 127.0.0.1:8787/tripleo/centos-binary-cinder-volume-vendorx:rev1
Start an overcloud deployment as normal, passing a custom Heat environment
that overrides the image parameter (see the override example later in this
document) to obtain the new container.
.. warning:: Note that the new container will have the complete software stack
built into it as is normal for containers. When other containers
are updated and include security fixes in these lower layers, this
container will NOT be updated as a result and will require rebuilding.
Building new containers with tripleo container image build
----------------------------------------------------------
Usage
.....
Use the following command to build all of the container images used in TripleO:
.. code-block:: shell
openstack tripleo container image build
Different options are provided for advanced usage. They can be discovered
by using the `--help` argument.
Here are some of them (a combined example follows the list):
* `--config-file` to use a custom YAML config file specifying the images to build.
* `--config-path` to use a custom base configuration path.
This is the base path for all container-image files. If this option is set,
the default path for <config-file> will be modified.
* `--extra-config` to apply additional options from a given configuration YAML
file. This will apply to all containers built.
* `--exclude` to skip some containers during the build.
* `--registry` to specify a Container Registry where the images will be pushed.
* `--authfile` to specify an authentication file if the Container Registry
requires authentication.
* `--skip-build` if we don't want to build and push images. It will only
generate the configuration files.
* `--push` to push the container images into the Container Registry.
* `--volume` to override the default bind mounts needed when the container
images are built. If you use this argument, don't forget that you might need
to include the default ones.
* `--work-dir` to specify the place where the configuration files will be generated.
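For illustration, a build run that combines several of these options might look
like the sketch below; the config paths, registry address and excluded image
name are placeholders rather than required values:

.. code-block:: shell

   # paths, registry and excluded image name below are examples only
   openstack tripleo container image build \
     --config-file ~/vendor/containers.yaml \
     --config-path ~/vendor \
     --exclude cinder-backup \
     --registry 192.168.24.1:8787 \
     --push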
Tips and Tricks with tripleo_container_image_build
..................................................
Here's a non-exhaustive list of tips and tricks that might make things faster,
especially on a dev env where you need to build the containers multiple times.
Inject a caching proxy
______________________
Using a caching proxy can make things faster when it comes to package fetching.
One way is to expose the dnf.conf/yum.conf using `--volume`.
Since `dnf.conf is edited during the container build`_, you want to expose a
copy of your host config::
sudo cp -r /etc/dnf /srv/container-dnf
openstack tripleo container image build --volume /srv/container-dnf:/etc/dnf:z
Another way is to expose the `http_proxy` and `https_proxy` environment
variables.
In order to do so, create a simple yaml file, for instance ~/proxy.yaml::
---
tcib_envs:
LANG: en_US.UTF-8
container: oci
http_proxy: http://PROXY_HOST:PORT
https_proxy: http://PROXY_HOST:PORT
Then, pass that file using the `--extra-config` parameter::
openstack tripleo container image build --extra-config proxy.yaml
And you're set.
.. note:: Please ensure you also pass the `default values`_, since ansible
isn't configured to `merge dicts/lists`_ by default.
.. _dnf.conf is edited during the container build: https://opendev.org/openstack/tripleo-common/src/commit/156b565bdf74c19d3513f9586fa5fcf1181db3a7/container-images/tcib/base/base.yaml#L3-L14
.. _default values: https://opendev.org/openstack/tripleo-common/src/commit/156b565bdf74c19d3513f9586fa5fcf1181db3a7/container-images/tcib/base/base.yaml#L35-L37
.. _merge dicts/lists: https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-hash-behaviour
Get a minimal environment to build containers
_____________________________________________
As a dev, you might want to get a daily build of your container images. While
you can, of course, run this on an undercloud, you actually don't need one: you
can use `this playbook`_ from the `tripleo-operator-ansible`_ project.
With this, you can set a nightly cron job that ensures you're always getting
the latest build on your registry.
.. _this playbook: https://opendev.org/openstack/tripleo-operator-ansible/src/branch/master/playbooks/container-build.yaml
.. _tripleo-operator-ansible: https://docs.openstack.org/tripleo-operator-ansible/latest/
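For example, a nightly crontab entry could look like the sketch below; the
checkout path and log file are assumptions, and any variables the playbook
needs would still have to be supplied per its own documentation:

.. code-block:: shell

   # hypothetical crontab entry: build containers every night at 02:00
   0 2 * * * ansible-playbook /home/stack/tripleo-operator-ansible/playbooks/container-build.yaml >> /home/stack/container-build.log 2>&1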
Building new containers with kolla-build
........................................
.. note:: Note that this method will be deprecated during the Victoria cycle
and replaced by the new `openstack tripleo container image build`
command.
To create new containers, or modify existing ones, you can use ``kolla-build``
from the `Kolla`_ project to build and push the images yourself. The command
to build new containers is below. Note that this assumes you are on an
undercloud host where the registry IP address is 192.168.24.1.
Configure Kolla to build images for TripleO, in `/etc/kolla/kolla-build.conf`::
[DEFAULT]
base=centos
type=binary
namespace=master
registry=192.168.24.1:8787
tag=latest
template_override=/usr/share/tripleo-common/container-images/tripleo_kolla_template_overrides.j2
rpm_setup_config=http://trunk.rdoproject.org/centos9/current-tripleo/delorean.repo,http://trunk.rdoproject.org/centos9/delorean-deps.repo
push=True
Use the following command to build all of the container images used in TripleO::
openstack overcloud container image build \
--config-file /usr/share/tripleo-common/container-images/overcloud_containers.yaml \
--kolla-config-file /etc/kolla/kolla-build.conf
Or use `kolla-build` to build the images yourself, which provides more
flexibility and allows you to rebuild selectively just the images matching
a given name, for example to build only the heat images with the TripleO
customization::
kolla-build heat
Notice that TripleO already uses the
``/usr/share/tripleo-common/container-images/tripleo_kolla_template_overrides.j2``
to add or change specific aspects of the containers using the `kolla template
override mechanism`_. This file can be copied and modified to create custom
containers. The original copy of this file can be found in the
`tripleo-common`_ repository.
The following template is an example of the template used for building the base
images that are consumed by TripleO. In this case we are adding the `puppet`
RPM to the base image::
{% extends parent_template %}
{% set base_centos_binary_packages_append = ['puppet'] %}
.. _Kolla: https://github.com/openstack/kolla
.. _kolla template override mechanism: https://docs.openstack.org/kolla/latest/admin/image-building.html#dockerfile-customisation
.. _tripleo-common: https://github.com/openstack/tripleo-common/blob/master/container-images/tripleo_kolla_template_overrides.j2
Integrating 3rd party containers with tripleo-heat-templates
------------------------------------------------------------
The `TripleO Heat Templates`_ repo is where most of the logic resides in the form
of heat templates. These templates define each service, the containers'
configuration and the initialization or post-execution operations.
.. _TripleO Heat Templates: https://opendev.org/openstack/tripleo-heat-templates
The docker templates can be found under the `docker` sub directory in the
`tripleo-heat-templates` root. The service files are under the
`docker/service` directory.
For more information on how to integrate containers into the TripleO Heat templates,
see the :ref:`Containerized TripleO architecture<containers_arch_tht>` document.
If all you need to do is change out a container for a specific service, you can
create a custom heat environment file that contains your override. To swap out
the cinder container from our previous example we would add::
parameter_defaults:
   ContainerCinderVolumeImage: centos-binary-cinder-volume-vendorx:rev1
.. note:: Image parameters were named Docker*Image prior to the Train cycle.
3rd party kernel modules
------------------------
Some applications (like Neutron or Cinder plugins) require specific kernel modules to be installed
and loaded on the system.
We recommend two different methods to deploy and load these modules.
kernel module is deployed on the host
.....................................
The kernel module is deployed on the base Operating System via RPM or DKMS.
Deploy the module by using the ``tripleo-mount-image`` tool and create a
``chroot``.
First you need to create a repository file where the module will be downloaded from, and copy the repo file into the image::
temp_dir=$(mktemp -d)
sudo tripleo-mount-image -a /path/to/overcloud-full.qcow2 -m $temp_dir
sudo cp my-repo.repo $temp_dir/etc/yum.repos.d/
You can now start a chroot and install the rpm that contains the kernel module::
sudo mount -o bind /dev $temp_dir/dev/
sudo cp /etc/resolv.conf $temp_dir/etc/resolv.conf
sudo chroot $temp_dir /bin/bash
dnf install my-rpm
exit
Then unmount the image::
sudo rm $temp_dir/etc/resolv.conf
sudo umount $temp_dir/dev
sudo tripleo-unmount-image -m $temp_dir
Now that the rpm is deployed with the kernel module, we need to configure TripleO to load it.
To configure an extra kernel module named "dpdk_module" for a specific role, we would add::
parameter_defaults:
ControllerExtraKernelModules:
dpdk_module: {}
Since our containers don't get their own kernels, we load modules on the host.
Therefore, the ExtraKernelModules parameter is used to configure which modules we want to load.
This parameter will be applied to the Puppet manifest (in the kernel.yaml service).
The container needs the modules mounted from the host, so make sure the plugin template has the
following configuration (at minimum)::
volumes:
- /lib/modules:/lib/modules:ro
However, this method might be problematic if RPM dependencies are too complex to deploy the kernel
module on the host.
kernel module is containerized
..............................
Kernel modules can be loaded from the container.
The module can be deployed in the same container as the application that will use it, or in a separate
container.
Either way, if you need to run a privileged container, make sure to set this parameter::
privileged: true
If privileged mode isn't required, it is suggested to set it to false for security reasons.
Kernel modules need to be loaded when the container is started by Docker. To do so, it is
suggested to configure the composable service which deploys the module in the container this way::
kolla_config:
/var/lib/kolla/config_files/neutron_ovs_agent.json:
command: /dpdk_module_launcher.sh
docker_config_scripts:
dpdk_module_launcher.sh:
mode: "0755"
content: |
#!/bin/bash
set -xe
modprobe dpdk_module
docker_config:
step_3:
neutron_ovs_bridge:
volumes:
list_concat:
- {get_attr: [ContainersCommon, volumes]}
-
- /var/lib/docker-config-scripts/dpdk_module_launcher.sh:/dpdk_module_launcher.sh:ro
That way, the container will be configured to load the module at start, so the operator can restart containers without having to load the module manually.
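Since the module is loaded into the host kernel, a quick way to confirm it
after the container has started is to check from the host (``dpdk_module`` is
the example module name used above)::

    lsmod | grep dpdk_module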

View File

@ -1,657 +0,0 @@
.. _config_download:
TripleO config-download User's Guide: Deploying with Ansible
=============================================================
Introduction
------------
This documentation details using ``config-download``.
``config-download`` is the feature that enables deploying the Overcloud software
configuration with Ansible in TripleO.
Summary
-------
Since the Queens release, it has been possible to use Ansible to apply the
overcloud configuration and with the Rocky release it became the default.
Ansible is used to replace the communication and transport of the software
configuration deployment data between Heat and the Heat agent
(os-collect-config) on the overcloud nodes.
Instead of os-collect-config running on each overcloud node and polling for
deployment data from Heat, the Ansible control node applies the configuration
by running ``ansible-playbook`` with an Ansible inventory file and a set of
playbooks and tasks.
The Ansible control node (the node running ``ansible-playbook``) is the
undercloud by default.
``config-download`` is the feature name that enables using Ansible in this
manner, and will often be used to refer to the method detailed in this
documentation.
Heat is still used to create the stack; the ansible playbooks are then saved
to the filesystem in a git repository. These playbooks are used to deploy the
OpenStack services and configuration to the overcloud nodes.
The same parameter values and environment files are passed to Heat as they were
previously. During the stack creation, Heat simply takes the user inputs from the
templates and renders the required playbooks for the deployment.
The difference with ``config-download`` is that although Heat creates all the
deployment data necessary via SoftwareDeployment resources to perform the
overcloud installation and configuration, it does not apply any of the software
deployments. The data is only made available via the Heat API. Once the stack
is created, deployment data is downloaded from Heat and ansible playbooks are
generated.
The downloaded deployment data and ansible playbooks are then used to complete
the configuration of the overcloud with ``ansible-playbook``.
This diagram details the overall sequence of how using config-download
completes an overcloud deployment:
.. image:: ../_images/tripleo_ansible_arch.png
:scale: 40%
Deployment with config-download
-------------------------------
Ansible and ``config-download`` are used by default when ``openstack
overcloud deploy`` (tripleoclient) is run. The command is backwards compatible
in terms of functionality, meaning that running ``openstack overcloud deploy``
will still result in a full overcloud deployment.
The deployment is done through a series of steps in tripleoclient, all of
which are automated. The workflow steps are summarized as:
#. Create deployment plan
#. Create Heat stack
#. Create software configuration within the Heat stack
#. Create tripleo-admin ssh user
#. Download the software configuration from Heat
#. Apply the downloaded software configuration to the overcloud nodes with
``ansible-playbook``.
.. _`authorized on the overcloud nodes`:
Creating the ``tripleo-admin`` user on each overcloud node is necessary since
ansible uses ssh to connect to each node to perform configuration.
The following steps are done to create the ``tripleo-admin`` user:
#. Run a playbook to create ``tripleo-admin`` on each node. This also gives the
user sudo permissions, and creates and stores a new ssh keypair for
``tripleo-admin``.
The values for the ssh-related CLI arguments described in the next section must
be the same for all nodes in the overcloud deployment. ``overcloud-ssh-key``
should be the private key that corresponds with the public key specified by the
Heat parameter ``KeyName`` when using Ironic deployed nodes.
config-download related CLI arguments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are some new CLI arguments for ``openstack overcloud deploy`` that can be
used to influence the behavior of the overcloud deployment as it relates to
``config-download``::
--overcloud-ssh-user # Initial ssh user used for creating tripleo-admin.
# Defaults to heat-admin
--overcloud-ssh-key # Initial ssh private key (file path) to be used for
# creating tripleo-admin.
# Defaults to ~/.ssh/id_rsa
--override-ansible-cfg # path to an ansible config file, to inject any
# arbitrary ansible config to be used when running
# ansible-playbook
--stack-only # Only update the stack. Skips applying the
# software configuration with ansible-playbook.
--config-download-only # Only apply the software configuration with
# ansible-playbook. Skips the stack update.
See ``openstack overcloud deploy --help`` for further help text.
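For example, a deployment that overrides the initial ssh user and private key
used for creating ``tripleo-admin`` could look like the following sketch; the
key path is only an illustration::

    openstack overcloud deploy \
      --templates \
      --overcloud-ssh-user heat-admin \
      --overcloud-ssh-key ~/.ssh/id_rsa
      # other CLI arguments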
.. include:: deployment_output.rst
.. _deployment_status:
.. include:: deployment_status.rst
.. include:: deployment_log.rst
Ansible configuration
^^^^^^^^^^^^^^^^^^^^^
When ``ansible-playbook`` runs, it will use a configuration file with the
following default values::
[defaults]
retry_files_enabled = False
log_path = <working directory>/ansible.log
forks = 25
[ssh_connection]
ssh_args = -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ControlPersist=60s
control_path_dir = <working directory>/ansible-ssh
Any of the above configuration options can be overridden, or any additional
ansible configuration used by passing the path to an ansible configuration file
with ``--override-ansible-cfg`` on the deployment command.
For example the following command will use the configuration options from
``/home/stack/ansible.cfg``. Any options specified in the override file will
take precedence over the defaults::
openstack overcloud deploy \
...
--override-ansible-cfg /home/stack/ansible.cfg
Ansible project directory
^^^^^^^^^^^^^^^^^^^^^^^^^
The workflow will create an Ansible project directory with the plan name under
``$HOME/overcloud-deploy/<stack>/config-download``. For the default plan name of ``overcloud`` the working
directory will be::
$HOME/overcloud-deploy/overcloud/config-download/overcloud
The project directory is where the downloaded software configuration from
Heat will be saved. It also includes other ansible-related files necessary to
run ``ansible-playbook`` to configure the overcloud.
The contents of the project directory include the following files:
tripleo-ansible-inventory.yaml
Ansible inventory file containing hosts and vars for all the overcloud nodes.
ansible.log
Log file from the last run of ``ansible-playbook``.
ansible.cfg
Config file used when running ``ansible-playbook``.
ansible-playbook-command.sh
Executable script that can be used to rerun ``ansible-playbook``.
ssh_private_key
Private ssh key used to ssh to the overcloud nodes.
Reproducing ansible-playbook
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Once in the created project directory, simply run ``ansible-playbook-command.sh``
to reproduce the deployment::
./ansible-playbook-command.sh
Any additional arguments passed to this script will be passed unchanged to the
``ansible-playbook`` command::
./ansible-playbook-command.sh --check
Using this method it is possible to take advantage of various Ansible features,
such as check mode (``--check``), limiting hosts (``--limit``), or overriding
variables (``-e``).
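For instance, to run a check-mode pass limited to a single node (the hostname
shown follows the default naming convention and is only an example)::

    ./ansible-playbook-command.sh --check --limit overcloud-controller-0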
Git repository
^^^^^^^^^^^^^^
The ansible project directory is a git repository. Each time config-download
downloads the software configuration data from Heat, the project directory will
be checked for differences. A new commit will be created if there are any
changes from the previous revision.
From within the ansible project directory, standard git commands can be used to
explore each revision. Commands such as ``git log``, ``git show``, and ``git
diff`` are useful ways to describe how each commit to the software
configuration differs from previous commits.
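For example, from within the project directory (a sketch; the commits and
their contents will differ per deployment)::

    git log --oneline
    git show HEAD
    git diff HEAD~1 HEAD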
Applying earlier versions of configuration
__________________________________________
Using commands such as ``git revert`` or ``git checkout``, it is possible to
update the ansible project directory to an earlier version of the software
configuration.
It is possible to then apply this earlier version with ``ansible-playbook``.
However, caution should be exercised as this could lead to a broken overcloud
deployment. Only well understood earlier versions should be attempted to be
applied.
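A minimal sketch of that workflow, assuming the commit id of a well understood
earlier revision is known, would be::

    git checkout <commit-id>
    ./ansible-playbook-command.sh --check
    # if the preview looks correct, rerun without --check
    ./ansible-playbook-command.sh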
.. note::
Data migration changes will never be undone by applying an earlier version
of the software configuration with config-download. For example, database
schema migrations that had already been applied would never be undone by
only applying an earlier version of the configuration.
Software changes that were related to hardware changes in the overcloud
(such as scaling up or down) would also not be completely undone by
applying earlier versions of the software configuration.
.. note::
Reverting to earlier revisions of the project directory has no effect on
the configuration stored in the Heat stack. A corresponding change should
be made to the deployment templates, and the stack updated to make the
changes permanent.
.. _manual-config-download:
Manual config-download
----------------------
Prior to running the ansible playbooks generated by config-download, it is necessary
to ensure the baremetal nodes have already been provisioned. See the baremetal deployment
guide first:
:doc:`configure-nodes-before-deployment <./network_v2>`
The config-download steps can be skipped when running ``openstack overcloud deploy``
by passing ``--stack-only``. This will cause tripleoclient to only deploy the Heat
stack.
When running ``openstack overcloud deploy`` with the ``--stack-only`` option,
the ansible content is still downloaded to the default directory
``$HOME/overcloud-deploy/overcloud/config-download``, but the command stops
before running ``ansible-playbook``.
This method is described in the following sections.
Run ansible-playbook
^^^^^^^^^^^^^^^^^^^^
Once the baremetal nodes have been configured and the configuration has been
downloaded during the ``--stack-only`` run of ``openstack overcloud deploy``,
you can run ``ansible-playbook`` manually to configure the overcloud nodes::
ansible-playbook \
-i /home/stack/config-download/overcloud/tripleo-ansible-inventory.yaml \
--private-key /path/private/ssh/key \
--become \
config-download/deploy_steps_playbook.yaml
.. note::
``--become`` is required when running ansible-playbook.
All default ansible configuration values will be used when manually running
``ansible-playbook`` in this manner. These values can be customized through
`ansible configuration
<https://docs.ansible.com/ansible/latest/installation_guide/intro_configuration.html>`_.
The following minimum configuration is recommended::
[defaults]
log_path = ansible.log
forks = 25
timeout = 30
[ssh_connection]
ssh_args = -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ControlMaster=auto -o ControlPersist=30m
retries = 8
pipelining = True
.. note::
When running ``ansible-playbook`` manually, the overcloud status as returned
by ``openstack overcloud status`` won't be automatically updated due to the
configuration being applied outside of the API.
See :ref:`deployment_status` for setting the status manually.
Ansible project directory contents
----------------------------------
This section details the structure of the ``config-download`` generated
Ansible project directory.
Playbooks
^^^^^^^^^
deploy_steps_playbook.yaml
Initial deployment or template update (not minor update)
Further detailed in :ref:`deploy_steps_playbook.yaml`
fast_forward_upgrade_playbook.yaml
Fast forward upgrades
post_upgrade_steps_playbook.yaml
Post upgrade steps for major upgrade
pre_upgrade_rolling_steps_playbook.yaml
Pre upgrade steps for major upgrade
update_steps_playbook.yaml
Minor update steps
upgrade_steps_playbook.yaml
Major upgrade steps
.. _deploy_steps_playbook.yaml:
deploy_steps_playbook.yaml
__________________________
``deploy_steps_playbook.yaml`` is the playbook used for deployment and template
update. It applies all the software configuration necessary to deploy a full
overcloud based on the templates provided as input to the deployment command.
This section summarizes at a high level the different ansible plays used
within this playbook. The play names shown here are the same names used within
the playbook and are what will be shown in the output when ``ansible-playbook`` is
run.
The ansible tags set on each play are also shown below.
Gather facts from undercloud
Fact gathering for the undercloud node
tags: facts
Gather facts from overcloud
Fact gathering for the overcloud nodes
tags: facts
Load global variables
Loads all variables from ``global_vars.yaml``
tags: always
Common roles for TripleO servers
Applies common ansible roles to all overcloud nodes. Includes
``tripleo_bootstrap`` for installing bootstrap packages and
``tripleo_ssh_known_hosts`` for configuring ssh known hosts.
tags: common_roles
Overcloud deploy step tasks for step 0
Applies tasks from the ``deploy_steps_tasks`` template interface
tags: overcloud, deploy_steps
Server deployments
Applies server specific Heat deployments for configuration such as networking
and hieradata. Includes ``NetworkDeployment``, ``<Role>Deployment``,
``<Role>AllNodesDeployment``, etc.
tags: overcloud, pre_deploy_steps
Host prep steps
Applies tasks from the ``host_prep_steps`` template interface
tags: overcloud, host_prep_steps
External deployment step [1,2,3,4,5]
Applies tasks from the ``external_deploy_steps_tasks`` template interface.
These tasks are run against the undercloud node only.
tags: external, external_deploy_steps
Overcloud deploy step tasks for [1,2,3,4,5]
Applies tasks from the ``deploy_steps_tasks`` template interface
tags: overcloud, deploy_steps
Overcloud common deploy step tasks [1,2,3,4,5]
Applies the common tasks done at each step to include puppet host
configuration, ``container-puppet.py``, and ``paunch`` or
``tripleo_container_manage`` Ansible role (container configuration).
tags: overcloud, deploy_steps
Server Post Deployments
Applies server specific Heat deployments for configuration done after the 5
step deployment process.
tags: overcloud, post_deploy_steps
External deployment Post Deploy tasks
Applies tasks from the ``external_post_deploy_steps_tasks`` template interface.
These tasks are run against the undercloud node only.
tags: external, external_deploy_steps
Task files
^^^^^^^^^^
These task files include tasks specific to their intended function. The task
files are automatically used by specific playbooks from the previous section.
**boot_param_tasks.yaml**
**common_deploy_steps_tasks.yaml**
**docker_puppet_script.yaml**
**external_deploy_steps_tasks.yaml**
**external_post_deploy_steps_tasks.yaml**
**fast_forward_upgrade_bootstrap_role_tasks.yaml**
**fast_forward_upgrade_bootstrap_tasks.yaml**
**fast_forward_upgrade_post_role_tasks.yaml**
**fast_forward_upgrade_prep_role_tasks.yaml**
**fast_forward_upgrade_prep_tasks.yaml**
**fast_forward_upgrade_release_tasks.yaml**
**upgrade_steps_tasks.yaml**
**update_steps_tasks.yaml**
**pre_upgrade_rolling_steps_tasks.yaml**
**post_upgrade_steps_tasks.yaml**
**post_update_steps_tasks.yaml**
Heat Role directories
^^^^^^^^^^^^^^^^^^^^^
Each Heat role from the roles data file used in the deployment (specified with
``-r`` from the ``openstack overcloud deploy`` command), will have a
correspondingly named directory.
When using the default roles, these directories would be:
**Controller**
**Compute**
**ObjectStorage**
**BlockStorage**
**CephStorage**
A given role directory contains role specific task files and a subdirectory for
each host for that role. For example, when using the default hostnames, the
**Controller** role directory would contain the following host subdirectories:
**overcloud-controller-0**
**overcloud-controller-1**
**overcloud-controller-2**
Variable and template related files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
group_vars
Directory which contains variables specific to different ansible inventory
groups.
global_vars.yaml
Global ansible variables applied to all overcloud nodes
templates
Directory containing any templates used during the deployment
Other files
^^^^^^^^^^^
Other files in the project directory are:
ansible-playbook-command.sh
Script to reproduce ansible-playbook command
tripleo-ansible-inventory.yaml
Ansible inventory file
overcloud-config.tar.gz
Tarball of Ansible project directory
Running specific tasks
----------------------
Running only specific tasks (or skipping certain tasks) can be done from within
the ansible project directory.
.. note::
Running specific tasks is an advanced use case and only recommended for
specific scenarios where the deployer is aware of the impact of skipping or
only running certain tasks.
This can be useful during troubleshooting and debugging scenarios, but
should be used with caution as it can result in an overcloud that is not
fully configured.
.. warning::
All tasks that are part of the deployment need to be run, and in the order
specified. When skipping tasks with ``--tags``, ``--skip-tags``,
``--start-at-task``, the deployment could be left in an inoperable state.
The functionality to skip tasks or only run certain tasks is meant to aid in
troubleshooting and iterating more quickly on failing deployments and
updates.
All changes to the deployed cloud must still be applied through the Heat
templates and environment files passed to the ``openstack overcloud deploy``
command. Doing so ensures that the deployed cloud is kept in sync with the
state of the templates and the state of the Heat stack.
.. warning::
When skipping tasks, the overcloud must be in the state expected by the
starting task; that is, the state of the overcloud should be the same as if
all the skipped tasks had been applied. Otherwise, the result of the tasks
that get executed will be undefined and could leave the cloud in an
inoperable state.
Likewise, the deployed cloud may not be left in its fully configured state
if tasks are skipped at the end of the deployment.
Complete the :ref:`manual-config-download` steps to create the ansible project
directory, or use the existing project directory at
``$HOME/overcloud-deploy/<stack-name>/config-download/<stack-name>``.
Tags
^^^^
The playbooks use tagged tasks for finer-grained control of what to apply if
desired. Tags can be used with the ``ansible-playbook`` CLI arguments ``--tags`` or
``--skip-tags`` to control what tasks are executed. The enabled tags are:
facts
fact gathering
common_roles
ansible roles common to all nodes
overcloud
all plays for overcloud deployment
pre_deploy_steps
deployments that happen pre deploy_steps
host_prep_steps
Host preparation steps
deploy_steps
deployment steps
post_deploy_steps
deployments that happen post deploy_steps
external
all external deployments
external_deploy_steps
external deployments that run on the undercloud
See :ref:`deploy_steps_playbook.yaml` for a description of which tags apply to
specific plays in the deployment playbook.
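For example, to apply only the deployment step tasks while skipping the
external deployments (tag names as listed above)::

    ./ansible-playbook-command.sh --tags deploy_steps --skip-tags external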
Server specific pre and post deployments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The list of server specific pre and post deployments run during the `Server
deployments` and `Server Post Deployments` plays (see
:ref:`deploy_steps_playbook.yaml`) is dependent upon what custom roles and
templates are used with the deployment.
The list of these tasks is defined in an ansible group variable that applies
to each server in the inventory group named after the Heat role. From the
ansible project directory, the value can be seen within the group variable file
named after the Heat role::
$ cat group_vars/Compute
Compute_pre_deployments:
- UpgradeInitDeployment
- HostsEntryDeployment
- DeployedServerBootstrapDeployment
- InstanceIdDeployment
- NetworkDeployment
- ComputeUpgradeInitDeployment
- ComputeDeployment
- ComputeHostsDeployment
- ComputeAllNodesDeployment
- ComputeAllNodesValidationDeployment
- ComputeHostPrepDeployment
- ComputeArtifactsDeploy
Compute_post_deployments: []
``<Role>_pre_deployments`` is the list of pre deployments, and
``<Role>_post_deployments`` is the list of post deployments.
To run only specific deployments, the value of the variable can be defined on
the command line when running ``ansible-playbook``, which will overwrite the
value from the group variable file for that role.
For example::
ansible-playbook \
-e Compute_pre_deployments=NetworkDeployment \
--tags pre_deploy_steps
# other CLI arguments
Using the above example, only the task for the ``NetworkDeployment`` resource
would get applied since it would be the only value defined in
``Compute_pre_deployments``, and ``--tags pre_deploy_steps`` is also specified,
causing all other plays to get skipped.
Starting at a specific task
^^^^^^^^^^^^^^^^^^^^^^^^^^^
To start the deployment at a specific task, use the ``ansible-playbook`` CLI
argument ``--start-at-task``. To see a list of task names for a given playbook,
``--list-tasks`` can be used to list the task names.
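For example, from the ansible project directory, the task names of the
deployment playbook can be listed with (a sketch using the generated inventory)::

    ansible-playbook -i tripleo-ansible-inventory.yaml deploy_steps_playbook.yaml --list-tasks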
.. note::
Some tasks that include the ``step`` variable or other ansible variables in
the task name do not work with ``--start-at-task`` due to a limitation in
ansible. For example the task with the name::
Start containers for step 1
won't work with ``--start-at-task`` since the step number is in the name
(1).
When using ``--start-at-task``, the tasks that gather facts and load global
variables for the playbook execution are skipped by default. Skipping those
tasks can cause unexpected errors in later tasks. To avoid errors, those tasks
can be forced to execute when using ``--start-at-task`` by including the
following options to the ``ansible-playbook`` command::
ansible-playbook \
<other options > \
-e gather_facts=true \
-e @global_vars.yaml
The ``global_vars.yaml`` variable file exists in the config-download directory,
whether that directory was generated manually or under ``$HOME/config-download``.
Previewing changes
------------------
Changes can be previewed before they are applied to the overcloud. To preview
changes, the stack update must be run with
the ``--stack-only`` cli argument::
openstack overcloud deploy \
--stack-only
# other CLI arguments
When running ``ansible-playbook``, use the ``--check`` CLI argument to preview
any changes. The extent to which changes can be previewed depends on many
factors, such as the underlying tools in use (puppet, docker, etc.) and the
support for ansible check mode in the given ansible module.
The ``--diff`` option can also be used with ``--check`` to show the
differences that would result from changes.
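Using the wrapper script from the ansible project directory, such a preview
run would look like::

    ./ansible-playbook-command.sh --check --diff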
See `Ansible Check Mode ("Dry Run")
<https://docs.ansible.com/ansible/2.5/user_guide/playbooks_checkmode.html>`_
for more details.

View File

@ -1,133 +0,0 @@
.. _config_download_differences:
Ansible config-download differences
===================================
With the Queens release, it became possible to use Ansible to apply the
overcloud configuration and this method became the default behavior with
the Rocky release.
The feature is fully documented at
:doc:`ansible_config_download`, while this page details
the differences to the deployer experience with config-download.
Ansible vs. os-collect-config
-----------------------------
Previously, TripleO used an agent running on each overcloud node called
``os-collect-config``. This agent periodically polled the undercloud Heat API for
software configuration changes that needed to be applied to the node.
``os-collect-config`` ran ``os-refresh-config`` and ``os-apply-config`` as
needed whenever new software configuration changes were detected. This was a
**"pull"** style model, given that each node polled the Heat API, pulled
changes, and then applied them locally.
With config-download, TripleO has switched to a **"push"** style model. Ansible
is run from a central control node which is the undercloud.
``ansible-playbook`` is run from the undercloud and software configuration
changes are pushed out to each overcloud node via ssh.
With the new model, ``os-collect-config``, ``os-refresh-config``, and
``os-apply-config`` are no longer used in a TripleO deployment. The
``os-collect-config`` service is now disabled by default and won't start on
boot.
.. note::
Heat standalone software deployments still rely on ``os-collect-config``.
They are a type of deployment that can be applied to overcloud nodes
directly via Heat outside of the overcloud stack, and without having to do
a full stack update of the overcloud stack.
These types of deployments are **NOT** typically used when doing TripleO.
However, if these deployments are being used in an environment to manage
overcloud nodes, then the ``os-collect-config`` service must be started and
enabled on the overcloud nodes where these types of deployments are
applied.
For reference, the Heat CLI commands that are used to create these types of
deployments are::
openstack software config create ...
openstack software deployment create ...
If these commands are not being used in the environment, then
``os-collect-config`` can be left disabled.
Deployment workflow
-------------------
The default workflow executed by ``openstack overcloud deploy`` takes care of
all the necessary changes when using config-download. In both the previous and
new workflows, ``openstack overcloud deploy`` (tripleoclient) takes care of
automating all the steps through Mistral workflow(s). Therefore, existing CLI
scripts that called ``openstack overcloud deploy`` will continue to work with
no changes.
It's important to recognize the differences in the workflow to aid in
understanding the deployment and operator experience. Previously, Heat was
responsible for:
#. (Heat) Creating OpenStack resources (Neutron networks, Nova/Ironic instances, etc)
#. (Heat) Creating software configuration
#. (Heat) Applying the created software configuration to the Nova/Ironic instances
With config-download, Heat is no longer responsible for the last item of
applying the created software configuration as ``ansible-playbook`` is used
instead.
Therefore, creating the Heat stack alone is no longer enough to fully deploy
the overcloud. Ansible must also be run from the undercloud to apply the
software configuration and to perform all the required tasks to fully deploy an
overcloud, such as configuring services, running bootstrap tasks, and starting
containers.
The new steps are summarized as:
#. (Heat) Creating OpenStack resources (Neutron networks, Nova/Ironic instances, etc)
#. (Heat) Creating software configuration
#. (tripleoclient) Enabling the tripleo-admin ssh user
#. (ansible) Applying the created software configuration to the Nova/Ironic instances
See :doc:`ansible_config_download` for details on the
tripleo-admin ssh user step.
Deployment CLI output
---------------------
During a deployment, the expected output from ``openstack overcloud deploy``
has changed. Output up to and including the stack create/update is similar to
previous releases. Stack events will be shown until the stack operation is
complete.
After the stack goes to ``CREATE_COMPLETE`` (or ``UPDATE_COMPLETE``), output
from the steps to enable the tripleo-admin user via ssh are shown.
.. include:: deployment_output.rst
.. include:: deployment_status.rst
.. include:: deployment_log.rst
config-download Use Cases
-------------------------
config-download exposes the ability to manually run the ``ansible-playbook``
command against the playbooks that are generated for the deployment. This leads
to many advantages over the older Heat deployment model.
- Test deployments. Running ``ansible-playbook`` with the ``--check --diff``
  options against ``deploy_steps_playbook.yaml`` will not modify an existing
  deployment. Instead, it only shows any changes that would be made (see the
  example after this list).
- Development environment testing. Ansible variables can be modified to do
quick testing. Once verified, Heat environment templates need to be updated
to reflect the change permanently. Then the config-download content should
be re-generated by running ``openstack overcloud deploy --stack-only``.
- Run specific tasks. It is possible to run certain parts of a deployment by
using ``--tags``.
- Prepare the deployment or update ahead of time and then run the playbooks
later. The operations around a deployment can be done at different times to
minimize risk.
- Integration with CI/CD. Additional checks and verification can be added to
a CI/CD pipeline relating to updating Heat templates and the Ansible
config-download content.
- AWX or Ansible Tower integration. Ansible content can be imported and run
  through a scalable and distributed system.
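As an illustration of the test-deployment and specific-task use cases above,
the generated playbooks can be run by hand from the config-download directory.
The working directory and the inventory file name below are assumptions and
depend on how the content was generated for your deployment::

    # preview the changes without applying them
    ansible-playbook -i inventory.yaml --check --diff deploy_steps_playbook.yaml

    # list the available tags, then run only a specific part of the deployment
    ansible-playbook -i inventory.yaml --list-tags deploy_steps_playbook.yaml
    ansible-playbook -i inventory.yaml --tags <tag> deploy_steps_playbook.yaml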
@ -1,335 +0,0 @@
TripleO Containers Architecture
===============================
This document explains the details of TripleO's container architecture: how the
containers are built for TripleO, how the configuration files are generated and
how the containers are eventually run.

Like other areas of TripleO, the container-based deployment requires a few
different projects to work together. The next section covers each of the parts
that allow for deploying OpenStack in containers using TripleO.
Containers runtime deployment and configuration notes
-----------------------------------------------------
TripleO has transitioned to the `podman`_ container runtime. Podman does not
use a persistent daemon to manage containers; TripleO wraps the container
service execution in systemd managed services, named tripleo_<container name>.
Prior to Stein, TripleO deployed the container runtime and image components
from the docker packages. The installed components included the docker daemon
system service and the `OCI`_ compliant `Moby`_ and `Containerd`_ components -
the building blocks for the container system.

The containers control plane includes `Paunch`_ or tripleo_container_manage_
and systemd for the stateless services, and Pacemaker `Bundle`_ for the
containerized stateful services, like the messaging system or database.
.. _podman: https://podman.io/
.. _OCI: https://www.opencontainers.org/
.. _Moby: https://mobyproject.org/
.. _Containerd: https://github.com/containerd/containerd
.. _Bundle: https://wiki.clusterlabs.org/wiki/Bundle_Walk-Through
Currently we provide a ``ContainerCli`` parameter which can be used to change
the container runtime, but only podman is supported for both the undercloud
and the overcloud.
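For example, the runtime can be set explicitly in an environment file::

    parameter_defaults:
      ContainerCli: podman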
We have provided various ``Container*`` configuration parameters in TripleO
Heat Templates for operators to tune some of the container-based settings.
There are still some ``Docker*`` configuration parameters available in TripleO
Heat Templates which are left over from the Docker-based deployment and kept
for historical reasons.
Parameter override example::
parameter_defaults:
DockerDebug: true
DockerOptions: '--log-driver=syslog --live-restore'
DockerNetworkOptions: '--bip=10.10.0.1/16'
DockerInsecureRegistryAddress: ['myregistry.local:8787']
DockerRegistryMirror: 'mirror.regionone.local:8081/myregistry-1.local/'
* ``DockerDebug`` adds more framework-specific details to the deployment logs.
* ``DockerOptions``, ``DockerNetworkOptions``, ``DockerAdditionalSockets`` define
the docker service startup options, like the default IP address for the
`docker0` bridge interface (``--bip``) or SELinux mode (``--selinux-enabled``).
.. note:: Make sure the default CIDR assigned for the `docker0` bridge interface
   does not conflict with other network ranges defined for your deployment.
.. note:: These options have no effect when using podman.
* ``DockerInsecureRegistryAddress``, ``DockerRegistryMirror`` allow you to
specify a custom registry mirror which can optionally be accessed insecurely
by using the ``DockerInsecureRegistryAddress`` parameter.
See the official dockerd `documentation`_ for the reference.
.. _documentation: https://docs.docker.com/engine/reference/commandline/dockerd/
Building Containers
-------------------
The containers used for TripleO are sourced from Kolla. Kolla is an OpenStack
team that aims to create tools to allow for deploying OpenStack on container
technologies. Kolla (or Kolla Build) is one of the tools produced by this team
and it allows for building and customizing container images for OpenStack
services and their dependencies.
TripleO consumes these images and takes advantage of the customization
capabilities provided by the `Kolla`_ build tool to install some packages that
are required by other parts of TripleO.
TripleO maintains its complete list of Kolla customizations in the
`tripleo-common`_ project.
.. _Kolla: https://docs.openstack.org/kolla/latest/admin/image-building.html#dockerfile-customisation
.. _tripleo-common: https://github.com/openstack/tripleo-common/blob/master/container-images/tripleo_kolla_template_overrides.j2
Paunch
------
.. note:: During Ussuri cycle, Paunch has been replaced by the
tripleo_container_manage_ Ansible role. Therefore, the following block
is deprecated in favor of the new role. However, the JSON input remains
backward compatible and the containers are configured the same way as it
was with Paunch.
The `paunch`_ hook is used to manage containers. This hook takes json
as input and uses it to create and run containers on demand. The json
describes how the container will be started. Some example keys are:
* **net**: To specify what network to use. This is commonly set to host.
* **privileged**: Whether to give full access to the host's devices to the
container, similar to what happens when the service runs directly on the host.
* **volumes**: List of host path volumes, named volumes, or dynamic volumes to
bind on the container.
* **environment**: List of environment variables to set on the container.
.. note:: The list above is not exhaustive and you should refer to the
`paunch` docs for the complete list.
The json file passed to this hook is built out of the `docker_config` attribute
defined in the service's yaml file. Refer to the `Docker specific settings`_
section for more info on this.
.. _paunch: https://github.com/openstack/paunch
.. _tripleo_container_manage: https://docs.openstack.org/tripleo-ansible/latest/roles/role-tripleo_container_manage.html
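A minimal, purely illustrative sketch of the JSON consumed by this hook could
look like the following; the container name and values are hypothetical and
mirror the ``docker_config`` data shown later in this document::

    {
        "glance_api": {
            "image": "192.168.24.1:8787/tripleomaster/centos-binary-glance-api:current-tripleo",
            "net": "host",
            "privileged": false,
            "restart": "always",
            "volumes": ["/var/lib/config-data/glance_api:/var/lib/kolla/config_files/src:ro"],
            "environment": ["KOLLA_CONFIG_STRATEGY=COPY_ALWAYS"]
        }
    }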
TripleO Heat Templates
----------------------
.. _containers_arch_tht:
The `TripleO Heat Templates`_ repo is where most of the logic resides in the form
of heat templates. These templates define each service, the containers'
configuration and the initialization or post-execution operations.
.. _TripleO Heat Templates: https://opendev.org/openstack/tripleo-heat-templates
Understanding container related files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The docker templates can be found under the `docker` subdirectory in the
`tripleo-heat-templates` root. The service files are under `docker/services`,
but the `docker` directory contains a bit more than just service files and some
of the other files are worth diving into:
deploy-steps.j2
...............
This file is a jinja template and it's rendered before the deployment is
started. This file defines the resources that are executed before and after the
container initialization.
.. _container-puppet.py:
container-puppet.py
...................
This script is responsible for generating the config files for each service. The
script is called from the `deploy-steps.j2` file and it takes a `json` file as
configuration. The json files passed to this script are built out of the
`puppet_config` parameter set in every service template (explained in the
`Docker specific settings`_ section).
The `container-puppet.py` execution results in a one-shot container being executed
(usually named `puppet-$service_name`) to generate the configuration options or
run other service specific initialization tasks. Example: Create Keystone endpoints.
.. note:: container-puppet.py was previously docker-puppet.py prior to the Train
cycle.
Anatomy of a containerized service template
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Containerized service templates inherit almost everything from the Puppet-based
templates, with some exceptions for some services. New properties have been
added to define container specific configurations, which will be covered in this
section.
Docker specific settings
........................
Each service may define output variable(s) which control config file generation,
initialization, and stepwise deployment of all the containers for this service.
The following sections are available (a condensed example follows this list):
* config_settings: This setting contains hiera data that is used
  to control how the Puppet modules generate config files for each service.
* step_config: This setting controls the manifest that is used to
create docker config files via puppet. The puppet tags below are
used along with this manifest to generate a config directory for
this container.
* kolla_config: Contains YAML that represents how to map config files
into the kolla container. This config file is typically mapped into
the container itself at the /var/lib/kolla/config_files/config.json
location and drives how kolla's external config mechanisms work.
* docker_config: Data that is passed to the docker-cmd hook to configure
a container, or step of containers at each step. See the available steps
below and the related docker-cmd hook documentation in the heat-agents
project.
* puppet_config: This section is a nested set of key value pairs
that drive the creation of config files using puppet.
Required parameters include:
* puppet_tags: Puppet resource tag names that are used to generate config
files with puppet. Only the named config resources are used to generate
a config file. Any service that specifies tags will have the default
tags of 'file,concat,file_line,augeas,cron' appended to the setting.
Example: keystone_config
* config_volume: The name of the volume (directory) where config files
will be generated for this service. Use this as the location to
bind mount into the running Kolla container for configuration.
* config_image: The name of the docker image that will be used for
generating configuration files. This is often the same container
that the runtime service uses. Some services share a common set of
config files which are generated in a common base container.
* step_config: This setting controls the manifest that is used to
create docker config files via puppet. The puppet tags below are
used along with this manifest to generate a config directory for
this container.
* container_puppet_tasks: This section provides data to drive the
container-puppet.py tool directly. The task is executed only once
within the cluster (not on each node) and is useful for several
puppet snippets we require for initialization of things like
keystone endpoints, database users, etc. See container-puppet.py
for formatting. NOTE: these tasks were docker_puppet_tasks prior to the
Train cycle.
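The following condensed sketch shows how some of these sections fit together in
a service template. It is loosely modeled on the Keystone service and is an
illustration only; the exact keys, image parameters and values vary by service
and release (a complete ``docker_config`` example appears in the Service
Bootstrap section below)::

    outputs:
      role_data:
        value:
          service_name: keystone
          puppet_config:
            config_volume: keystone
            puppet_tags: keystone_config
            step_config: "include ::tripleo::profile::base::keystone"
            config_image: {get_param: DockerKeystoneConfigImage}
          kolla_config:
            /var/lib/kolla/config_files/keystone.json:
              command: /usr/sbin/httpd -DFOREGROUND
              config_files:
                - source: "/var/lib/kolla/config_files/src/*"
                  dest: "/"
                  merge: true
                  preserve_properties: true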
Container steps
...............
Similar to baremetal, containers are brought up in a stepwise manner. The
current architecture supports bringing up baremetal services alongside
containers. Therefore, baremetal steps may be required depending on the service
and they are always executed before the corresponding container step.
The list below represents the correlation between the baremetal and the
containers steps. These steps are executed sequentially:
* Containers config files generated per hiera settings.
* Host Prep
* Load Balancer configuration baremetal
* Step 1 external steps (execute Ansible on Undercloud)
* Step 1 deployment steps (Ansible)
* Common Deployment steps
* Step 1 baremetal (Puppet)
* Step 1 containers
* Core Services (Database/Rabbit/NTP/etc.)
* Step 2 external steps (execute Ansible on Undercloud)
* Step 2 deployment steps (Ansible)
* Common Deployment steps
* Step 2 baremetal (Puppet)
* Step 2 containers
* Early OpenStack Service setup (Ringbuilder, etc.)
* Step 3 external steps (execute Ansible on Undercloud)
* Step 3 deployment steps (Ansible)
* Common Deployment steps
* Step 3 baremetal (Puppet)
* Step 3 containers
* General OpenStack Services
* Step 4 external steps (execute Ansible on Undercloud)
* Step 4 deployment steps (Ansible)
* Common Deployment steps
* Step 4 baremetal (Puppet)
* Step 4 containers (Keystone initialization occurs here)
* Service activation (Pacemaker)
* Step 5 external steps (execute Ansible on Undercloud)
* Step 5 deployment steps (Ansible)
* Common Deployment steps
* Step 5 baremetal (Puppet)
* Step 5 containers
Service Bootstrap
~~~~~~~~~~~~~~~~~
Bootstrapping services is a one-shot operation for most services and it's done
by defining a separate container that shares the same structure as the main
service container commonly defined under the `docker_step` number 3 (see `Container
steps`_ section above).
Unlike normal service containers, the bootstrap container should be run in the
foreground - `detach: false` - so there can be more control over when the
execution is done and whether it succeeded or not.
Example taken from Glance's service file::
docker_config:
step_3:
glance_api_db_sync:
image: *glance_image
net: host
privileged: false
detach: false
volumes: &glance_volumes
- /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
- /etc/localtime:/etc/localtime:ro
- /lib/modules:/lib/modules:ro
- /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro
- /run:/run
- /dev:/dev
- /etc/hosts:/etc/hosts:ro
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
step_4:
glance_api:
image: *glance_image
net: host
privileged: false
restart: always
volumes: *glance_volumes
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@ -1,30 +0,0 @@
Building a Single Image
=======================
The ``openstack overcloud image build --all`` command builds all the images
needed for an overcloud deploy. However, you may need to rebuild a single
one of them. Use the following command if you want to do that::
openstack overcloud image build --type {agent-ramdisk|deploy-ramdisk|fedora-user|overcloud-full}
If the target image exists, this command ends silently. Make sure to delete any
previous version of the image so that the command runs as you expect.
Uploading the New Single Image
------------------------------
After the new image is built, it can be uploaded using the same command as
before, with the ``--update-existing`` flag added::
openstack overcloud image upload --update-existing
Note that if the new image is a ramdisk, the Ironic nodes need to be
re-configured to use it. This can be done by re-running::
openstack overcloud node configure --all-manageable
.. note::
    If you want to use custom images for boot configuration, specify their names
    in the ``--deploy-kernel`` and ``--deploy-ramdisk`` options.
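For instance, a hypothetical invocation using custom image names (the image
names below are placeholders) might be::

    openstack overcloud node configure --all-manageable \
        --deploy-kernel my-deploy-kernel --deploy-ramdisk my-deploy-ramdisk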
Now the new image should be fully ready for use by new deployments.
@ -1,552 +0,0 @@
.. _prepare-environment-containers:
Container Image Preparation
===========================
This documentation explains how to instruct the container image preparation
step to perform different preparation tasks.
Choosing an image registry strategy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Container images need to be pulled from an image registry which is reliably
available to overcloud nodes. The three common options to serve images are to
use the default registry, the registry available on the undercloud, or an
independently managed registry.
.. note:: Private SSL-enabled registries with a custom CA are not tested.
If you have to use one, the custom CA (certificate authority) that is needed
for the registry should be installed before deploying the overcloud. For
example, it can be injected into the overcloud image, or installed via first
boot scripts.
During deployment the environment parameter
`ContainerImagePrepare` is used to specify any desired behaviour, including:
- Where to pull images from
- Optionally, which local repository to push images to
- How to discover the latest versioned tag for each image
In the following examples, the parameter `ContainerImagePrepare` will be
specified in its own file `containers-prepare-parameters.yaml`.
Default registry
................
By default the images will be pulled from a remote registry namespace such as
`docker.io/tripleomaster`. This is fine for development or POC clouds but is
not appropriate for production clouds due to the transfer of large amounts of
duplicate image data over a potentially unreliable internet connection.
During deployment with this default, any heat parameters which refer to
required container images will be populated with a value pointing at the
default registry, with a tag representing the latest image version.
To generate the `containers-prepare-parameters.yaml` containing these defaults,
run this command::
openstack tripleo container image prepare default \
--output-env-file containers-prepare-parameters.yaml
This will generate a file containing a `ContainerImagePrepare` similar to the
following::
parameter_defaults:
ContainerImagePrepare:
- set:
ceph_image: daemon
ceph_namespace: docker.io/ceph
ceph_tag: v4.0.0-stable-4.0-nautilus-centos-7-x86_64
name_prefix: centos-binary-
name_suffix: ''
namespace: docker.io/tripleomaster
neutron_driver: null
tag: current-tripleo
tag_from_label: rdo_version
During deployment, this will look up images in `docker.io/tripleomaster` tagged
with `current-tripleo` and discover a versioned tag by looking up the label
`rdo_version`. This will result in the heat image parameters in the plan being
set with appropriate values, such as::
DockerNeutronMetadataImage: docker.io/tripleomaster/centos-binary-neutron-metadata-agent:35414701c176a6288fc2ad141dad0f73624dcb94_43527485
DockerNovaApiImage: docker.io/tripleomaster/centos-binary-nova-api:35414701c176a6288fc2ad141dad0f73624dcb94_43527485
.. note:: The tag is actually a Delorean hash. You can find out the versions
   of packages by using this tag.
   For example, the tag `35414701c176a6288fc2ad141dad0f73624dcb94_43527485`
   corresponds to this `Delorean repository`_.
.. _populate-local-registry-containers:
Undercloud registry
...................
As part of the undercloud install, an image registry is configured on port
`8787`. This can be used to increase reliability of image pulls, and minimise
overall network transfers.
The undercloud registry can be used by generating the following
`containers-prepare-parameters.yaml` file::
openstack tripleo container image prepare default \
--local-push-destination \
--output-env-file containers-prepare-parameters.yaml
This will generate a file containing a `ContainerImagePrepare` similar to the
following::
parameter_defaults:
ContainerImagePrepare:
- push_destination: true
set:
ceph_image: daemon
ceph_namespace: docker.io/ceph
ceph_tag: v4.0.0-stable-4.0-nautilus-centos-7-x86_64
name_prefix: centos-binary-
name_suffix: ''
namespace: docker.io/tripleomaster
neutron_driver: null
tag: current-tripleo
tag_from_label: rdo_version
This is identical to the default registry, except for the `push_destination:
true` entry which indicates that the address of the local undercloud registry
will be discovered at upload time.
By specifying a `push_destination` value such as `192.168.24.1:8787`, during
deployment all images will be pulled from the remote registry then pushed to
the specified registry. The resulting image parameters will also be modified to
refer to the images in `push_destination` instead of `namespace`.
.. admonition:: Stein and newer
:class: stein
Prior to Stein, Docker Registry v2 (provided by "Docker
Distribution" package), was the service running on tcp 8787.
Since Stein it has been replaced with an Apache vhost called
"image-serve", which serves the containers on tcp 8787 and
supports podman or buildah pull commands. Though podman or buildah
tag, push, and commit commands are not supported, they are not
necessary because the same functionality may be achieved through
use of the "sudo openstack tripleo container image prepare"
commands described in this document.
Running container image prepare
...............................
The prepare operations are run at the following times:
#. During ``undercloud install`` when `undercloud.conf` has
`container_images_file=$HOME/containers-prepare-parameters.yaml` (see
:ref:`install_undercloud`)
#. During ``overcloud deploy`` when a `ContainerImagePrepare` parameter is
provided by including the argument `-e
$HOME/containers-prepare-parameters.yaml`
(see :ref:`overcloud-prepare-container-images`)
#. Any other time when ``sudo openstack tripleo container image prepare`` is run
As seen in the last of the above commands, ``sudo openstack tripleo
container image prepare`` may be run without ``default`` to set up an
undercloud registry without deploying the overcloud. It is run with
``sudo`` because it needs to write to `/var/lib/image-serve` on the
undercloud.
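For example, a standalone prepare run that reads the prepare parameters and
writes the resulting image parameters to an environment file might look like
the following; the file paths are only examples::

    sudo openstack tripleo container image prepare \
        -e ~/containers-prepare-parameters.yaml \
        --output-env-file ~/overcloud-images.yaml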
Options available in heat parameter ContainerImagePrepare
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To do something different from the above two registry scenarios, your custom
environment can set the value of the ContainerImagePrepare heat parameter to
achieve any desired registry and image scenario.
Discovering versioned tags with tag_from_label
..............................................
If you want these parameters to have the actual tag `current-tripleo` instead of
the discovered tag (in this case the Delorean hash,
`35414701c176a6288fc2ad141dad0f73624dcb94_43527485` ) then the `tag_from_label`
entry can be omitted.
Likewise, if all images should be deployed with a different tag, the value of
`tag` can be set to the desired tag.
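For illustration, a hypothetical entry that pins every image to an explicit tag
and omits `tag_from_label` could look like this::

    ContainerImagePrepare:
    - set:
        namespace: docker.io/tripleomaster
        name_prefix: centos-binary-
        name_suffix: ''
        tag: current-tripleo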
Some build pipelines have a versioned tag which can only be discovered via a
combination of labels. For this case, a template format can be specified
instead::
tag_from_label: {version}-{release}
It's possible to use the above feature while also disabling it only
for a subset of images by using an `includes` and `excludes` list as
described later in this document. This is useful when using the above
while also using containers from external projects, such as Ceph, which
don't follow the same convention.
Copying images with push_destination
....................................
By specifying a `push_destination`, the required images will be copied from
`namespace` to this registry, for example::
ContainerImagePrepare:
- push_destination: 192.168.24.1:8787
set:
namespace: docker.io/tripleomaster
...
This will result in images being copied from `docker.io/tripleomaster` to
`192.168.24.1:8787/tripleomaster` and heat parameters set with values such as::
DockerNeutronMetadataImage: 192.168.24.1:8787/tripleomaster/centos-binary-neutron-metadata-agent:35414701c176a6288fc2ad141dad0f73624dcb94_43527485
DockerNovaApiImage: 192.168.24.1:8787/tripleomaster/centos-binary-nova-api:35414701c176a6288fc2ad141dad0f73624dcb94_43527485
.. note:: Use the IP address of your undercloud, which you previously set with
the `local_ip` parameter in your `undercloud.conf` file. For these example
commands, the address is assumed to be `192.168.24.1:8787`.
By setting different values for `namespace` and `push_destination` any
alternative registry strategy can be specified.
Ceph and other set options
..........................
The options `ceph_namespace`, `ceph_image`, and `ceph_tag` are similar to
`namespace` and `tag` but they specify the values for the ceph image. It will
often come from a different registry, and have a different versioned tag
policy.
The values in the `set` map are used when evaluating the file
`/usr/share/openstack-tripleo-common/container-images/tripleo_containers.yaml.j2`
as a Jinja2 template. This file contains the list of every container image and
how it relates to TripleO services and heat parameters.
If Ceph is not part of the overcloud deployment, it's possible to skip pulling
the related containers by setting the `ceph_images` parameter to false as shown
in the example below::
ContainerImagePrepare:
- push_destination: 192.168.24.1:8787
set:
ceph_images: false
By doing this, the Ceph container images are not pulled from the remote registry
during the deployment.
Authenticated Registries
........................
If a container registry requires a username and password, then those
values may be passed using the following syntax::
ContainerImagePrepare:
- push_destination: 192.168.24.1:8787
set:
namespace: quay.io/...
...
ContainerImageRegistryCredentials:
'quay.io': {'<your_quay_username>': '<your_quay_password>'}
.. note:: If the `ContainerImageRegistryCredentials` contain the credentials
for a registry whose name matches the `ceph_namespace` parameter, those
credentials will be extracted and passed to ceph-ansible as the
`ceph_docker_registry_username` and `ceph_docker_registry_password` parameters.
Layering image preparation entries
..................................
Since the value of `ContainerImagePrepare` is a list, multiple entries can be
specified, and later entries will overwrite any earlier ones. Consider the
following::
ContainerImagePrepare:
- tag_from_label: rdo_version
push_destination: true
excludes:
- nova-api
set:
namespace: docker.io/tripleomaster
name_prefix: centos-binary-
name_suffix: ''
tag: current-tripleo
- push_destination: true
includes:
- nova-api
set:
namespace: mylocal
tag: myhotfix
This will result in the following heat parameters, which show a
:ref:`locally built <build_container_images>` and tagged `centos-binary-nova-api`
being used for `DockerNovaApiImage`::
DockerNeutronMetadataImage: 192.168.24.1:8787/tripleomaster/centos-binary-neutron-metadata-agent:35414701c176a6288fc2ad141dad0f73624dcb94_43527485
DockerNovaApiImage: 192.168.24.1:8787/mylocal/centos-binary-nova-api:myhotfix
The `includes` and `excludes` entries can control the resulting image list in
addition to the filtering which is determined by roles and containerized
services in the plan. `includes` matches take precedence over `excludes`
matches, followed by role/service filtering. The image name must contain the
value within it to be considered a match.
The `includes` and `excludes` list is useful when pulling OpenStack
images using `tag_from_label: '{version}-{release}'` while also
pulling images which are not tagged the same way. The following
example shows how to do this with Ceph::
ContainerImagePrepare:
- push_destination: true
set:
namespace: docker.io/tripleomaster
name_prefix: centos-binary-
name_suffix: ''
tag: current-tripleo
tag_from_label: '{version}-{release}'
excludes: [ceph]
- push_destination: true
set:
ceph_image: ceph
ceph_namespace: docker.io/ceph
ceph_tag: latest
includes: [ceph]
Modifying images during prepare
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to modify images during prepare to make any required changes,
then immediately deploy with those changes. The use-cases for modifying images
include:
- As part of a Continuous Integration pipeline where images are modified with
the changes being tested before deployment
- As part of a development workflow where local changes need to be deployed for
testing and development
- When changes need to be deployed but are not available through an image
build pipeline (proprietary addons, emergency fixes)
The modification is done by invoking an ansible role on each image which needs
to be modified. The role takes a source image, makes the requested changes,
then tags the result. The prepare can then push the image and set the heat
parameters to refer to the modified image. The modification is done in
the undercloud registry so it is not possible to use this feature when
using the Default registry, where images are pulled directly from a
remote registry during deployment.
The ansible role `tripleo-modify-image`_ conforms with the required role
interface, and provides the required behaviour for the modify use-cases.

Modification is controlled via modify-specific keys in the
`ContainerImagePrepare` parameter:
- `modify_role` specifies what ansible role to invoke for each image to modify.
- `modify_append_tag` is used to append to the end of the
source image tag. This makes it obvious that the resulting image has been
modified. It is also used to skip modification if the `push_destination`
registry already has that image, so it is recommended to change
`modify_append_tag` whenever the image must be modified.
- `modify_vars` is a dictionary of ansible variables to pass to the role.
The different use-cases handled by role `tripleo-modify-image`_ are selected by
setting the `tasks_from` variable to the required file in that role. For all of
the following examples, see the documentation for the role
`tripleo-modify-image`_ for the other variables supported by that `tasks_from`.
While developing and testing the `ContainerImagePrepare` entries which modify
images, it is recommended to run prepare on its own to confirm it is being
modified as expected::
sudo openstack tripleo container image prepare \
-e ~/containers-prepare-parameters.yaml
Updating existing packages
..........................
The following entries will result in all packages being updated in the images,
but using the undercloud host's yum repository configuration::
ContainerImagePrepare:
- push_destination: true
...
modify_role: tripleo-modify-image
modify_append_tag: "-updated"
modify_vars:
tasks_from: yum_update.yml
compare_host_packages: true
yum_repos_dir_path: /etc/yum.repos.d
...
Install RPM files
.................
It is possible to install a directory of RPM files, which is useful for
installing hotfixes, local package builds, or any package which is not
available through a package repository. For example the following would install
some hotfix packages only in the `centos-binary-nova-compute` image::
ContainerImagePrepare:
- push_destination: true
...
includes:
- nova-compute
modify_role: tripleo-modify-image
modify_append_tag: "-hotfix"
modify_vars:
tasks_from: rpm_install.yml
rpms_path: /home/stack/nova-hotfix-pkgs
...
Modify with custom Dockerfile
.............................
For maximum flexibility, it is possible to specify a directory containing a
`Dockerfile` to make the required changes. When the role is invoked, a
`Dockerfile.modified` is generated which changes the `FROM` directive and adds
extra `LABEL` directives. The following example runs the custom
`Dockerfile` on the `centos-binary-nova-compute` image::
ContainerImagePrepare:
- push_destination: true
...
includes:
- nova-compute
modify_role: tripleo-modify-image
modify_append_tag: "-hotfix"
modify_vars:
tasks_from: modify_image.yml
modify_dir_path: /home/stack/nova-custom
...
An example `/home/stack/nova-custom/Dockerfile` follows. Note that after any
`USER root` directives have been run, it is necessary to switch back to the
original image default user::
FROM docker.io/tripleomaster/centos-binary-nova-compute:latest
USER root
COPY customize.sh /tmp/
RUN /tmp/customize.sh
USER "nova"
.. _Delorean repository: https://trunk.rdoproject.org/centos7-master/ac/82/ac82ea9271a4ae3860528eaf8a813da7209e62a6_28eeb6c7/
.. _tripleo-modify-image: https://github.com/openstack/ansible-role-tripleo-modify-image
Modify with Python source code installed via pip from OpenDev Gerrit
....................................................................
If you would like to build an image and apply your patch in a Python project in
OpenStack, you can use this example::
ContainerImagePrepare:
- push_destination: true
...
includes:
- heat-api
modify_role: tripleo-modify-image
modify_append_tag: "-devel"
modify_vars:
tasks_from: dev_install.yml
source_image: docker.io/tripleomaster/centos-binary-heat-api:current-tripleo
refspecs:
-
project: heat
refspec: refs/changes/12/1234/3
...
It will produce a modified image with Python source code installed via pip.
Building hotfixed containers
............................
The `tripleoclient` OpenStack plugin provides a command line interface which
will allow operators to apply packages (hotfixes) to running containers. This
capability leverages the **tripleo-modify-image** role, and automates its
application to a set of containers for a given collection of packages.
Using the provided command line interface is simple. The interface has very few
required options. The noted options below inform the tooling which containers
need to have the hotfix(es) applied, and where to find the hotfixed package(s).
============ =================================================================
option Description
============ =================================================================
--image      The `--image` argument requires a fully qualified image
             name, something like *localhost/image/name:tag-data*. The
             `--image` option can be used more than once, which will inform
             the tooling that multiple containers need to have the same
             hotfix packages applied.
--rpms-path The `--rpms-path` argument requires the full path to a
directory where RPMs exist. The RPMs within this directory will
be installed into the container, producing a new layer for an
existing container.
--tag The `--tag` argument is optional, though it is recommended to
be used. The value of this option will append to the tag of the
running container. By using the tag argument, images that have
been modified can be easily identified.
============ =================================================================
With all of the required information, the command to modify existing container
images can be executed like so.
.. code-block:: shell
# The shell variables need to be replaced with data that pertains to the given environment.
openstack tripleo container image hotfix --image ${FULLY_QUALIFIED_IMAGE_NAME} \
--rpms-path ${RPM_DIRECTORY} \
--tag ${TAG_VALUE}
When this command completes, new container images will be available on the
local system and are ready to be integrated into the environment.
You should see the image built on your local system via the buildah CLI:
.. code-block:: shell
# The shell variables need to be replaced with data that pertains to the given environment.
sudo buildah images | grep ${TAG_VALUE}
Here is an example of how to push it into the TripleO Container registry:
.. code-block:: shell
# ${IMAGE} is in this format: <registry>/<namespace>/<name>:<tag>
sudo openstack tripleo container image push --local \
--registry-url 192.168.24.1:8787 ${IMAGE}
.. note::
Container images can be pushed to the TripleO Container registry or
a Docker Registry (using basic auth or the bearer token auth).
Now that your container image is pushed into a registry, you can deploy it
where it's needed. Two ways are supported:
* (Long but persistent): Update Container$NameImage, where $Name is the name of
  the service to update (e.g. ContainerNovaComputeImage). The parameters
  can be found in TripleO Heat Templates. Once you have updated it in your
  environment, re-run the "openstack overcloud deploy" command
  and the necessary hosts will get the new container.
Example::
parameter_defaults:
# Replace the values by where the image is stored
ContainerNovaComputeImage: <registry>/<namespace>/<name>:<tag>
* (Short but not persistent after a minor update): Run Paunch or Ansible
to update the container on a host. The procedure is already documented
in the :doc:`./tips_tricks` manual.
Once the hotfixed container image has been deployed, it's very important to
check that the container is running with the right rpm version.
For example, if the nova-compute container was updated with a new hotfix image,
we want to check that the right nova-compute rpm is installed:
.. code-block:: shell
sudo podman exec -ti -u root nova_compute rpm -qa | grep nova-compute
It will return the version of the openstack-nova-compute rpm and we can compare
it with the one that was delivered via rpm. If the version is not correct (e.g.
older), it means that the hotfix image is wrong and doesn't contain the rpm
provided to build the new image. The image has to be rebuilt and redeployed.
@ -1,4 +0,0 @@
Deployment Log
^^^^^^^^^^^^^^
The ansible part of the deployment creates a log file that is saved on the
undercloud. The log file is available at ``$HOME/ansible.log``.
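To follow the configuration while it is being applied, the log can be tailed
from another shell on the undercloud, for example::

    tail -f $HOME/ansible.log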
@ -1,31 +0,0 @@
Deployment Output
^^^^^^^^^^^^^^^^^
After the tripleo-admin user is created, ``ansible-playbook`` will be used to
configure the overcloud nodes.
The output from ``ansible-playbook`` will begin to appear in the console
and will be updated periodically as more tasks are applied.
When Ansible is finished, a play recap will be shown, and the usual overcloudrc
details will then be displayed. The following is an example of the end of the
output from a successful deployment::
    PLAY RECAP ****************************************************************
    compute-0    : ok=134  changed=48  unreachable=0  failed=0
    openstack-0  : ok=164  changed=28  unreachable=0  failed=0
    openstack-1  : ok=160  changed=28  unreachable=0  failed=0
    openstack-2  : ok=160  changed=28  unreachable=0  failed=0
    pacemaker-0  : ok=138  changed=30  unreachable=0  failed=0
    pacemaker-1  : ok=138  changed=30  unreachable=0  failed=0
    pacemaker-2  : ok=138  changed=30  unreachable=0  failed=0
    undercloud   : ok=2    changed=0   unreachable=0  failed=0
Overcloud configuration completed.
Overcloud Endpoint: http://192.168.24.8:5000/
Overcloud rc file: /home/stack/overcloudrc
Overcloud Deployed
When a failure happens, the deployment will stop and the error will be shown.
Review the ``PLAY RECAP`` which will show each host that is part of the
overcloud and the grouped count of each task status.
@ -1,33 +0,0 @@
Deployment Status
^^^^^^^^^^^^^^^^^
Since Heat is no longer the source of authority on the status of the overcloud
deployment, a new tripleoclient command is available to show the overcloud
deployment status::
openstack overcloud status
The output will report the status of the deployment, taking into consideration
the result of all the steps to do the full deployment. The following is an
example of the output::
    [stack@undercloud ]$ openstack overcloud status
    +------------+-------------------+
    | Stack Name | Deployment Status |
    +------------+-------------------+
    | overcloud  | DEPLOY_SUCCESS    |
    +------------+-------------------+
A different stack name can be specified with ``--stack``::
    [stack@undercloud ]$ openstack overcloud status --stack my-deployment
    +---------------+-------------------+
    | Stack Name    | Deployment Status |
    +---------------+-------------------+
    | my-deployment | DEPLOY_SUCCESS    |
    +---------------+-------------------+
The deployment status is stored in a YAML file generated at
``$HOME/overcloud-deploy/<stack>/<stack>-deployment_status.yaml`` on
the undercloud node.
@ -1,169 +0,0 @@
.. _ephemeral_heat:
Ephemeral Heat
==============
Introduction
------------
Ephemeral Heat is a means to install the overcloud by using an ephemeral Heat
process instead of a system installed Heat process. This change is possible
beginning in the Wallaby release.
In a typical undercloud, Heat is installed on the undercloud and its processes
run in podman containers for heat-api and heat-engine. When using ephemeral
Heat, there is no longer a requirement that Heat is installed on the
undercloud; instead, these processes are started on demand by the deployment,
update, and upgrade commands.
This model is already in use within TripleO for both the undercloud and
:ref:`standalone <standalone>` installation methods, which start an on-demand
all-in-one heat-all process in order to perform only the installation. Using
ephemeral Heat in this way allows for re-use of the Heat templates from
tripleo-heat-templates without requiring an already fully installed
undercloud.
Description
-----------
Ephemeral Heat is enabled by passing the ``--heat-type`` argument to
``openstack overcloud deploy``. The ephemeral process can also be launched
outside of a deployment with the ``openstack tripleo launch heat`` command. The
latter command also takes a ``--heat-type`` argument to enable selecting the
type of Heat process to use.
Heat types
__________
The ``--heat-type`` argument allows for the following options described below.
installed
Use the system Heat installation. This is the historical TripleO usage of
Heat with Heat fully installed on the undercloud. This is the default
value, and requires a fully installed undercloud.
native
Use an ephemeral ``heat-all`` process. The process will be started natively
on the system executing tripleoclient commands by way of an OS (operating
system) fork.
container
A podman container will be started on the executing system that runs a
single ``heat-all`` process.
pod
A podman pod will be started on the executing system that runs containers
for ``heat-api`` and ``heat-engine``.
In all cases, the process(es) are terminated at the end of the deployment.
.. note::
The native and container methods are limited in scale due to being a single
Heat process. Deploying more than 3 nodes or 2 roles will significantly
impact the deployment time with these methods as Heat has only a single
worker thread.
Using the installed or pod methods enable scaling node and role counts as
is typically required.
Using
-----
The following example shows using ``--heat-type`` to enable ephemeral Heat::
openstack overcloud deploy \
--stack overcloud \
--work-dir ~/overcloud-deploy/overcloud \
--heat-type <pod|container|native> \
<other cli arguments>
With ephemeral Heat enabled, several additional deployment artifacts are
generated related to the management of the Heat process(es). These artifacts
are generated under the working directory of the deployment in a
``heat-launcher`` subdirectory. The working directory can be overridden with
the ``--work-dir`` argument.
Using the above example, the Heat artifact directory would be located at
``~/overcloud-deploy/overcloud/heat-launcher``. An example of the directory
contents is shown below::
[centos@ephemeral-heat ~]$ ls -l ~/overcloud-deploy/overcloud/heat-launcher/
total 41864
-rw-rw-r--. 1 centos centos 650 Mar 24 18:39 api-paste.ini
-rw-rw-r--. 1 centos centos 1054 Mar 24 18:39 heat.conf
-rw-rw-r--. 1 centos centos 42852118 Mar 24 18:31 heat-db-dump.sql
-rw-rw-r--. 1 centos centos 2704 Mar 24 18:39 heat-pod.yaml
drwxrwxr-x. 2 centos centos 49 Mar 24 16:02 log
-rw-rw-r--. 1 centos centos 1589 Mar 24 18:39 token_file.json
The directory contains the necessary files to inspect and debug the Heat
process(es), and if necessary reproduce the deployment.
.. note::
The consolidated log file for the Heat process is the ``log`` file in the
``heat-launcher`` directory.
Launching Ephemeral Heat
________________________
Outside of a deployment, the ephemeral Heat process can also be started with the
``openstack tripleo launch heat`` command. This can be used to interactively
use the ephemeral Heat process or to debug a previous deployment.
When combined with ``--heat-dir`` and ``--restore-db``, the command can be used
to restore the Heat process and database from a previous deployment::
openstack tripleo launch heat \
--heat-type pod \
--heat-dir ~/overcloud-deploy/overcloud/heat-launcher \
--restore-db
The command will exit after launching the Heat process, and the Heat process
will continue to run in the background.
Interacting with ephemeral Heat
...............................
With the ephemeral Heat process launched and running, ``openstackclient`` can be
used to interact with the Heat API. The following shell environment
configuration must be set up in order to access the Heat API::
unset OS_CLOUD
unset OS_PROJECT_NAME
unset OS_PROJECT_DOMAIN_NAME
unset OS_USER_DOMAIN_NAME
export OS_AUTH_TYPE=none
export OS_ENDPOINT=http://127.0.0.1:8006/v1/admin
You can also use the ``OS_CLOUD`` environment variable to set up the same access::
export OS_CLOUD=heat
Once the environment is configured, ``openstackclient`` works as expected
against the Heat API::
[centos@ephemeral-heat ~]$ openstack stack list
+--------------------------------------+------------+---------+-----------------+----------------------+--------------+
| ID | Stack Name | Project | Stack Status | Creation Time | Updated Time |
+--------------------------------------+------------+---------+-----------------+----------------------+--------------+
| 761e2a54-c6f9-4e0f-abe6-c8e0ad51a76c | overcloud | admin | CREATE_COMPLETE | 2021-03-22T20:48:37Z | None |
+--------------------------------------+------------+---------+-----------------+----------------------+--------------+
Killing ephemeral Heat
......................
To stop the ephemeral Heat process previously started with ``openstack tripleo
launch heat``, use the ``--kill`` argument::
openstack tripleo launch heat \
--heat-type pod \
--heat-dir ~/overcloud-deploy/overcloud/heat-launcher \
--kill
Limitations
-----------
Ephemeral Heat currently only supports new deployments. Update and Upgrade
support for deployments that previously used the system installed Heat will be
coming.
@ -1,35 +0,0 @@
TripleO OpenStack Deployment
============================
This section describes how to deploy OpenStack clouds on containers, either on
the undercloud or the overcloud.
.. toctree::
:maxdepth: 1
undercloud
install_undercloud
overcloud
install_overcloud
TripleO Deployment Advanced Topics
==================================
This section has additional documentation around advanced deployment related topics.
.. toctree::
:maxdepth: 1
3rd_party
ansible_config_download
ansible_config_download_differences
architecture
build_single_image
container_image_prepare
ephemeral_heat
instack_undercloud
network_v2
standalone
template_deploy
tips_tricks
upload_single_image
@ -1,227 +0,0 @@
(DEPRECATED) Installing the Undercloud
--------------------------------------
.. note::
Instack-undercloud is deprecated in Rocky cycle. Containerized undercloud
should be installed instead. See :doc:`undercloud` for backward
compatibility related information.
.. note::
   Please ensure all your nodes (undercloud, compute, controllers, etc) have
   their internal clocks set to UTC in order to prevent any issues with
   future-dated file timestamps if hwclock is synced before any timezone
   offset is applied.
#. Log in to your machine (baremetal or VM) where you want to install the
undercloud as a non-root user (such as the stack user)::
ssh <non-root-user>@<undercloud-machine>
.. note::
If you don't have a non-root user created yet, log in as root and create
one with following commands::
sudo useradd stack
sudo passwd stack # specify a password
echo "stack ALL=(root) NOPASSWD:ALL" | sudo tee -a /etc/sudoers.d/stack
sudo chmod 0440 /etc/sudoers.d/stack
su - stack
.. note::
The undercloud is intended to work correctly with SELinux enforcing.
Installations with the permissive/disabled SELinux are not recommended.
The ``undercloud_enable_selinux`` config option controls that setting.
.. note::
   VLAN tagged interfaces must follow the if_name.vlan_id naming convention,
   for example: eth0.vlan100 or bond0.vlan120.
.. admonition:: Baremetal
:class: baremetal
Ensure that there is a FQDN hostname set and that the $HOSTNAME environment
variable matches that value. The easiest way to do this is to set the
``undercloud_hostname`` option in undercloud.conf before running the
install. This will allow the installer to configure all of the hostname-
related settings appropriately.
Alternatively the hostname settings can be configured manually, but
this is strongly discouraged. The manual steps are as follows::
sudo hostnamectl set-hostname myhost.mydomain
sudo hostnamectl set-hostname --transient myhost.mydomain
An entry for the system's FQDN hostname is also needed in /etc/hosts. For
example, if the system is named *myhost.mydomain*, /etc/hosts should have
an entry like::
127.0.0.1 myhost.mydomain myhost
#. Enable needed repositories:
.. admonition:: RHEL
:class: rhel
Enable optional repo::
sudo yum install -y yum-utils
sudo yum-config-manager --enable rhelosp-rhel-7-server-opt
.. include:: ../repositories.rst
#. Install the TripleO CLI, which will pull in all other necessary packages as dependencies::
sudo yum install -y python-tripleoclient
.. admonition:: Ceph
:class: ceph
If you intend to deploy Ceph in the overcloud, or configure the overcloud to use an external Ceph cluster, and are running Pike or newer, then install ceph-ansible on the undercloud::
sudo yum install -y ceph-ansible
#. Prepare the configuration file::
cp /usr/share/python-tripleoclient/undercloud.conf.sample ~/undercloud.conf
It is backwards compatible with non-containerized instack underclouds.
.. admonition:: Stable Branch
:class: stable
For a non-containerized undercloud, copy in the sample configuration
file and edit it to reflect your environment::
cp /usr/share/instack-undercloud/undercloud.conf.sample ~/undercloud.conf
.. note:: There is a tool available that can help with writing a basic
``undercloud.conf``:
`Undercloud Configuration Wizard <http://ucw.tripleo.org/>`_
It takes some basic information about the intended overcloud
environment and generates sane values for a number of the important
options.
#. (OPTIONAL) Generate configuration for preparing container images
As part of the undercloud install, an image registry is configured on port
`8787`. This is used to increase reliability of overcloud image pulls, and
minimise overall network transfers. The undercloud registry will be
populated with images required by the undercloud by generating the following
`containers-prepare-parameter.yaml` file and including it in
``undercloud.conf:
container_images_file=$HOME/containers-prepare-parameter.yaml``::
openstack tripleo container image prepare default \
--local-push-destination \
--output-env-file ~/containers-prepare-parameter.yaml
.. note::
This command is available since Rocky.
See :ref:`prepare-environment-containers` for details on using
`containers-prepare-parameter.yaml` to control what can be done
during the container images prepare phase of an undercloud install.
Additionally, the ``docker_insecure_registries`` and ``docker_registry_mirror``
parameters allow you to customize container registries via the
``undercloud.conf`` file.
#. (OPTIONAL) Override heat parameters and environment files used for undercloud
deployment.
Similarly to overcloud deployments (see :ref:`override-heat-templates` and
:ref:`custom-template-location`), the ``undercloud.conf: custom_env_files``
and ``undercloud.conf: templates`` configuration parameters allow you to
use a custom heat templates location and to override or specify additional
information for Heat resources used for undercloud deployment.
Additionally, the ``undercloud.conf: roles_file`` parameter brings in the
ultimate flexibility of :ref:`custom_roles` and :ref:`composable_services`.
This allows you to deploy an undercloud composed of highly customized
containerized services, with the same workflow that TripleO uses for
overcloud deployments.
.. note:: The CLI and configuration interface used to deploy a containerized
   undercloud is the same as that used by 'legacy' non-containerized
   underclouds. As noted above, however, the mechanism by which the undercloud
   is actually deployed has completely changed and, what is more, for the
   first time aligns with the overcloud deployment. See the command
   ``openstack tripleo deploy --standalone`` help for details.
   That interface extension for standalone clouds is experimental for Rocky.
   It normally should not be used directly for undercloud installations.
#. Run the command to install the undercloud:
.. admonition:: SSL
:class: optional
To deploy an undercloud with SSL, see :doc:`../features/ssl`.
.. admonition:: Validations
:class: validations
:doc:`../post_deployment/validations/index` will be installed and
configured during undercloud installation. You can set
``enable_validations = false`` in ``undercloud.conf`` to prevent
that.
To deploy an undercloud::
openstack undercloud install
.. note::
The undercloud is containerized by default as of Rocky.
.. note::
It's possible to enable verbose logging with ``--verbose`` option.
Since Rocky, we run all the OpenStack services in a moby container runtime
unless the default settings are overwritten.
This command requires two services to be running at all times. The first one is
a basic keystone service, which is currently executed by `tripleoclient` itself;
the second one is `heat-all`, which executes the templates and installs the
services. The latter can be run on baremetal or in a container (tripleoclient
will run it in a container by default).
Once the install has completed, you should take note of the files ``stackrc`` and
``undercloud-passwords.conf``. You can source ``stackrc`` to interact with the
undercloud via the OpenStack command-line client. The ``undercloud-passwords.conf``
file contains the passwords used for each service in the undercloud. These passwords
will be automatically reused if the undercloud is reinstalled on the same system,
so it is not necessary to copy them to ``undercloud.conf``.
.. note:: Heat installer configuration, logs and state are ephemeral for
   undercloud deployments. Generated artifacts for subsequent deployments get
   overwritten or removed (when ``undercloud.conf: cleanup = true``).
   However, you can still find them stored in compressed files.
Miscellaneous undercloud deployment artifacts, like processed heat templates and
compressed files, can be found in ``undercloud.conf: output_dir`` locations
like ``~/tripleo-heat-installer-templates``.
There is also a compressed file created and placed into the output dir, named as
``undercloud-install-<TS>.tar.bzip2``, where TS represents a timestamp.
Downloaded ansible playbooks and inventory files (see :ref:`config_download`)
used for undercloud deployment are stored in the tempdir
``~/undercloud-ansible-<XXXX>`` by default.
.. note::
Any passwords set in ``undercloud.conf`` will take precedence over the ones in
``undercloud-passwords.conf``.
.. note::
The used undercloud installation command can be rerun to reapply changes from
``undercloud.conf`` to the undercloud. Note that this should **not** be done
if an overcloud has already been deployed or is in progress.
.. note::
   If running ``docker`` commands as the stack user after an undercloud install
   fails with a permission error, log out and log in again. The stack user does
   get added to the docker group during the install, but that change is only
   reflected after a new login.
@ -1,712 +0,0 @@
.. _basic-deployment-cli:
Basic Deployment (CLI)
======================
These steps document a basic deployment with |project| in an environment using
the project defaults.
.. note::
Since Rocky, Ansible is used to deploy the software configuration of
the overcloud nodes using a feature called **config-download**. While
there are no necessary changes to the default deployment commands,
there are several differences to the deployer experience.
It's recommended to review these differences as documented at
:doc:`ansible_config_download_differences`
**config-download** is fully documented at
:doc:`ansible_config_download`
Prepare Your Environment
------------------------
#. Make sure you have your environment ready and undercloud running:
* :doc:`../environments/index`
* :doc:`undercloud`
#. Log into your undercloud virtual machine and become the non-root user (stack
by default)::
ssh root@<undercloud-machine>
su - stack
#. In order to use CLI commands easily you need to source needed environment
variables::
source stackrc
.. _basic-deployment-cli-get-images:
Get Images
----------
.. note::
If you already have images built, perhaps from a previous installation of
|project|, you can simply copy those image files into your non-root user's
home directory and skip this section.
If you do this, be aware that sometimes newer versions of |project| do not
work with older images, so if the deployment fails it may be necessary to
delete the older images and restart the process from this step.
Alternatively, images are available via RDO at
https://images.rdoproject.org/centos9/master/rdo_trunk/, which offers images from both the
CentOS Build System (cbs) and RDO Trunk (called rdo_trunk or delorean).
However, this mirror can be slow, so if you experience slow download speeds
you should skip to building the images instead.
The image files required are::
ironic-python-agent.initramfs
ironic-python-agent.kernel
overcloud-full.initrd
overcloud-full.qcow2
overcloud-full.vmlinuz
Images must be built prior to doing a deployment. An IPA ramdisk and an
overcloud-full image can both be built using tripleo-common.
It's recommended to build images on the installed undercloud directly since all
the dependencies are already present, but this is not a requirement.
The following steps can be used to build images. They should be run as the same
non-root user that was used to install the undercloud. If the images are not
created on the undercloud, one should use a non-root user.
#. Choose image operating system:
.. admonition:: CentOS
:class: centos
The image build with no arguments will build CentOS 8. It will include the
common YAML of
``/usr/share/openstack-tripleo-common/image-yaml/overcloud-images-python3.yaml``
and the CentOS YAML at
``/usr/share/openstack-tripleo-common/image-yaml/overcloud-images-centos8.yaml``.
.. admonition:: CentOS 9
:class: centos9
The default YAML for CentOS 9 is
``/usr/share/openstack-tripleo-common/image-yaml/overcloud-images-centos9.yaml``
::
export OS_YAML="/usr/share/openstack-tripleo-common/image-yaml/overcloud-images-centos9.yaml"
.. admonition:: RHEL
:class: rhel
The common YAML is
``/usr/share/openstack-tripleo-common/image-yaml/overcloud-images-python3.yaml``.
It must be specified along with the following.
The default YAML for RHEL is
``/usr/share/openstack-tripleo-common/image-yaml/overcloud-images-rhel8.yaml``
::
export OS_YAML="/usr/share/openstack-tripleo-common/image-yaml/overcloud-images-rhel8.yaml"
#. Install the ``current-tripleo`` delorean repository and deps repository:
.. include:: ../repositories.rst
3. Export environment variables
::
export DIB_YUM_REPO_CONF="/etc/yum.repos.d/delorean*"
.. admonition:: Ceph
:class: ceph
::
export DIB_YUM_REPO_CONF="$DIB_YUM_REPO_CONF /etc/yum.repos.d/tripleo-centos-ceph*.repo"
.. admonition:: CentOS 9
:class: centos9
::
export DIB_YUM_REPO_CONF="/etc/yum.repos.d/delorean* /etc/yum.repos.d/tripleo-centos-*"
.. admonition:: Stable Branch
:class: stable
.. admonition:: Victoria
:class: victoria
::
export STABLE_RELEASE="victoria"
.. admonition:: Ussuri
:class: ussuri
::
export STABLE_RELEASE="ussuri"
.. admonition:: Train
:class: train
::
export STABLE_RELEASE="train"
#. Build the required images:
.. admonition:: RHEL
:class: rhel
Download the RHEL 7.4 cloud image or copy it over from a different location,
for example:
``https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.4/x86_64/product-software``,
and define the needed environment variables for RHEL 7.4 prior to running
``tripleo-build-images``::
export DIB_LOCAL_IMAGE=rhel-server-7.4-x86_64-kvm.qcow2
.. admonition:: RHEL Portal Registration
:class: portal
To register the image builds to the Red Hat Portal define the following variables::
export REG_METHOD=portal
export REG_USER="[your username]"
export REG_PASSWORD="[your password]"
# Find this with `sudo subscription-manager list --available`
export REG_POOL_ID="[pool id]"
export REG_REPOS="rhel-7-server-rpms rhel-7-server-extras-rpms rhel-ha-for-rhel-7-server-rpms \
rhel-7-server-optional-rpms rhel-7-server-openstack-6.0-rpms"
.. admonition:: Ceph
:class: ceph
If using Ceph, additional channels need to be added to `REG_REPOS`.
Enable the appropriate channels for the desired release, as indicated below.
Do not enable any other channels not explicitly marked for that release.
::
rhel-7-server-rhceph-2-mon-rpms
rhel-7-server-rhceph-2-osd-rpms
rhel-7-server-rhceph-2-tools-rpms
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the image builds to a Satellite define the following
variables. Only using an activation key is supported when registering to
Satellite; username/password is not supported for security reasons. The
activation key must enable the repos shown::
export REG_METHOD=satellite
# REG_SAT_URL should be in the format of:
# http://<satellite-hostname>
export REG_SAT_URL="[satellite url]"
export REG_ORG="[satellite org]"
# Activation key must enable these repos:
# rhel-7-server-rpms
# rhel-7-server-optional-rpms
# rhel-7-server-extras-rpms
# rhel-7-server-openstack-6.0-rpms
# rhel-7-server-rhceph-{2,1.3}-mon-rpms
# rhel-7-server-rhceph-{2,1.3}-osd-rpms
# rhel-7-server-rhceph-{2,1.3}-tools-rpms
export REG_ACTIVATION_KEY="[activation key]"
::
openstack overcloud image build
..
.. admonition:: RHEL 9
:class: rhel9
::
openstack overcloud image build \
--config-file /usr/share/openstack-tripleo-common/image-yaml/overcloud-images-python3.yaml \
--config-file /usr/share/openstack-tripleo-common/image-yaml/overcloud-images-rhel9.yaml \
--config-file $OS_YAML
.. admonition:: CentOS 9
:class: centos9
::
openstack overcloud image build \
--config-file /usr/share/openstack-tripleo-common/image-yaml/overcloud-images-python3.yaml \
--config-file /usr/share/openstack-tripleo-common/image-yaml/overcloud-images-centos9.yaml \
--config-file $OS_YAML
See the help for ``openstack overcloud image build`` for further options.
The YAML files are cumulative. Order on the command line is important. The
packages, elements, and options sections will append. All others will overwrite
previously read values.
.. note::
This command will build **overcloud-full** images (\*.qcow2, \*.initrd,
\*.vmlinuz) and **ironic-python-agent** images (\*.initramfs, \*.kernel)
In order to build specific images, one can use the ``--image-name`` flag
to ``openstack overcloud image build``. It can be specified multiple times.
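For instance, to build only the IPA images, an invocation along these lines can
be used (the image name shown is illustrative and must match a name defined in
the image YAML configuration files)::

    openstack overcloud image build --image-name ironic-python-agent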
.. note::
If you want to use whole disk images with TripleO, please see :doc:`../provisioning/whole_disk_images`.
.. _basic-deployment-cli-upload-images:
Upload Images
-------------
Load the images into the containerized undercloud Glance::
openstack overcloud image upload
To upload a single image, see :doc:`upload_single_image`.
If working with multiple architectures and/or platforms within an architecture,
these attributes can be specified at upload time, as in::
openstack overcloud image upload
openstack overcloud image upload --arch x86_64 \
--httpboot /var/lib/ironic/httpboot/x86_64
openstack overcloud image upload --arch x86_64 --platform SNB \
--httpboot /var/lib/ironic/httpboot/x86_64-SNB
.. note::
Adding ``--httpboot`` is optional but suggested if you need to ensure that
the ``agent`` images are unique within your environment.
.. admonition:: Prior to Rocky release
:class: stable
Before Rocky, the undercloud wasn't containerized by default. Hence
you should use the ``/httpboot/*`` paths instead.
This will create 3 sets of images within the undercloud image service for later
use in deployment, see :doc:`../environments/baremetal`
.. _node-registration:
Register Nodes
--------------
Register and configure nodes for your deployment with Ironic::
openstack overcloud node import instackenv.json
The file to be imported may be either JSON, YAML or CSV format, and
the type is detected via the file extension (json, yaml, csv).
The file format is documented in :ref:`instackenv`.
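As a quick illustration, a minimal JSON file for a single IPMI-managed node
might look like the following sketch (the exact set of required fields depends
on the driver and release; see :ref:`instackenv` for the authoritative
format)::

    {
        "nodes": [
            {
                "name": "node-0",
                "pm_type": "ipmi",
                "pm_addr": "192.168.24.101",
                "pm_user": "admin",
                "pm_password": "password",
                "ports": [
                    {"address": "aa:bb:cc:dd:ee:ff"}
                ]
            }
        ]
    }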
The nodes' status will be set to ``manageable`` by default, so that
introspection may later be run. To also run introspection and make the
nodes available for deployment in one step, the following flags can be
used::
openstack overcloud node import --introspect --provide instackenv.json
Starting with the Newton release you can take advantage of the ``enroll``
provisioning state - see :doc:`../provisioning/node_states` for details.
If your hardware has several hard drives, it's highly recommended that you
specify the exact device to be used during introspection and deployment
as a root device. Please see :ref:`root_device` for details.
.. warning::
If you don't specify the root device explicitly, any device may be picked.
Also the device chosen automatically is **NOT** guaranteed to be the same
across rebuilds. Make sure to wipe the previous installation before
rebuilding in this case.
If there is information from previous deployments on the nodes' disks, it is
recommended to at least remove the partitions and partition table(s). See
:doc:`../provisioning/cleaning` for information on how to do it.
Finally, if you want your nodes to boot in the UEFI mode, additional steps may
have to be taken - see :doc:`../provisioning/uefi_boot` for details.
.. warning::
It's not recommended to delete nodes and/or rerun this command after
you have proceeded to the next steps. Particularly, if you start introspection
and then re-register nodes, you won't be able to retry introspection until
the previous one times out (1 hour by default). If you are having issues
with nodes after registration, please follow
:ref:`node_registration_problems`.
Another approach to enrolling nodes is
:doc:`../provisioning/node_discovery`.
.. _introspection:
Introspect Nodes
----------------
.. admonition:: Validations
:class: validations
Once the undercloud is installed, you can run the
``pre-introspection`` validations::
openstack tripleo validator run --group pre-introspection
Then verify the results as described in :ref:`running_validation_group`.
Nodes must be in the ``manageable`` provisioning state in order to run
introspection. Introspect hardware attributes of nodes with::
openstack overcloud node introspect --all-manageable
Nodes can also be specified individually by UUID. The ``--provide``
flag can be used in order to move the nodes automatically to the
``available`` provisioning state once the introspection is finished,
making the nodes available for deployment.
::
openstack overcloud node introspect --all-manageable --provide
.. note:: **Introspection has to finish without errors.**
The process can take up to 5 minutes for a VM / 15 minutes for baremetal. If
the process takes longer, see :ref:`introspection_problems`.
.. note:: If you need to introspect just a single node, see
:doc:`../provisioning/introspect_single_node`
Provide Nodes
-------------
Only nodes in the ``available`` provisioning state can be deployed to
(see :doc:`../provisioning/node_states` for details). To move
nodes from ``manageable`` to ``available`` the following command can be
used::
openstack overcloud node provide --all-manageable
Flavor Details
--------------
The undercloud will have a number of default flavors created at install time.
In most cases these flavors do not need to be modified, but they can be if
desired. By default, all overcloud instances will be booted with the
``baremetal`` flavor, so all baremetal nodes must have at least as much
memory, disk, and cpu as that flavor.
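For example, the requirements of the default flavor can be reviewed with::

    openstack flavor show baremetal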
In addition, there are profile-specific flavors created which can be used with
the profile-matching feature. For more details on deploying with profiles,
see :doc:`../provisioning/profile_matching`.
.. _basic-deployment-cli-configure-namserver:
Configure a nameserver for the Overcloud
----------------------------------------
Overcloud nodes can have a nameserver configured in order to resolve
hostnames via DNS. The nameserver is defined in the undercloud's neutron
subnet. If needed, define the nameserver to be used for the environment::
# List the available subnets
openstack subnet list
openstack subnet set <subnet-uuid> --dns-nameserver <nameserver-ip>
.. admonition:: Stable Branch
:class: stable
For Mitaka release and older, the subnet commands are executed within the
`neutron` command::
neutron subnet-list
neutron subnet-update <subnet-uuid> --dns-nameserver <nameserver-ip>
.. note::
A public DNS server, such as 8.8.8.8 or the undercloud DNS name server
can be used if there is no internal DNS server.
.. admonition:: Virtual
:class: virtual
In virtual environments, the libvirt default network DHCP server address,
typically 192.168.122.1, can be used as the overcloud nameserver.
.. _deploy-the-overcloud:
Deploy the Overcloud
--------------------
.. admonition:: Validations
:class: validations
Before you start the deployment, you may want to run the
``pre-deployment`` validations::
openstack tripleo validator run --group pre-deployment
Then verify the results as described in :ref:`running_validation_group`.
By default 1 compute and 1 control node will be deployed, with networking
configured for the virtual environment. To customize this, see the output of::
openstack help overcloud deploy
.. admonition:: Swap
:class: optional
Swap files or partitions can be installed as part of an Overcloud deployment.
For adding swap files there is no restriction besides having
4GB available on / (by default). When using a swap partition,
the partition must exist and be tagged as `swap1` (by default).
To deploy a swap file or partition in each Overcloud node use one
of the following arguments when deploying::
-e /usr/share/openstack-tripleo-heat-templates/environments/enable-swap-partition.yaml
-e /usr/share/openstack-tripleo-heat-templates/environments/enable-swap.yaml
.. admonition:: Ceph
:class: ceph
When deploying Ceph with dedicated CephStorage nodes to host the CephOSD
service it is necessary to specify the number of CephStorage nodes
to be deployed and to provide some additional parameters to enable usage
of Ceph for Glance, Cinder, Nova or all of them. To do so, use the
following arguments when deploying::
--ceph-storage-scale <number of nodes> -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible.yaml
When deploying Ceph without dedicated CephStorage nodes, opting for an HCI
architecture instead, where the CephOSD service is colocated with the
NovaCompute service on the Compute nodes, use the following arguments::
-e /usr/share/openstack-tripleo-heat-templates/environments/hyperconverged-ceph.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible.yaml
The `hyperconverged-ceph.yaml` environment file will also enable a port on the
`StorageMgmt` network for the Compute nodes. This will be the Ceph private
network and the Compute NIC templates have to be configured to use that, see
:doc:`../features/network_isolation` for more details on how to do
it.
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the Overcloud nodes to a Satellite add the following flags
to the deploy command::
--rhel-reg --reg-method satellite --reg-org <ORG ID#> --reg-sat-url <satellite URL> --reg-activation-key <KEY>
.. note::
Only using an activation key is supported when registering to
Satellite; username/password is not supported for security reasons.
The activation key must enable the following repos:
rhel-7-server-rpms
rhel-7-server-optional-rpms
rhel-7-server-extras-rpms
rhel-7-server-openstack-6.0-rpms
.. admonition:: SSL
:class: optional
To deploy an overcloud with SSL, see :doc:`../features/ssl`.
Run the deploy command, including any additional parameters as necessary::
openstack overcloud deploy --templates [additional parameters]
.. note::
When deploying a new stack or updating a preexisting deployment, it is
important to avoid using a component CLI alongside the unified CLI, as this
will lead to unexpected results.
Example:
The following shows a behavior where my_roles_data will persist, because
the custom roles data is stored in swift::
openstack overcloud deploy --templates -r my_roles_data.yaml
heat stack-delete overcloud
Allow the stack to be deleted then continue::
openstack overcloud deploy --templates
The execution of the above will still reference my_roles_data, as the
unified command line client performs a lookup against the swift
storage. The unexpected behavior is due to the heat client's lack of
awareness of the swift storage.
The correct course of action is as follows::
openstack overcloud deploy --templates -r my_roles_data.yaml
openstack overcloud delete <stack name>
Allow the stack to be deleted then continue::
openstack overcloud deploy --templates
To deploy an overcloud with multiple controllers and achieve HA,
follow :doc:`../features/high_availability`.
.. admonition:: Virtual
:class: virtual
When deploying the Compute node in a virtual machine
without nested guest support, add ``--libvirt-type qemu``
or launching instances on the deployed overcloud will fail.
.. note::
To deploy the overcloud with network isolation, bonds, and/or custom
network interface configurations, instead follow the workflow here to
deploy: :doc:`../features/network_isolation`
.. note::
Previous versions of the client defaulted many parameters. Some of these
parameters now pull their defaults directly from the Heat templates. In
order to override such parameters, one should use an environment file to
specify the overrides, via 'parameter_defaults'.
The client arguments that controlled these parameters will be deprecated in
the client, and eventually removed in favor of using environment files.
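As an illustration, a minimal environment file using ``parameter_defaults``
might look like the following (the parameter names and values shown are
examples only; consult the Heat templates for the parameters relevant to your
deployment)::

    parameter_defaults:
      ControllerCount: 1
      ComputeCount: 1

Such a file would then be passed to the deploy command with
``-e my-environment.yaml``.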
Post-Deployment
---------------
.. admonition:: Validations
:class: validations
After the deployment finishes, you can run the ``post-deployment``
validations::
openstack tripleo validator run --group post-deployment
Then verify the results as described in :ref:`running_validation_group`.
Deployment artifacts
^^^^^^^^^^^^^^^^^^^^
Artifacts from the deployment, including log files, rendered
templates, and generated environment files are saved under the working
directory which can be specified with the ``--work-dir`` argument to
``openstack overcloud deploy``. By default, the location is
``~/overcloud-deploy/<stack>``.
Access the Overcloud
^^^^^^^^^^^^^^^^^^^^
``openstack overcloud deploy`` generates an overcloudrc file appropriate for
interacting with the deployed overcloud in the current user's home directory.
To use it, simply source the file::
source ~/overcloudrc
To return to working with the undercloud, source the ``stackrc`` file again::
source ~/stackrc
Add entries to /etc/hosts
^^^^^^^^^^^^^^^^^^^^^^^^^
In cases where the overcloud hostnames are not already resolvable with DNS,
entries can be added to /etc/hosts to make them resolvable. This is
particularly convenient on the undercloud. The Heat stack provides an output
value that can be appended to /etc/hosts easily. Run the following command to
get the output value and add it to /etc/hosts wherever the hostnames should
be resolvable::
openstack stack output show overcloud HostsEntry -f value -c output_value
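For example, to append the entries directly on the undercloud (assuming sudo
access), the output can be piped into ``tee``::

    openstack stack output show overcloud HostsEntry -f value -c output_value | sudo tee -a /etc/hosts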
Setup the Overcloud network
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Initial networks in Neutron in the overcloud need to be created for tenant
instances. The following are example commands to create the initial networks.
Edit the address ranges, or use the necessary ``neutron`` commands to match the
environment appropriately. This assumes a dedicated interface or native VLAN::
openstack network create public --external --provider-network-type flat \
--provider-physical-network datacentre
openstack subnet create --allocation-pool start=172.16.23.140,end=172.16.23.240 \
--network public --gateway 172.16.23.251 --no-dhcp --subnet-range \
172.16.23.128/25 public
The example shows naming the network "public" because that will allow tempest
tests to pass, based on the default floating pool name set in ``nova.conf``.
You can confirm that the network was created with::
openstack network list
Sample output of the command::
+--------------------------------------+----------+--------------------------------------+
| ID | Name | Subnets |
+--------------------------------------+----------+--------------------------------------+
| 4db8dd5d-fab5-4ea9-83e5-bdedbf3e9ee6 | public | 7a315c5e-f8e2-495b-95e2-48af9442af01 |
+--------------------------------------+----------+--------------------------------------+
To use a VLAN, the following example should work. Customize the address ranges
and VLAN id based on the environment::
openstack network create public --external --provider-network-type vlan \
--provider-physical-network datacentre --provider-segment 195
openstack subnet create --allocation-pool start=172.16.23.140,end=172.16.23.240 \
--network public --no-dhcp --gateway 172.16.23.251 \
--subnet-range 172.16.23.128/25 public
Validate the Overcloud
^^^^^^^^^^^^^^^^^^^^^^
Check the `Tempest`_ documentation on how to run tempest.
.. _tempest: ../post_deployment/tempest/tempest.html
Redeploy the Overcloud
^^^^^^^^^^^^^^^^^^^^^^
The overcloud can be redeployed when desired.
#. First, delete any existing Overcloud::
openstack overcloud delete overcloud
#. Confirm the Overcloud has been deleted. It may take a few minutes to delete::
# This command should show no stack once the Delete has completed
openstack stack list
#. It is recommended that you delete existing partitions from all nodes before
redeploying, see :doc:`../provisioning/cleaning` for details.
#. Deploy the Overcloud again::
openstack overcloud deploy --templates

@ -1,325 +0,0 @@
Undercloud Installation
=======================
This section contains instructions on how to install the undercloud. For
updating or upgrading a deployed undercloud, see undercloud_upgrade_.
.. _undercloud_upgrade: ../post_deployment/upgrade/undercloud.html
.. _install_undercloud:
Installing the Undercloud
--------------------------
.. note::
Instack-undercloud was deprecated in Rocky cycle. Containerized undercloud
should be installed instead. See :doc:`undercloud`
for backward compatibility related information.
.. note::
Please ensure all your nodes (undercloud, compute, controllers, etc.) have
their internal clock set to UTC in order to prevent issues with future-dated
file timestamps, which can occur if hwclock is synced before any timezone
offset is applied.
#. Log in to your machine (baremetal or VM) where you want to install the
undercloud as a non-root user (such as the stack user)::
ssh <non-root-user>@<undercloud-machine>
.. note::
If you don't have a non-root user created yet, log in as root and create
one with the following commands::
sudo useradd stack
sudo passwd stack # specify a password
echo "stack ALL=(root) NOPASSWD:ALL" | sudo tee -a /etc/sudoers.d/stack
sudo chmod 0440 /etc/sudoers.d/stack
su - stack
.. note::
The undercloud is intended to work correctly with SELinux enforcing.
Installations with the permissive/disabled SELinux are not recommended.
The ``undercloud_enable_selinux`` config option controls that setting.
.. note::
VLAN tagged interfaces must follow the if_name.vlan_id naming convention, for
example: eth0.vlan100 or bond0.vlan120.
.. admonition:: Baremetal
:class: baremetal
Ensure that there is a FQDN hostname set and that the $HOSTNAME environment
variable matches that value. The easiest way to do this is to set the
``undercloud_hostname`` option in undercloud.conf before running the
install. This will allow the installer to configure all of the
hostname-related settings appropriately.
Alternatively the hostname settings can be configured manually, but
this is strongly discouraged. The manual steps are as follows::
sudo hostnamectl set-hostname myhost.mydomain
sudo hostnamectl set-hostname --transient myhost.mydomain
An entry for the system's FQDN hostname is also needed in /etc/hosts. For
example, if the system is named *myhost.mydomain*, /etc/hosts should have
an entry like::
127.0.0.1 myhost.mydomain myhost
#. Enable needed repositories:
.. admonition:: RHEL
:class: rhel
Enable optional repo for RHEL7::
sudo yum install -y yum-utils
sudo yum-config-manager --enable rhelosp-rhel-7-server-opt
.. include:: ../repositories.rst
#. Install the TripleO CLI, which will pull in all other necessary packages as dependencies::
sudo dnf install -y python*-tripleoclient
.. admonition:: RHEL7 / CentOS
For RHEL or CentOS 7 the command would be::
sudo yum install -y python-tripleoclient
.. admonition:: Ceph
:class: ceph
If you intend to deploy Ceph in the overcloud, or configure the overcloud to use an external Ceph cluster, and are running Pike or newer, then install ceph-ansible on the undercloud::
sudo dnf install -y ceph-ansible
.. admonition:: TLS
:class: tls
If you intend to deploy *TLS-everywhere* in the overcloud and are
deploying Train with python3 or Ussuri+, install the following packages::
sudo yum install -y python3-ipalib python3-ipaclient krb5-devel
If you're deploying Train with python2, install the corresponding python2
version of the above packages::
sudo yum install -y python-ipalib python-ipaclient krb5-devel
If you intend to use Novajoin to implement *TLS-everywhere*, install the
following package::
sudo yum install -y python-novajoin
You can find more information about deploying with TLS in the
:doc:`../features/tls-introduction` documentation.
#. Prepare the configuration file::
cp /usr/share/python-tripleoclient/undercloud.conf.sample ~/undercloud.conf
It is backwards compatible with non-containerized instack underclouds.
.. admonition:: Stable Branch
:class: stable
For a non-containerized undercloud, copy in the sample configuration
file and edit it to reflect your environment::
cp /usr/share/instack-undercloud/undercloud.conf.sample ~/undercloud.conf
.. note:: There is a tool available that can help with writing a basic
``undercloud.conf``:
`Undercloud Configuration Wizard <http://ucw.tripleo.org/>`_.
It takes some basic information about the intended overcloud
environment and generates sane values for a number of the important
options.
#. (OPTIONAL) Generate configuration for preparing container images
As part of the undercloud install, an image registry is configured on port
`8787`. This is used to increase reliability of overcloud image pulls, and
minimise overall network transfers. The undercloud registry will be
populated with images required by the undercloud by generating the following
`containers-prepare-parameter.yaml` file and including it in
``undercloud.conf:
container_images_file=$HOME/containers-prepare-parameter.yaml``::
openstack tripleo container image prepare default \
--local-push-destination \
--output-env-file ~/containers-prepare-parameter.yaml
.. note::
This command is available since Rocky.
See :ref:`prepare-environment-containers` for details on using
`containers-prepare-parameter.yaml` to control what can be done
during the container images prepare phase of an undercloud install.
Additionally, the ``docker_insecure_registries`` and ``docker_registry_mirror``
parameters allow you to customize container registries via the
``undercloud.conf`` file.
#. (OPTIONAL) Override heat parameters and environment files used for undercloud
deployment.
Similarly to overcloud deployments (see :ref:`override-heat-templates` and
:ref:`custom-template-location`), the ``undercloud.conf: custom_env_files``
and ``undercloud.conf: templates`` configuration parameters allow you to
use a custom heat templates location and to override or specify additional
information for Heat resources used for undercloud deployment.
Additionally, the ``undercloud.conf: roles_file`` parameter brings in the
ultimate flexibility of :ref:`custom_roles` and :ref:`composable_services`.
This allows you to deploy an undercloud composed of highly customized
containerized services, with the same workflow that TripleO uses for
overcloud deployments.
.. note:: The CLI and configuration interface used to deploy a containerized
undercloud is the same as that used by 'legacy' non-containerized
underclouds. As noted above, however, the mechanism by which the undercloud
is actually deployed is completely different and, for the first time,
aligns with the overcloud deployment. See the help of the command
``openstack tripleo deploy --standalone`` for details.
That command normally should not be used directly for undercloud installations.
#. Run the command to install the undercloud:
.. admonition:: SSL
:class: optional
To deploy an undercloud with SSL, see :doc:`../features/ssl`.
.. admonition:: Validations
:class: validations
:doc:`../post_deployment/validations/index` will be installed and
configured during undercloud installation. You can set
``enable_validations = false`` in ``undercloud.conf`` to prevent
that.
To deploy an undercloud::
openstack undercloud install
.. note::
The undercloud is containerized by default as of Rocky.
.. note::
It's possible to enable verbose logging with ``--verbose`` option.
.. note::
To install a deprecated instack undercloud, you'll need to deploy
with ``--use-heat=False`` option.
Since Rocky, all the OpenStack services run in a moby container runtime
unless the default settings are overridden.
This command requires two services to be running at all times. The first one is
a basic keystone service, which is currently executed by `tripleoclient` itself;
the second one is `heat-all`, which executes the templates and installs the
services. The latter can be run on baremetal or in a container (tripleoclient
will run it in a container by default).
Once the install has completed, you should take note of the files ``stackrc`` and
``undercloud-passwords.conf``. You can source ``stackrc`` to interact with the
undercloud via the OpenStack command-line client. The ``undercloud-passwords.conf``
file contains the passwords used for each service in the undercloud. These passwords
will be automatically reused if the undercloud is reinstalled on the same system,
so it is not necessary to copy them to ``undercloud.conf``.
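For example, a quick way to verify that the undercloud is functional after the
install is to source ``stackrc`` and list the registered services (the exact
list depends on your configuration)::

    source ~/stackrc
    openstack service list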
.. note:: Heat installer configuration, logs and state are ephemeral for
undercloud deployments. Generated artifacts for subsequent deployments get
overwritten or removed (when ``undercloud.conf: cleanup = true``).
However, you can still find them stored in compressed files.
Miscellaneous undercloud deployment artifacts, like processed heat templates and
compressed files, can be found in ``undercloud.conf: output_dir`` locations
like ``~/tripleo-heat-installer-templates``.
There is also a compressed file created and placed into the output dir, named
``undercloud-install-<TS>.tar.bzip2``, where TS represents a timestamp.
Downloaded ansible playbooks and inventory files (see :ref:`config_download`)
used for undercloud deployment are stored in the tempdir
``~/undercloud-ansible-<XXXX>`` by default.
.. note::
In order to obtain the ansible command used for the installation of the
Undercloud in the artifacts directory, it is necessary to pass the
``--reproduce-command`` option to the Undercloud deployment command.
.. note::
Any passwords set in ``undercloud.conf`` will take precedence over the ones in
``undercloud-passwords.conf``.
.. note::
The undercloud installation command can be rerun to reapply changes from
``undercloud.conf`` to the undercloud. Note that this should be done with
caution if an overcloud has already been deployed or is in progress, as some
configuration changes could affect the overcloud. These changes include, but
are not limited to:
#. Package repository changes on the undercloud, followed by rerunning the
installation command, could update the undercloud such that further
management operations are not possible on the overcloud until the
overcloud update or upgrade procedure is followed.
#. Reconfiguration of the undercloud container registry if the
overcloud is using the undercloud as the source for container images.
#. Networking configuration changes on the undercloud which may affect
the overcloud's ability to connect to the undercloud for
instance metadata services.
.. note::
If ``docker`` commands run as the stack user fail with a permission error
after an undercloud install, log out and log in again. The stack user is
added to the docker group during the install, but that change only takes
effect after a new login.
Cleaning the Undercloud
-----------------------
This procedure doesn't clean everything that TripleO generates, but it cleans
enough so that an Undercloud can be re-deployed.
.. note::
This procedure has been tested on Train and onwards. There is no guarantee
that it works on earlier versions, due to changes in container commands and
new directories.
#. Log in to your machine (baremetal or VM) where you want to cleanup the
undercloud as a non-root user (such as the stack user)::
ssh <non-root-user>@<undercloud-machine>
#. Cleanup the containers and their images::
sudo podman rm -af
sudo podman rmi -af
#. Remove directories generated by TripleO::
sudo rm -rf \
/var/lib/tripleo-config \
/var/lib/config-data \
/var/lib/container-config-scripts \
/var/lib/container-puppet \
/var/lib/heat-config \
/var/lib/image-service \
/var/lib/mysql
#. Cleanup systemd::
sudo rm -rf /etc/systemd/system/tripleo*
sudo systemctl daemon-reload

@ -1,485 +0,0 @@
.. _network_v2:
Networking Version 2 (Two)
==========================
Introduction
------------
In the Wallaby cycle, TripleO Networking was refactored so that no
OS::Neutron heat resources are used. This was a prerequisite for
:doc:`./ephemeral_heat`. Managing non-ephemeral neutron resources with an
ephemeral heat stack is not feasible, so the management of neutron resources
has been externalized from the overcloud heat stack.
High level overview of the changes
..................................
* NIC config templates were migrated to Ansible j2 templates during the
Victoria release, replacing the heat templates previously used for NIC
configuration. Sample Ansible j2 templates are available in the
`tripleo-ansible <https://opendev.org/openstack/tripleo-ansible/src/branch/master/tripleo_ansible/roles/tripleo_network_config/templates>`_
git repository as well as in
``/usr/share/ansible/roles/tripleo_network_config/templates/`` on a deployed
undercloud.
Please refer to :ref:`creating_custom_interface_templates` on the
:ref:`network_isolation` documentation page for further details on writing
custom Ansible j2 NIC config templates.
* A new schema for the network definitions used for Jinja2 rendering of the
``tripleo-heat-templates`` was introduced, in addition to tripleoclient
commands to provision networks using the new network definitions schema.
* A new schema for network Virtual IPs was introduced in conjunction with
tripleoclient commands to provision the Virtual IPs.
* Service Virtual IPs (redis and ovsdb) were refactored so that the neutron
resources are created by the deploy-steps playbook post stack create/update.
* The baremetal provisioning schema was extended to include advanced network
layouts. The ``overcloud node provision`` command was extended so that it
also provisions neutron port resources for all networks defined for
instances/roles in the baremetal provisioning definition.
* The tool (``tripleo-ansible-inventory``) used to generate the ansible
inventory was extended to use neutron as a source for the inventory in
addition to the overcloud heat stack outputs.
* With the TripleO ansible inventory's support for using neutron resources as a
data source, the baremetal provisioning schema and the ``overcloud node
provision`` command were extended to allow arbitrary playbooks to be executed
against the provisioned nodes, as well as to apply node network
configuration utilizing the ``tripleo_network_config`` ansible role and the
ansible j2 NIC config templates.
With all of the above in place, the ``overcloud deploy`` command was extended so
that it can run all the steps:
#. Create Networks
Run the ``cli-overcloud-network-provision.yaml`` ansible playbook using the
network definitions provided via the ``--networks-file`` argument. This
playbook creates/updates the neutron networks on the undercloud and
generates the ``networks-deployed.yaml`` environment file which is included
as a user-environment when creating the overcloud heat stack.
#. Create Virtual IPs
Run the ``cli-overcloud-network-vip-provision.yaml`` ansible playbook using
the Virtual IP definitions provided via the ``--vip-file`` argument. This
playbook creates/updates the Virtual IP port resources in neutron on the
undercloud and generates the ``virtual-ips-deployed.yaml`` environment file
which is included as a user-environment when creating the overcloud heat
stack.
#. Provision Baremetal Instances
Run the ``cli-overcloud-node-provision.yaml`` ansible playbook using the
baremetal instance definitions provided via the ``--baremetal-deployment``
argument in combination with the ``--network-config`` argument so that
baremetal nodes are provisioned and network port resources are created. It also
runs any arbitrary Ansible playbooks provided by the user on the provisioned
nodes before finally configuring overcloud node networking using the
``tripleo_network_config`` ansible role.
#. Create the overcloud Ephemeral Heat stack
The environment files with the required parameters and resource registry
overrides are automatically included when the ``overcloud deploy`` command is
run with the arguments ``--vip-file``, ``--baremetal-deployment`` and
``--network-config``.
#. Run Config-Download and the deploy-steps playbook
As an external deploy step the neutron ports for Service Virtual IPs are
created, and the properties of the Virtual IPs are included in hieradata.
.. admonition:: Ceph
:class: ceph
Optionally Ceph may be deployed after the baremetal instances
are provisioned but before the ephemeral Heat stack is created
as described in :doc:`../features/deployed_ceph`.
Using
-----
Pre-Provision networks
......................
The command to pre-provision networks for one or more overcloud stack(s) is
``openstack overcloud network provision``. The command takes a network-v2
format network definitions YAML file as input, and writes a heat environment
file to the file specified using the ``--output`` argument.
Please refer to the :ref:`network_definition_opts` reference section on the
:ref:`custom_networks` document page for a reference on available options in
the network data YAML schema.
Sample network definition YAML files can be located in the
`tripleo-heat-templates git repository
<https://opendev.org/openstack/tripleo-heat-templates/src/branch/master/network-data-samples/>`_,
or in the ``/usr/share/openstack-tripleo-heat-templates/network-data-samples``
directory on the undercloud.
**Example**: Networks definition YAML file defining the external network.
.. code-block:: yaml
- name: External
name_lower: external
vip: true
mtu: 1500
subnets:
external_subnet:
ip_subnet: 10.0.0.0/24
allocation_pools:
- start: 10.0.0.4
end: 10.0.0.250
gateway_ip: 10.0.0.1
vlan: 10
**Example**: Create or update networks
.. code-block:: bash
$ openstack overcloud network provision \
--output ~/overcloud-networks-deployed.yaml \
~/network_data_v2.yaml
When deploying the overcloud include the environment file generated by the
``overcloud network provision`` command.
.. code-block:: bash
$ openstack overcloud deploy --templates \
-e ~/overcloud-networks-deployed.yaml
Pre-Provision network Virtual IPs
.................................
The command to pre-provision Virtual IPs for an overcloud stack is:
``openstack overcloud network vip provision``. The command takes a Virtual IPs
definitions YAML file as input, and writes a heat environment file to the file
specified using the ``--output`` argument. The ``--stack`` argument defines the
name of the overcloud stack for which Virtual IPs will be provisioned.
Please refer to the :ref:`virtual_ips_definition_opts` reference section on the
:ref:`custom_networks` document page for a reference on available options in
the Virtual IPs YAML schema.
Sample network definition YAML files can be located in the
`tripleo-heat-templates git repository
<https://opendev.org/openstack/tripleo-heat-templates/src/branch/master/network-data-samples/>`_,
or in the ``/usr/share/openstack-tripleo-heat-templates/network-data-samples``
directory on the undercloud.
**Example**: Virtual IPs definition YAML file defining the ctlplane and the
external network Virtual IPs.
.. code-block:: yaml
- network: ctlplane
dns_name: overcloud
- network: external
dns_name: overcloud
**Example**: Create or update Virtual IPs
.. code-block:: bash
$ openstack overcloud network vip provision \
--stack overcloud \
--output ~/overcloud-vip-deployed.yaml \
~/vip_data.yaml
When deploying the overcloud include the environment file generated by the
``overcloud network vip provision`` command. For example:
.. code-block:: bash
$ openstack overcloud deploy --templates \
-e ~/overcloud-vip-deployed.yaml
Service Virtual IPs
...................
Service Virtual IPs are created as needed when the service is enabled. To
configure the subnet they are allocated from, use the existing
``ServiceVipMap`` heat parameter.
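**Example**: A sketch of selecting the network used for the Service Virtual
IPs via ``ServiceVipMap`` (the service keys ``redis`` and ``ovn_dbs`` shown
here are assumptions based on the services mentioned above; check the template
defaults for the exact keys and values):

.. code-block:: yaml

  parameter_defaults:
    ServiceVipMap:
      redis: internal_api
      ovn_dbs: internal_api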
For a fixed IP allocation the existing heat parameters ``RedisVirtualFixedIPs``
and/or ``OVNDBsVirtualFixedIPs`` can be used.
**Example**: Setting fixed ips:
.. code-block:: yaml
parameter_defaults:
RedisVirtualFixedIPs: [{'ip_address': '172.20.0.11'}]
OVNDBsVirtualFixedIPs: [{'ip_address': '172.20.0.12'}]
**Example**: Setting fixed IP address and not creating a neutron resource:
.. code-block:: yaml
parameter_defaults:
RedisVirtualFixedIPs: [{'ip_address': '172.20.0.11', 'use_neutron': false}]
OVNDBsVirtualFixedIPs: [{'ip_address': '172.20.0.12', 'use_neutron': false}]
.. note:: Overriding the Service Virtual IPs using the resource registry
entries ``OS::TripleO::Network::Ports::RedisVipPort`` and
``OS::TripleO::Network::Ports::OVNDBsVipPort`` is no longer
supported.
Provision Baremetal Instances
.............................
Pre-provisioning baremetal instances using Metalsmith has been supported for a
while. The TripleO Network v2 work extended the workflow that provisions
baremetal instances to also provision the neutron network port resources, and
added the interface to run arbitrary Ansible playbooks after node provisioning.
Please refer to the :ref:`baremetal_provision` document page for a reference on
available options in the Baremetal Deployment YAML schema.
**Example**: Baremetal Deployment YAML set up for the default
network-isolation scenario, including one pre-network-config Ansible playbook
that will be run against the nodes in each role.
.. code-block:: yaml
- name: Controller
count: 1
hostname_format: controller-%index%
ansible_playbooks:
- playbook: bm-deploy-playbook.yaml
defaults:
profile: control
networks:
- network: external
subnet: external_subnet
- network: internal_api
subnet: internal_api_subnet01
- network: storage
subnet: storage_subnet01
- network: storage_mgmt
subnet: storage_mgmt_subnet01
- network: tenant
subnet: tenant_subnet01
network_config:
template: templates/multiple_nics/multiple_nics_dvr.j2
default_route_network:
- external
- name: Compute
count: 1
hostname_format: compute-%index%
ansible_playbooks:
- playbook: bm-deploy-playbook.yaml
defaults:
profile: compute-leaf2
networks:
- network: internal_api
subnet: internal_api_subnet02
- network: tenant
subnet: tenant_subnet02
- network: storage
subnet: storage_subnet02
network_config:
template: templates/multiple_nics/multiple_nics_dvr.j2
**Example**: Arbitrary Ansible playbook example bm-deploy-playbook.yaml
.. code-block:: yaml
- name: Overcloud Node Network Config
hosts: allovercloud
any_errors_fatal: true
gather_facts: false
tasks:
- name: A task
debug:
msg: "A message"
To provision baremetal nodes, create neutron port resources, and apply network
configuration as defined in the above definition, run the ``openstack overcloud
node provision`` command including the ``--network-config`` argument as shown
in the example below:
.. code-block:: bash
$ openstack overcloud node provision \
--stack overcloud \
--network-config \
--output ~/overcloud-baremetal-deployed.yaml \
~/baremetal_deployment.yaml
When deploying the overcloud include the environment file generated by the
``overcloud node provision`` command and enable the ``--deployed-server``
argument.
.. code-block:: bash
$ openstack overcloud deploy --templates \
--deployed-server \
-e ~/overcloud-baremetal-deployed.yaml
The *All-in-One* alternative using overcloud deploy command
.............................................................
It is possible to instruct the ``openstack overcloud deploy`` command to do all
of the above steps in one go. The same YAML definitions can be used and the
environment files will be automatically included.
**Example**: Use the **All-in-One** deploy command:
.. code-block:: bash
$ openstack overcloud deploy \
--templates \
--stack overcloud \
--network-config \
--deployed-server \
--roles-file ~/my_roles_data.yaml \
--networks-file ~/network_data_v2.yaml \
--vip-file ~/vip_data.yaml \
--baremetal-deployment ~/baremetal_deployment.yaml
Managing Multiple Overclouds
............................
When managing multiple overclouds using a single undercloud, one has to use a
different ``--stack`` name and ``--output`` file, as well as per-overcloud
YAML definitions for provisioning Virtual IPs and baremetal nodes.
Networks can be shared, or separate for each overcloud stack. If they are
shared, use the same network definition YAML and deployed network environment
for all stacks. In the case where networks are not shared, a separate network
definitions YAML and a separate deployed network environment file must be used
by each stack.
.. note:: The ``ctlplane`` provisioning network will always be shared.
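**Example**: Provisioning Virtual IPs and baremetal nodes for a second stack,
using the commands described earlier (the stack name ``overcloud2`` and the
file names here are illustrative):

.. code-block:: bash

   $ openstack overcloud network vip provision \
       --stack overcloud2 \
       --output ~/overcloud2-vip-deployed.yaml \
       ~/vip_data_overcloud2.yaml

   $ openstack overcloud node provision \
       --stack overcloud2 \
       --network-config \
       --output ~/overcloud2-baremetal-deployed.yaml \
       ~/baremetal_deployment_overcloud2.yaml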
Migrating existing deployments
------------------------------
To facilitate the migration of deployed overclouds, tripleoclient commands to
extract information from deployed overcloud stacks have been added. During the
upgrade to Wallaby these tools will be executed as part of the undercloud
upgrade, placing the generated YAML definition files in the working directory
(defaults to ``~/overcloud-deploy/$STACK_NAME/``). Each export command is
described below, with examples of how to use it manually, so that developers
and operators can better understand what happens "under the hood"
during the undercloud upgrade.
There is also a tool ``convert_heat_nic_config_to_ansible_j2.py`` that can be
used to convert heat template NIC config to Ansible j2 templates.
.. warning:: If migrating to use Networking v2 while using the non-Ephemeral
heat i.e ``--heat-type installed``, the existing overcloud stack
must **first** be updated to set the ``deletion_policy`` for
``OS::Nova`` and ``OS::Neutron`` resources. This can be done
using a ``--stack-only`` update, including an environment file
setting the following tripleo-heat-templates parameters
``NetworkDeletionPolicy``, ``PortDeletionPolicy`` and
``ServerDeletionPolicy`` to ``retain``.
If the deletion policy is not set to ``retain`` the
orchestration service will **delete** the existing resources
when an update using the Networking v2 environments is
performed.
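**Example**: A minimal sketch of an environment file setting the deletion
policies to ``retain`` for the ``--stack-only`` update described in the
warning above:

.. code-block:: yaml

  parameter_defaults:
    NetworkDeletionPolicy: retain
    PortDeletionPolicy: retain
    ServerDeletionPolicy: retain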
Conflicting legacy environment files
....................................
The heat environment files created by the Networking v2 commands use resource
registry overrides to replace the existing resources with *pre-deployed*
resource types. These resource registry entries were also used by legacy
environment files, such as ``network-isolation.yaml``. The legacy files should
no longer be used, as they will nullify the new overrides.
It is recommended to compare the generated environment files with existing
environment files used with the overcloud deployment prior to the migration and
remove all settings that overlap with the settings in the generated environment
files.
Convert NIC configs
...................
In the tripleo-heat-templates ``tools`` directory there is a script
``convert_heat_nic_config_to_ansible_j2.py`` that can be used to convert heat
NIC config templates to Ansible j2 NIC config templates.
**Example**: Convert the compute.yaml heat NIC config template to Ansible j2.
.. code-block:: bash
$ /usr/share/openstack-tripleo-heat-templates/tools/convert_heat_nic_config_to_ansible_j2.py \
--stack overcloud \
--networks-file network_data.yaml \
~/nic-configs/compute.yaml
.. warning:: The tool makes a best-effort attempt to fully automate the
conversion. The new Ansible j2 template files should be inspected; there
may be a need to manually edit the new Ansible j2 templates. The tool will
try to highlight any issues that need manual intervention by
adding comments in the Ansible j2 file.
The :ref:`migrating_existing_network_interface_templates` section on the
:ref:`network_isolation` documentation page provides a guide for manual
migration.
Generate Network YAML
.....................
The command ``openstack overcloud network extract`` can be used to generate
a Network definition YAML file from a deployed overcloud stack. The YAML
definition file can then be used with ``openstack overcloud network provision``
and the ``openstack overcloud deploy`` command.
**Example**: Generate a Network definition YAML for the ``overcloud`` stack:
.. code-block:: bash
$ openstack overcloud network extract \
--stack overcloud \
--output ~/network_data_v2.yaml
Generate Virtual IPs YAML
.........................
The command ``openstack overcloud network vip extract`` can be used to generate
a Virtual IPs definition YAML file from a deployed overcloud stack. The YAML
definition file can then be used with ``openstack overcloud network vip
provision`` command and/or the ``openstack overcloud deploy`` command.
**Example**: Generate a Virtual IPs definition YAML for the ``overcloud``
stack:
.. code-block:: bash
$ openstack overcloud network vip extract \
--stack overcloud \
--output /home/centos/overcloud/network_vips_data.yaml
Generate Baremetal Provision YAML
.................................
The command ``openstack overcloud node extract provisioned`` can be used to
generate a Baremetal Provision definition YAML file from a deployed overcloud
stack. The YAML definition file can then be used with ``openstack overcloud
node provision`` command and/or the ``openstack overcloud deploy`` command.
**Example**: Export deployed overcloud nodes to Baremetal Deployment YAML
definition
.. code-block:: bash
$ openstack overcloud node extract provisioned \
--stack overcloud \
--roles-file ~/tht_roles_data.yaml \
--output ~/baremetal_deployment.yaml

@ -1,84 +0,0 @@
Containers based Overcloud Deployment
======================================
This documentation explains how to deploy a fully containerized overcloud
utilizing Podman, which is the default since the Stein release.
The requirements for a containerized overcloud are the same as for any other
overcloud deployment. The real difference is in where the overcloud services
will be deployed (containers vs base OS).
Architecture
------------
The container-based overcloud architecture is not very different from the
baremetal/VM based one. The services deployed in the traditional baremetal
overcloud are also deployed in the container-based one.
One obvious difference between these two types of deployments is that the
OpenStack services are deployed as containers in a container runtime rather
than directly on the host operating system. This reduces the required packages
in the host to the bare minimum for running the container runtime and managing
the base network layer.
Manual overcloud deployment
----------------------------
This section explains how to deploy a containerized overcloud manually. For an
automated overcloud deployment, please follow the steps in the
`Using TripleO Quickstart`_ section below.
Preparing overcloud images
..........................
As part of the undercloud install, an image registry is configured on port
`8787`. This is used to increase reliability of overcloud image pulls, and
minimise overall network transfers. The undercloud registry will be populated
with images required by the overcloud deploy by generating the following
`containers-prepare-parameter.yaml` file and using that for the prepare call::
openstack tripleo container image prepare default \
--local-push-destination \
--output-env-file containers-prepare-parameter.yaml
.. note:: The file `containers-prepare-parameter.yaml` may have already been
created during :ref:`install_undercloud`. It is
encouraged to share the same `containers-prepare-parameter.yaml` file
for undercloud install and overcloud deploy.
See :ref:`prepare-environment-containers` for details on using
`containers-prepare-parameter.yaml` to control what can be done
with image preparation during overcloud deployment.
.. _overcloud-prepare-container-images:
Deploying the containerized Overcloud
-------------------------------------
A containerized overcloud deployment follows all the steps described in the
baremetal :ref:`deploy-the-overcloud` documentation with the exception that it
requires an extra environment file to be added to the ``openstack overcloud
deploy`` command::
-e ~/containers-prepare-parameter.yaml
If deploying with highly available controller nodes, include the
following extra environment file in addition to the above and in place
of the `environments/puppet-pacemaker.yaml` file::
-e /usr/share/openstack-tripleo-heat-templates/environments/docker-ha.yaml
Using TripleO Quickstart
------------------------
.. note:: Please refer to the `TripleO Quickstart`_ docs for more info about
quickstart, the minimum requirements, the setup process and the
available plugins.
The command below will deploy a containerized overcloud on top of a baremetal undercloud::
bash quickstart.sh --config=~/.quickstart/config/general_config/containers_minimal.yml $VIRTHOST
.. _TripleO Quickstart: https://docs.openstack.org/tripleo-quickstart/

@ -1,6 +0,0 @@
:orphan:
Repository Enablement
=====================
.. include:: ../repositories.rst

File diff suppressed because it is too large
@ -1,85 +0,0 @@
Deploying with Heat Templates
=============================
It is possible to use the ``--templates`` and ``--environment-file``
options to override specific templates or even deploy using a separate
set of templates entirely.
Deploying an Overcloud using the default templates
--------------------------------------------------
The ``--templates`` option without an argument enables deploying using
the packaged Heat templates::
openstack overcloud deploy --templates
.. note::
The default location for the templates is
`/usr/share/openstack-tripleo-heat-templates`.
.. _override-heat-templates:
Overriding specific templates with local versions
-------------------------------------------------
You may use heat environment files (via the ``--environment-file`` or ``-e``
option), combined with the ``--templates`` option to override specific
templates, e.g. to test a bugfix outside of the location of the packaged
templates.
The mapping between heat resource types and the underlying templates can be
found in
`/usr/share/\
openstack-tripleo-heat-templates/overcloud-resource-registry-puppet.j2.yaml`
Here is an example of copying a specific resource template and overriding
so the deployment uses the local version::
mkdir local_templates
cp /usr/share/openstack-tripleo-heat-templates/puppet/controller-puppet.yaml local_templates
cat > override_templates.yaml << EOF
resource_registry:
OS::TripleO::Controller: local_templates/controller-puppet.yaml
EOF
openstack overcloud deploy --templates --environment-file override_templates.yaml
.. note::
The ``--environment-file``/``-e`` option may be specified multiple times,
if duplicate keys are specified in the environment files, the last one
takes precedence.
.. note::
You must also pass the environment files (again using the ``-e`` or
``--environment-file`` option) whenever you make subsequent changes to the
overcloud, such as :doc:`../post_deployment/scale_roles`,
:doc:`../post_deployment/delete_nodes` or
:doc:`../post_deployment/upgrade/minor_update`.
.. _custom-template-location:
Using a custom location for all templates
-----------------------------------------
You may specify a path to the ``--templates`` option, such that the packaged
tree may be copied to another location, which is useful e.g. for developer usage
where you wish to check the templates into a revision control system.
.. note::
Use caution when using this approach as you will need to rebase any local
changes on updates to the openstack-tripleo-heat-templates package, and
care will be needed to avoid modifying anything in the tree which the CLI
tools rely on (such as top-level parameters). In many cases using the
:doc:`ExtraConfig <../features/extra_config>` interfaces or specific template overrides
as outlined above may be preferable.
Here is an example of copying the entire tripleo-heat-templates tree to a
local directory and launching a deployment using the new location::
cp -r /usr/share/openstack-tripleo-heat-templates /home/stack/
openstack overcloud deploy --templates /home/stack/openstack-tripleo-heat-templates

View File

@ -1,380 +0,0 @@
Tips and Tricks for containerizing services
===========================================
This document contains a list of tips and tricks that are useful when
containerizing an OpenStack service.
Important Notes
---------------
Podman
------
Prior to Stein, containerized OpenStack deployments used Docker.
Starting with the Stein release, Docker is no longer part of OpenStack,
and Podman has taken its place. The notes here are regarding Stein and later.
Monitoring containers
---------------------
It's often useful to monitor the running containers and see what has been
executed and what has not. The puppet containers are created and removed
automatically unless they fail. For all the other containers, it's enough to
monitor the output of the command below::
$ watch -n 0.5 sudo podman ps -a --filter label=managed_by=tripleo_ansible
.. admonition:: Train
:class: stable
::
$ watch -n 0.5 sudo podman ps -a --filter label=managed_by=paunch
.. _debug-containers:
Viewing container logs
----------------------
You can view the output of the main process running in a container by running::
$ sudo podman logs $CONTAINER_ID_OR_NAME
Since the Stein release, standard out and standard error from containers are
captured in `/var/log/containers/stdouts`.
We export traditional logs from containers into the `/var/log/containers`
directory on the host, where you can look at them.
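For example, to follow the captured stdout/stderr of a single service (the exact
log file name per container is an assumption and may vary between releases)::

    $ sudo tail -f /var/log/containers/stdouts/nova_scheduler.log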
systemd and podman
------------------
Throughout this document you'll find references to direct podman commands
for things like restarting services. These are valid and supported methods,
but it's worth noting that services are tied into the systemd management
system, which is often the preferred way to operate.
Restarting nova_scheduler for example::
$ sudo systemctl restart tripleo_nova_scheduler
Stopping a container with systemd::
$ sudo systemctl stop tripleo_nova_scheduler
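To discover which systemd units manage the containers and check their state,
something like the following can be used::

    $ sudo systemctl list-units 'tripleo_*' --type=service
    $ sudo systemctl status tripleo_nova_scheduler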
.. _toggle_debug:
Toggle debug
------------
For services that support `reloading their configuration at runtime`_::
$ sudo podman exec -u root nova_scheduler crudini --set /etc/nova/nova.conf DEFAULT debug true
$ sudo podman kill -s SIGHUP nova_scheduler
.. _reloading their configuration at runtime: https://storyboard.openstack.org/#!/story/2001545
Restart the container to turn the configuration back to normal::
$ sudo podman restart nova_scheduler
Otherwise, if the service does not yet support reloading its configuration, it
is necessary to change the configuration on the host filesystem and restart the
container::
$ sudo crudini --set /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf DEFAULT debug true
$ sudo podman restart nova_scheduler
Apply the inverse change to restore the default log verbosity::
$ sudo crudini --set /var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf DEFAULT debug false
$ sudo podman restart nova_scheduler
Debugging container failures
----------------------------
The following commands are useful for debugging containers.
* **inspect**: This command allows for inspecting the container's structure and
metadata. It provides info about the bind mounts on the container, the
container's labels, the container's command, etc::
$ sudo podman inspect $CONTAINER_ID_OR_NAME
* **top**: Viewing processes running within a container is trivial with Podman::
$ sudo podman top $CONTAINER_ID_OR_NAME
* **exec**: Running commands on or attaching to a running container is extremely
useful to get a better understanding of what's happening in the container.
It's possible to do so by running the following command::
$ sudo podman exec -ti $CONTAINER_ID_OR_NAME /bin/bash
Replace the `/bin/bash` above with other commands to run one-shot commands. For
example::
$ sudo podman exec -ti mysql mysql -u root -p $PASSWORD
The above will start a mysql shell in the mysql container.
* **export**: When a container fails, it's basically impossible to know what
happened. It's possible to get the logs from the container engine, but those will
only contain what was printed to stdout by the entrypoint. Exporting the
filesystem structure from the container will allow for checking other log
files that may not be in the mounted volumes::
$ sudo podman export $CONTAINER_ID_OR_NAME -o $CONTAINER_ID_OR_NAME.tar
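The exported archive can then be unpacked and inspected on the host, for example
(assuming the container exported above was ``nova_api``)::

    $ mkdir /tmp/nova_api_fs
    $ sudo tar -xf nova_api.tar -C /tmp/nova_api_fs
    $ ls /tmp/nova_api_fs/var/log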
Debugging with tripleo_container_manage Ansible role
----------------------------------------------------
The debugging manual for tripleo_container_manage is documented in the role_
directly.
.. _role: https://docs.openstack.org/tripleo-ansible/latest/roles/role-tripleo_container_manage.html#debug
Debugging with Paunch
---------------------
.. note:: During the Ussuri cycle, Paunch was replaced by the
tripleo_container_manage Ansible role. Therefore, the following section
is deprecated in favor of the new role, which contains a debug manual.
The ``paunch debug`` command allows you to perform specific actions on a given
container. This can be used to:
* Run a container with a specific configuration.
* Dump the configuration of a given container in either json or yaml.
* Output the docker command line used to start the container.
* Run a container with any configuration additions you wish such that you can
run it with a shell as any user etc.
The configuration options you will likely be interested in include:
::
--file <file> YAML or JSON file containing configuration data
--action <name> Action can be one of: "dump-json", "dump-yaml",
"print-cmd", or "run"
--container <name> Name of the container you wish to manipulate
--interactive Run container in interactive mode - modifies config
and execution of container
--shell Similar to interactive but drops you into a shell
--user <name> Start container as the specified user
--overrides <name> JSON configuration information used to override
default config values
--default-runtime Default runtime for containers. Can be docker or
podman.
``file`` is the name of the configuration file containing the configuration
for the container you wish to manipulate.
TripleO creates configuration files for starting containers in
``/var/lib/tripleo-config/container-startup-config``. If you look in this directory
you will see a number of files corresponding with the steps in
TripleO heat templates. Most of the time, you will likely want to use
``/var/lib/tripleo-config/container-startup-config/step_4``
as it contains most of the final startup configurations for the running
containers.
``shell``, ``user`` and ``interactive`` are available as shortcuts that
modify the configuration to easily allow you to run an interactive session
in a given container.
To make sure you get the right container you can use the ``paunch list``
command to see what containers are running and which config id they
are using. This config id corresponds to the file in which you will find the
container configuration.
TripleO uses ``managed_by`` and ``config_id`` labels to help identify the
containers it is managing. These can be checked by inspecting the labels section
like so:
::
# podman inspect nova_api | jq '.[0].Config.Labels | "managed_by=\(.managed_by) config_id=\(.config_id)"'
"managed_by=tripleo-Controller config_id=tripleo_step4"
Note that if you wish to replace a currently running container you will
want to ``sudo podman rm -f`` the running container before starting a new one.
Here is an example of using ``paunch debug`` to start a root shell inside the
nova_api container:
::
# paunch debug --file /var/lib/tripleo-config/container-startup-config/step_4 --managed-by=tripleo-Controller --config-id=tripleo_step4 --interactive --shell --user root --container nova_api --action run
This will drop you into an interactive session inside the nova_api container,
starting /bin/bash running as root.
To see how this container is started by TripleO:
::
# paunch debug --file /var/lib/tripleo-config/container-startup-config/step_4 --managed-by=tripleo-Controller --config-id=tripleo_step4 --container nova_api --action print-cmd
podman run --name nova_api-1jpm5kyv --label config_id=tripleo_step4 --label container_name=nova_api --label managed_by=tripleo-Controller --label config_data={"environment": {"KOLLA_CONFIG_STRATEGY": "COPY_ALWAYS", "TRIPLEO_CONFIG_HASH": "5cbcd2d39667626874f547214d3980ec"}, "healthcheck": {"test": "/openstack/healthcheck"}, "image": "undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-nova-api:16.1_20210726.1", "net": "host", "privileged": false, "restart": "always", "start_order": 2, "user": "root", "volumes": ["/etc/hosts:/etc/hosts:ro", "/etc/localtime:/etc/localtime:ro", "/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro", "/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro", "/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro", "/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro", "/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro", "/dev/log:/dev/log", "/etc/puppet:/etc/puppet:ro", "/var/log/containers/nova:/var/log/nova:z", "/var/log/containers/httpd/nova-api:/var/log/httpd:z", "/var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro", "/var/lib/config-data/puppet-generated/nova:/var/lib/kolla/config_files/src:ro"]} --conmon-pidfile=/var/run/nova_api-1jpm5kyv.pid --detach=true --env=KOLLA_CONFIG_STRATEGY=COPY_ALWAYS --env=TRIPLEO_CONFIG_HASH=5cbcd2d39667626874f547214d3980ec --net=host --privileged=false --user=root --volume=/etc/hosts:/etc/hosts:ro --volume=/etc/localtime:/etc/localtime:ro --volume=/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro --volume=/etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro --volume=/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro --volume=/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro --volume=/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro --volume=/dev/log:/dev/log --volume=/etc/puppet:/etc/puppet:ro --volume=/var/log/containers/nova:/var/log/nova:z --volume=/var/log/containers/httpd/nova-api:/var/log/httpd:z --volume=/var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro --volume=/var/lib/config-data/puppet-generated/nova:/var/lib/kolla/config_files/src:ro undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-nova-api:16.1_20210726.1
You can also dump the configuration of a container to a file so you can
edit it and rerun it with a different configuration:
::
# paunch debug --file /var/lib/tripleo-config/container-startup-config/step_4 --container nova_api --action dump-json > nova_api.json
You can then use ``nova_api.json`` as your ``--file`` argument after
editing it to your liking.
To add configuration elements on the command line you can use the
``overrides`` option. In this example I'm adding a health check to
the container:
::
# paunch debug --file nova_api.json --overrides '{"health-cmd": "/usr/bin/curl -f http://localhost:8004/v1/", "health-interval": "30s"}' --container nova_api --managed-by=tripleo-Controller --config-id=tripleo_step4 --action run
f47949a7cb205083a3adaa1530fcdd4ed7dcfa9b9afb4639468357b36786ecf0
Debugging container-puppet.py
-----------------------------
The :ref:`container-puppet.py` script manages the config file generation and
puppet tasks for each service. This also exists in the `common` directory
of tripleo-heat-templates. When writing these tasks, it's useful to be
able to run them manually instead of running them as part of the entire
stack. To do so, one can run the script as shown below::
CONFIG=/path/to/task.json /path/to/container-puppet.py
.. note:: Prior to the Train cycle, container-puppet.py was called
docker-puppet.py which was located in the `docker` directory.
The JSON file must have the following form::
[
{
"config_image": ...,
"config_volume": ...,
"puppet_tags": ...,
"step_config": ...
}
]
To use a more realistic example, given a `puppet_config` section like this::
puppet_config:
config_volume: glance_api
puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
step_config: {get_attr: [GlanceApiPuppetBase, role_data, step_config]}
config_image: {get_param: DockerGlanceApiConfigImage}
TripleO would generate a JSON file called `/var/lib/container-puppet/container-puppet-tasks2.json` that looks like::
[
{
"config_image": "tripleomaster/centos-binary-glance-api:latest",
"config_volume": "glance_api",
"puppet_tags": "glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config",
"step_config": "include ::tripleo::profile::base::glance::api\n"
}
]
Setting the path to the above JSON file as the `CONFIG` environment
variable passed to `container-puppet.py` will create a container using
the `centos-binary-glance-api:latest` image and run puppet on a
catalog restricted to the given `puppet_tags`.
As mentioned above, it's possible to create custom json files and call
`container-puppet.py` manually, which makes developing and debugging puppet
steps easier.
`container-puppet.py` also supports the environment variable `SHOW_DIFF`,
which causes it to print out a docker diff of the container before and
after the configuration step has occurred.
By default `container-puppet.py` runs things in parallel. This can make
it hard to see the debug output of a given container so there is a
`PROCESS_COUNT` variable that lets you override this. A typical debug
run for container-puppet might look like::
SHOW_DIFF=True PROCESS_COUNT=1 CONFIG=glance_api.json ./container-puppet.py
Testing a code fix in a container
---------------------------------
Let's assume that we need to test a code patch or an updated package in a
container. We will look at a few steps that can be taken to test a fix
in a container on an existing deployment.
For example let's update packages for the mariadb container::
(undercloud) [stack@undercloud ~]$ sudo podman images | grep mariadb
192.168.24.1:8787/tripleomaster/centos-binary-mariadb latest 035a8237c376 2 weeks ago 723.5 MB
So container image `035a8237c376` is the one we need to base our work on. Since
container images are supposed to be immutable we will base our work off of
`035a8237c376` and create a new one::
mkdir -p galera-workaround
cat > galera-workaround/Dockerfile <<EOF
FROM 192.168.24.1:8787/tripleomaster/centos-binary-mariadb:latest
USER root
RUN yum-config-manager --add-repo http://people.redhat.com/mbaldess/rpms/container-repo/pacemaker-bundle.repo && yum clean all && rm -rf /var/cache/yum
RUN yum update -y pacemaker pacemaker-remote pcs libqb resource-agents && yum clean all && rm -rf /var/cache/yum
USER mysql
EOF
To determine which user is the default one being used in a container you can run `docker run -it 035a8237c376 whoami`.
Then we build the new image and tag it with `:workaround1`::
docker build --rm -t 192.168.24.1:8787/tripleomaster/centos-binary-mariadb:workaround1 ~/galera-workaround
Then we push it in our docker registry on the undercloud::
docker push 192.168.24.1:8787/tripleomaster/centos-binary-mariadb:workaround1
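To confirm the new tag is available, the registry can be queried with the standard
Docker registry v2 API (the registry address is the one used above)::

    curl -s http://192.168.24.1:8787/v2/tripleomaster/centos-binary-mariadb/tags/list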
At this stage we can point THT to use
`192.168.24.1:8787/tripleomaster/centos-binary-mariadb:workaround1` as the
container image by tweaking the necessary environment files and redeploying the overcloud.
Alternatively, if we only want to test the tweaked image in place, the following steps can be used:
First, determine if the containers are managed by pacemaker (those will typically have a `:pcmklatest` tag) or by paunch.
For the paunch-managed containers see `Debugging with Paunch`_.
For the pacemaker-managed containers you can (best done on your staging env, as it might be an invasive operation) do the following::
1. `pcs cluster cib cib.xml`
2. Edit the cib.xml with the changes around the bundle you are tweaking
3. `pcs cluster cib-push --config cib.xml`
Testing in CI
-------------
When new service containers are added, be sure to update the image names in
`container-images` in the tripleo-common repo. These service
images are pulled in and made available in the local container registry that the
containers CI job uses.
Packages versions in containers
-------------------------------
With the container CI jobs, it can be challenging to find which version of OpenStack runs in the containers.
An easy way to find out is to use the `logs/undercloud/home/zuul/overcloud_containers.yaml.txt.gz` log file and
see which tag was deployed.
For example::
container_images:
- imagename: docker.io/tripleomaster/centos-binary-ceilometer-central:ac82ea9271a4ae3860528eaf8a813da7209e62a6_28eeb6c7
push_destination: 192.168.24.1:8787
So we know the tag is `ac82ea9271a4ae3860528eaf8a813da7209e62a6_28eeb6c7`.
The tag is actually a Delorean hash. You can find out the versions of packages
by using this tag. For example, the `ac82ea9271a4ae3860528eaf8a813da7209e62a6_28eeb6c7`
tag corresponds to this `Delorean repository`_.
.. _Delorean repository: https://trunk.rdoproject.org/centos7-master/ac/82/ac82ea9271a4ae3860528eaf8a813da7209e62a6_28eeb6c7/
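As a quick sketch, the repository URL can be derived from the tag itself (the first
two pairs of characters form the directory prefix) and, assuming the usual DLRN
repository layout, the package versions can be listed from its `versions.csv`::

    TAG=ac82ea9271a4ae3860528eaf8a813da7209e62a6_28eeb6c7
    curl -s https://trunk.rdoproject.org/centos7-master/${TAG:0:2}/${TAG:2:2}/${TAG}/versions.csv | head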


@ -1,31 +0,0 @@
Containers based Undercloud Deployment
======================================
The requirements for a containerized undercloud are the same as for any other
undercloud deployment. The real difference is in where the undercloud services
will be deployed (containers vs base OS).
The undercloud architecture based on Moby_ (also Podman_ as of Stein) containers
is not very different from the baremetal/VM based one. The services deployed in
the traditional baremetal undercloud are also deployed in the containers based
one.
.. _Moby: https://mobyproject.org/
.. _Podman: https://podman.io/
One obvious difference between these two types of deployments is that the
openstack services are deployed as containers in a container runtime rather than
directly on the host operating system. This reduces the required packages in
the host to the bare minimum for running the container runtime and managing the
base network layer.
.. note:: Check the :doc:`install_undercloud` and :doc:`../post_deployment/upgrade/undercloud`
sections for deploying and upgrading a containerized undercloud.
.. note:: Check the :ref:`debug-containers` section for more tips and tricks for
debugging containers.
.. note:: Check our "Deep Dive" video_, which explains the architecture background and changes,
as well as some demos and Q&A.
.. _video: https://www.youtube.com/watch?v=lv233gPynwk


@ -1,14 +0,0 @@
Uploading a Single Image
========================
After a new image is built, it can be uploaded using the same command as
before, with the ``--update-existing`` flag added::
openstack overcloud image upload --update-existing
Note that if the new image is a ramdisk, the Ironic nodes need to be
re-configured to use it. This can be done by re-running::
openstack overcloud node configure --all-manageable
Now the new image should be fully ready for use by new deployments.
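If desired, the result can be checked by listing the images known to Glance on the
undercloud (the image names shown by your environment may differ)::

    openstack image list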


@ -1,399 +0,0 @@
Baremetal Environment
---------------------
|project| can be used in an all baremetal environment. One machine will be
used for the Undercloud; the others will be used for your Overcloud.
Minimum System Requirements
^^^^^^^^^^^^^^^^^^^^^^^^^^^
To deploy a minimal TripleO cloud with |project| you need the following baremetal
machines:
* 1 Undercloud
* 1 Overcloud Controller
* 1 Overcloud Compute
For each additional Overcloud role, such as Block Storage or Object Storage,
you need an additional baremetal machine.
..
<REMOVE WHEN HA IS AVAILABLE>
For minimal **HA (high availability)** deployment you need at least 3 Overcloud
Controller machines and 2 Overcloud Compute machines.
The baremetal machines must meet the following minimum specifications:
* 8 core CPU
* 12 GB memory
* 60 GB free disk space
Larger systems are recommended for production deployments, however.
For instance, the undercloud needs a bit more capacity, especially regarding RAM (a minimum of 16 GB is advised),
and is fairly I/O intensive - fast disks (SSD, SAS) are strongly advised.
Please also note the undercloud needs enough space to store the "overcloud-full" image twice (once
in its Glance store, once in /var/lib subdirectories for PXE/TFTP).
TripleO supports only the following operating systems:
* RHEL 9 (x86_64)
* CentOS Stream 9 (x86_64)
Please also ensure your node clock is set to UTC in order to prevent any issues
when the OS hwclock syncs to the BIOS clock before applying the timezone offset,
which can cause files to have future-dated timestamps.
Preparing the Baremetal Environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Networking
^^^^^^^^^^
The overcloud nodes will be deployed from the undercloud machine and therefore the machines need to have their network settings modified to allow for the overcloud nodes to be PXE booted using the undercloud machine. As such, the setup requires that:
* All overcloud machines in the setup must support IPMI
* A management provisioning network is setup for all of the overcloud machines.
One NIC from every machine needs to be in the same broadcast domain of the
provisioning network. In the tested environment, this required setting up a new
VLAN on the switch. Note that you should use the same NIC on each of the
overcloud machines (for example: use the second NIC on each overcloud
machine). This is because during installation we will need to refer to that NIC
using a single name across all overcloud machines, e.g. em2.
* The provisioning network NIC should not be the same NIC that you are using
for remote connectivity to the undercloud machine. During the undercloud
installation, an Open vSwitch bridge will be created for Neutron and the
provisioning NIC will be bridged to it. As such,
connectivity would be lost if the provisioning NIC was also used for remote
connectivity to the undercloud machine.
* The overcloud machines can PXE boot off the NIC that is on the private VLAN.
In the tested environment, this required disabling network booting in the BIOS
for all NICs other than the one we wanted to boot and then ensuring that the
chosen NIC is at the top of the boot order (ahead of the local hard disk drive
and CD/DVD drives).
* For each overcloud machine you have: the MAC address of the NIC that will PXE
boot on the provisioning network, and the IPMI information for the machine (i.e. IP
address of the IPMI NIC, IPMI username and password).
Refer to the following diagram for more information.
.. image:: ../_images/TripleO_Network_Diagram_.jpg
Setting Up The Undercloud Machine
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#. Select a machine within the baremetal environment on which to install the
undercloud.
#. Install RHEL 9 x86_64 or CentOS Stream 9 x86_64 on this machine.
#. If needed, create a non-root user with sudo access to use for installing the
Undercloud::
sudo useradd stack
sudo passwd stack # specify a password
echo "stack ALL=(root) NOPASSWD:ALL" | sudo tee -a /etc/sudoers.d/stack
sudo chmod 0440 /etc/sudoers.d/stack
.. admonition:: RHEL
:class: rhel
If using RHEL, register the Undercloud for package installations/updates.
.. admonition:: RHEL Portal Registration
:class: portal
Register the host machine using Subscription Management::
sudo subscription-manager register --username="[your username]" --password="[your password]"
# Find this with `subscription-manager list --available`
sudo subscription-manager attach --pool="[pool id]"
# Verify repositories are available
sudo subscription-manager repos --list
# Enable repositories needed
sudo subscription-manager repos \
--enable=rhel-8-for-x86_64-baseos-eus-rpms \
--enable=rhel-8-for-x86_64-appstream-eus-rpms \
--enable=rhel-8-for-x86_64-highavailability-eus-rpms \
--enable=ansible-2.9-for-rhel-8-x86_64-rpms
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the host machine to a Satellite, the following repos must
be synchronized on the Satellite and enabled for registered systems::
rhel-8-for-x86_64-baseos-eus-rpms
rhel-8-for-x86_64-appstream-eus-rpms
rhel-8-for-x86_64-highavailability-eus-rpms
ansible-2.9-for-rhel-8-x86_64-rpms
See the `Red Hat Satellite User Guide`_ for how to configure the system to
register with a Satellite server. It is suggested to use an activation
key that automatically enables the above repos for registered systems.
.. _Red Hat Satellite User Guide: https://access.redhat.com/documentation/en-US/Red_Hat_Satellite/
Validations
^^^^^^^^^^^
You can run the ``prep`` validations to verify the hardware. Later in
the process, the validations will be run by the undercloud processes.
Refer to the Ansible section for running the validations directly against
baremetal nodes: `validations_no_undercloud`_.
Configuration Files
^^^^^^^^^^^^^^^^^^^
.. _instackenv:
instackenv.json
^^^^^^^^^^^^^^^
Create a JSON file describing your Overcloud baremetal nodes, call it
``instackenv.json`` and place it in your home directory. The file should contain
a JSON object whose only field, ``nodes``, contains a list of node
descriptions.
Each node description should contain the required fields:
* ``pm_type`` - driver for Ironic nodes, see `Ironic Hardware Types`_
for details
* ``pm_addr`` - node BMC IP address (hypervisor address in case of virtual
environment)
* ``pm_user``, ``pm_password`` - node BMC credentials
Some fields are optional if you're going to use introspection later:
* ``ports`` - list of baremetal port objects, a map specifying the following
keys: address, physical_network (optional) and local_link_connection
(optional). Optional for bare metal. Example::
"ports": [
{
"address": "52:54:00:87:c8:2f",
"physical_network": "physical-network",
"local_link_connection": {
"switch_info": "switch",
"port_id": "gi1/0/11",
"switch_id": "a6:18:66:33:cb:48"
}
}
]
* ``cpu`` - number of CPUs in the system
* ``arch`` - CPU architecture (common values are ``i386`` and ``x86_64``)
* ``memory`` - memory size in MiB
* ``disk`` - hard drive size in GiB
It is also possible (but optional) to set Ironic node capabilities directly
in the JSON file. This can be useful for assigning node profiles or setting
boot options at registration time:
* ``capabilities`` - Ironic node capabilities. For example::
"capabilities": "profile:compute,boot_option:local"
There are also two additional, optional fields that can be used to help a
user identify machines inside the ``instackenv.json`` file:
* ``name`` - name associated to the node, it will appear in the ``Name``
column while listing nodes
* ``_comment`` - associates a comment with the node (such as its position, a longer
description and so on). Note that this field is not considered by
Ironic during the import
Also if you're working in a diverse environment with multiple architectures
and/or platforms within an architecture you may find it necessary to include a
platform field:
* ``platform`` - String paired with images to fine tune image selection
For example::
{
"nodes": [
{
"name": "node-a",
"pm_type": "ipmi",
"ports": [
{
"address": "fa:16:3e:2a:0e:36",
"physical_network": "ctlplane"
}
],
"cpu": "2",
"memory": "4096",
"disk": "40",
"arch": "x86_64",
"pm_user": "admin",
"pm_password": "password",
"pm_addr": "10.0.0.8",
"_comment": "Room 1 - Rack A - Unit 22/24"
},
{
"name": "node-b",
"pm_type": "ipmi",
"ports": [
{
"address": "fa:16:3e:da:39:c9",
"physical_network": "ctlplane"
}
],
"cpu": "2",
"memory": "4096",
"disk": "40",
"arch": "x86_64",
"pm_user": "admin",
"pm_password": "password",
"pm_addr": "10.0.0.15",
"_comment": "Room 1 - Rack A - Unit 26/28"
},
{
"name": "node-n",
"pm_type": "ipmi",
"ports": [
{
"address": "fa:16:3e:51:9b:68",
"physical_network": "leaf1"
}
],
"cpu": "2",
"memory": "4096",
"disk": "40",
"arch": "x86_64",
"pm_user": "admin",
"pm_password": "password",
"pm_addr": "10.0.0.16",
"_comment": "Room 1 - Rack B - Unit 10/12"
}
]
}
.. note::
You don't need to create this file if you plan on using
:doc:`../provisioning/node_discovery`.
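Once ``instackenv.json`` is in place, the nodes it describes can be registered and
then listed, for example::

    openstack overcloud node import ~/instackenv.json
    openstack baremetal node list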
Ironic Hardware Types
^^^^^^^^^^^^^^^^^^^^^
Ironic *hardware types* provide various levels of support for different
hardware. Hardware types, introduced in the Ocata cycle, are a new generation
of Ironic *drivers*. Previously, the word *drivers* was used to refer to what
is now called *classic drivers*. See `Ironic drivers documentation`_ for a full
explanation of similarities and differences between the two types.
Hardware types are enabled in the ``undercloud.conf`` using the
``enabled_hardware_types`` configuration option. Classic drivers are enabled
using the ``enabled_drivers`` option. The latter has been deprecated since the Queens
release and should no longer be used. See the `hardware types migration guide`_
for information on how to migrate existing nodes.
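As an illustrative snippet (the exact list of types depends on your hardware), the
option lives in the ``[DEFAULT]`` section of ``undercloud.conf``::

    [DEFAULT]
    enabled_hardware_types = ipmi,redfish,ilo,idrac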
Both hardware types and classic drivers can be equally used in the
``pm_type`` field of the ``instackenv.json``.
See https://docs.openstack.org/ironic/latest/admin/drivers.html for the most
up-to-date information about Ironic hardware types and hardware
interfaces, but note that this page always targets Ironic git master, not the
release we use.
Generic Hardware Types
~~~~~~~~~~~~~~~~~~~~~~~
* The most generic hardware type is ipmi_. It uses the `ipmitool`_ utility
to manage a bare metal node, and supports a vast variety of hardware.
.. admonition:: Stable Branch
:class: stable
This hardware type is supported starting with the Pike release. For older
releases use the functionally equivalent ``pxe_ipmitool`` driver.
.. admonition:: Virtual
:class: virtual
This hardware type can be used for developing and testing TripleO in a
:doc:`virtual` as well.
* Another generic hardware type is redfish_. It provides support for the
quite new `Redfish standard`_, which aims to replace IPMI eventually as
a generic protocol for managing hardware. In addition to the ``pm_*`` fields
mentioned above, this hardware type also requires setting ``pm_system_id``
to the full identifier of the node in the controller (e.g.
``/redfish/v1/Systems/42``).
.. admonition:: Stable Branch
:class: stable
Redfish support was introduced in the Pike release.
The following generic hardware types are not enabled by default:
* The snmp_ hardware type supports controlling PDUs for power management.
It requires the boot device to be manually configured on the nodes.
* Finally, the ``manual-management`` hardware type
skips power and boot device management completely. It requires manual power
and boot operations to be done at the right moments, so it's not recommended
for general production use.
.. admonition:: Stable Branch
:class: stable
The functional analog of this hardware type before the Queens release
was the ``fake_pxe`` driver.
Vendor Hardware Types
~~~~~~~~~~~~~~~~~~~~~
TripleO also supports vendor-specific hardware types for some types
of hardware:
* ilo_ targets HPE Proliant Gen 8 and Gen 9 systems.
.. admonition:: Stable Branch
:class: stable
Use the ``pxe_ilo`` classic driver before the Queens release.
* idrac_ targets DELL 12G and newer systems.
.. admonition:: Stable Branch
:class: stable
Use the ``pxe_drac`` classic driver before the Queens release.
The following hardware types are supported but not enabled by default:
* irmc_ targets FUJITSU PRIMERGY servers.
* cisco-ucs-managed_ targets UCS Manager managed Cisco UCS B/C series servers.
* cisco-ucs-standalone_ targets standalone Cisco UCS C series servers.
.. note::
Contact a specific vendor team if you have problems with any of these
drivers, as the TripleO team often cannot assist with them.
.. _Ironic drivers documentation: https://docs.openstack.org/ironic/latest/install/enabling-drivers.html
.. _hardware types migration guide: https://docs.openstack.org/ironic/latest/admin/upgrade-to-hardware-types.html
.. _ipmitool: http://sourceforge.net/projects/ipmitool/
.. _Redfish standard: https://www.dmtf.org/standards/redfish
.. _ipmi: https://docs.openstack.org/ironic/latest/admin/drivers/ipmitool.html
.. _redfish: https://docs.openstack.org/ironic/latest/admin/drivers/redfish.html
.. _snmp: https://docs.openstack.org/ironic/latest/admin/drivers/snmp.html
.. _ilo: https://docs.openstack.org/ironic/latest/admin/drivers/ilo.html
.. _idrac: https://docs.openstack.org/ironic/latest/admin/drivers/idrac.html
.. _irmc: https://docs.openstack.org/ironic/latest/admin/drivers/irmc.html
.. _cisco-ucs-managed: https://docs.openstack.org/ironic/latest/admin/drivers/ucs.html
.. _cisco-ucs-standalone: https://docs.openstack.org/ironic/latest/admin/drivers/cimc.html
.. _validations_no_undercloud: ../../validations/ansible.html


@ -1,12 +0,0 @@
Environment Setup
=================
|project| can be used in baremetal as well as in virtual environments. This
section contains instructions on how to set up your environments properly.
.. toctree::
:maxdepth: 2
standalone
virtual
baremetal


@ -1,12 +0,0 @@
Standalone Environment
----------------------
.. include_after_header
|project| can be used as a standalone environment with all services installed
on a single virtual or baremetal machine.
The machine you are deploying on must meet the following minimum specifications:
* 4 core CPU
* 8 GB memory
* 60 GB free disk space


@ -1,14 +0,0 @@
Virtual Environment
-------------------
|project| can be used in a virtual environment using virtual machines instead
of actual baremetal. However, one baremetal machine is still
needed to act as the host for the virtual machines.
.. warning:: Virtual deployments with TripleO are for development and testing
purposes only. This method cannot be used for production-ready
deployments.
The tripleo-quickstart project is used for creating virtual environments
for use with TripleO. Please see that documentation at
https://docs.openstack.org/tripleo-quickstart/


@ -1,28 +0,0 @@
Configuring API access policies
===============================
Each OpenStack service has its own role-based access policies.
They determine which user can access which resources in which way,
and are defined in the service's policy.json file.
.. Warning::
While editing policy.json is supported, modifying the policy can
have unexpected side effects and is not encouraged.
|project| supports custom API access policies through parameters in
TripleO Heat Templates.
To enable this feature, you need to set the appropriate parameters for the
services whose policies you want to customize.
Creating an environment file and adding the following arguments to your
``openstack overcloud deploy`` command will do the trick::
$ cat ~/nova-policies.yaml
parameter_defaults:
NovaApiPolicies: { nova-context_is_admin: { key: 'compute:get_all', value: '' } }
-e nova-policies.yaml
In this example, we allow anyone to list Nova instances, which is very insecure but
can be done with this feature.


@ -1,16 +0,0 @@
Backend Configuration
=====================
Documentation on how to enable and configure various backends available for
OpenStack projects.
.. toctree::
deploy_manila
cinder_custom_backend
cinder_netapp
deployed_ceph
ceph_external
domain_specific_ldap_backends
swift_external

File diff suppressed because it is too large


@ -1,394 +0,0 @@
Use an external Ceph cluster with the Overcloud
===============================================
|project| supports use of an external Ceph cluster for certain services deployed
in the Overcloud.
Deploying Cinder, Glance, Nova, Gnocchi with an external Ceph RBD service
-------------------------------------------------------------------------
The overcloud may be configured to use an external Ceph RBD service by
enabling a particular environment file when deploying the
Overcloud. For Wallaby and newer include
`environments/external-ceph.yaml`.
For Ocata and earlier use
`environments/puppet-ceph-external.yaml`. For Pike through Victoria
use `environments/ceph-ansible/ceph-ansible-external.yaml` and install
ceph-ansible on the Undercloud as described in
:doc:`../deployment/index`. For Pike through Victoria a Ceph container
is downloaded and executed on Overcloud nodes to use Ceph binaries
only available within the container. These binaries are used to create
the CephX client keyrings on the overcloud. Thus, between Pike and
Victoria it was necessary when preparing to deploy a containerized
overcloud, as described in
:doc:`../deployment/container_image_prepare`, to include the Ceph
container even if that overcloud will only connect to an external Ceph
cluster. Starting in Wallaby, neither ceph-ansible nor cephadm configures
Ceph clients; instead the tripleo-ansible role tripleo_ceph_client
is used. Thus, it is not necessary to install ceph-ansible or prepare
a Ceph container when configuring external Ceph in Wallaby and
newer. Simply include `environments/external-ceph.yaml` in the
deployment. All parameters described below remain consistent
regardless of external Ceph configuration method.
Some of the parameters in the above environment files can be overridden::
parameter_defaults:
# Enable use of RBD backend in nova-compute
NovaEnableRbdBackend: true
# Enable use of RBD backend in cinder-volume
CinderEnableRbdBackend: true
# Backend to use for cinder-backup
CinderBackupBackend: ceph
# Backend to use for glance
GlanceBackend: rbd
# Backend to use for gnocchi-metricsd
GnocchiBackend: rbd
# Name of the Ceph pool hosting Nova ephemeral images
NovaRbdPoolName: vms
# Name of the Ceph pool hosting Cinder volumes
CinderRbdPoolName: volumes
# Name of the Ceph pool hosting Cinder backups
CinderBackupRbdPoolName: backups
# Name of the Ceph pool hosting Glance images
GlanceRbdPoolName: images
# Name of the Ceph pool hosting Gnocchi metrics
GnocchiRbdPoolName: metrics
# Name of the user to authenticate with the external Ceph cluster
CephClientUserName: openstack
The pools and the CephX user **must** be created on the external Ceph cluster
before deploying the Overcloud. TripleO expects a single user, configured via
CephClientUserName, to have the capabilities to use all the OpenStack pools;
the user could be created with a command like this::
ceph auth add client.openstack mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=backups, allow rwx pool=metrics'
In addition to the above customizations, the deployer **needs** to provide
at least three required parameters related to the external Ceph cluster::
parameter_defaults:
# The cluster FSID
CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
# The CephX user auth key
CephClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
# The list of Ceph monitors
CephExternalMonHost: '172.16.1.7, 172.16.1.8, 172.16.1.9'
The above parameters will result in TripleO creating a Ceph
configuration file and cephx keyring in /etc/ceph on every
node which needs to connect to Ceph to use the RBD service.
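The result can be checked on any such node, for example (the output shown is
illustrative)::

    $ sudo ls /etc/ceph
    ceph.client.openstack.keyring  ceph.conf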
Configuring Ceph Clients for Multiple External Ceph RBD Services
----------------------------------------------------------------
In Train and newer it's possible to use TripleO to deploy an
overcloud which is capable of using the RBD services of multiple
external Ceph clusters. A separate keyring and Ceph configuration file
is created for each external Ceph cluster in /etc/ceph on every
overcloud node which needs to connect to Ceph. This functionality is
provided by the `CephExternalMultiConfig` parameter.
Do not use `CephExternalMultiConfig` when configuring an overcloud to
use only one external Ceph cluster. Instead follow the example in the
previous section. The example in the previous section and the method
of deploying an internal Ceph cluster documented in
:doc:`deployed_ceph` are mutually exclusive per Heat stack. The
following scenarios are the only supported ones in which
`CephExternalMultiConfig` may be used per Heat stack:
* One external Ceph cluster configured, as described in previous
section, in addition to multiple external Ceph clusters configured
via `CephExternalMultiConfig`.
* One internal Ceph cluster, as described in :doc:`deployed_ceph` in
addition to multiple external ceph clusters configured via
`CephExternalMultiConfig`.
The `CephExternalMultiConfig` parameter is used like this::
CephExternalMultiConfig:
- cluster: 'ceph2'
fsid: 'af25554b-42f6-4d2b-9b9b-d08a1132d3e8'
external_cluster_mon_ips: '172.18.0.5,172.18.0.6,172.18.0.7'
keys:
- name: "client.openstack"
caps:
mgr: "allow *"
mon: "profile rbd"
osd: "profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images"
key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB1Q=="
mode: "0600"
dashboard_enabled: false
- cluster: 'ceph3'
fsid: 'e2cba068-5f14-4b0f-b047-acf375c0004a'
external_cluster_mon_ips: '172.18.0.8,172.18.0.9,172.18.0.10'
keys:
- name: "client.openstack"
caps:
mgr: "allow *"
mon: "profile rbd"
osd: "profile rbd pool=volumes, profile rbd pool=backups, profile rbd pool=vms, profile rbd pool=images"
key: "AQCwmeRcAAAAABAA6SQU/bGqFjlfLro5KxrB2Q=="
mode: "0600"
dashboard_enabled: false
The above, in addition to the parameters from the previous section,
will result in an overcloud with the following files in /etc/ceph:
* ceph.client.openstack.keyring
* ceph.conf
* ceph2.client.openstack.keyring
* ceph2.conf
* ceph3.client.openstack.keyring
* ceph3.conf
The first two files which start with `ceph` will be created based on
the parameters discussed in the previous section. The next two files
which start with `ceph2` will be created based on the parameters from
the first list item within the `CephExternalMultiConfig` parameter
(e.g. `cluster: ceph2`). The last two files which start with `ceph3`
will be created based on the parameters from the last list item within
the `CephExternalMultiConfig` parameter (e.g. `cluster: ceph3`).
The last four files in the list which start with `ceph2` or `ceph3`
will also contain parameters found in the first two files which
start with `ceph` except where those parameters intersect. When
there's an intersection those parameters will be overridden with the
values from the `CephExternalMultiConfig` parameter. For example there
will only be one FSID in each Ceph configuration file with the
following values per file:
* ceph.conf will have `fsid = 4b5c8c0a-ff60-454b-a1b4-9747aa737d19`
(as seen in the previous section)
* ceph2.conf will have `fsid = af25554b-42f6-4d2b-9b9b-d08a1132d3e8`
* ceph3.conf will have `fsid = e2cba068-5f14-4b0f-b047-acf375c0004a`
However, if the `external_cluster_mon_ips` key was not set within
the `CephExternalMultiConfig` parameter, then all three Ceph
configuration files would contain `mon host = 172.16.1.7, 172.16.1.8,
172.16.1.9`, as seen in the previous section. Thus, it is necessary to
override the `external_cluster_mon_ips` key within each list item of
the `CephExternalMultiConfig` parameter because each external Ceph
cluster will have its own set of unique monitor IPs.
The `CephExternalMonHost` parameter and the `external_cluster_mon_ips` key map
one to one but have different names because each element of the
`CephExternalMultiConfig` list should contain a map of keys and values
directly supported by ceph-ansible. See `ceph-ansible/group_vars`_ for
an example of all possible keys.
The following parameters are the minimum necessary to configure an
overcloud to connect to an external ceph cluster:
* cluster: The name of the configuration file and key name prefix.
This name defaults to "ceph", so if this parameter is not overridden
there will be a name collision. It does not matter whether the
external ceph cluster's actual name is already "ceph": for client role
configuration this parameter is only used for setting a unique name
for the configuration and key files.
* fsid: The FSID of the external ceph cluster.
* external_cluster_mon_ips: The list of monitor IPs of the external
ceph cluster as a single string where each IP is comma delimited.
If the external Ceph cluster is using both the v1 and v2 MSGR
protocol this value may look like '[v2:10.0.0.1:3300,
v1:10.0.0.1:6789], [v2:10.0.0.2:3300, v1:10.0.0.2:6789],
[v2:10.0.0.3:3300, v1:10.0.0.3:6789]'.
* dashboard_enabled: Always set this value to false when using
`CephExternalMultiConfig`. It ensures that the Ceph Dashboard is not
installed. It is not supported to use ceph-ansible dashboard roles
to communicate with an external Ceph cluster so not passing this
parameter with a value of false within `CephExternalMultiConfig`
will result in a failed deployment because the default value of true
will be used.
* keys: This is a list of maps where each map defines CephX keys which
OpenStack clients will use to connect to an external Ceph cluster.
As stated in the previous section, the pools and the CephX user must
be created on the external Ceph cluster before deploying the
overcloud. The format of each map is the same as found in
ceph-ansible. Thus, if the external Ceph cluster was deployed by
ceph-ansible, then the deployer of that cluster could share that map
with the TripleO deployer so that it could be used as a list item of
`CephExternalMultiConfig`. Similarly, the `CephExtraKeys` parameter,
described in the :doc:`deployed_ceph` documentation, has the same
syntax.
Deploying Manila with an External CephFS Service
------------------------------------------------
If choosing to configure Manila with Ganesha as NFS gateway for CephFS,
with an external Ceph cluster, then add `environments/manila-cephfsganesha-config.yaml`
to the list of environment files used to deploy the overcloud and also
configure the following parameters::
parameter_defaults:
ManilaCephFSDataPoolName: manila_data
ManilaCephFSMetadataPoolName: manila_metadata
ManilaCephFSCephFSAuthId: 'manila'
CephManilaClientKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
These represent the data and metadata pools in use by the MDS for
the CephFS filesystems, the CephX user to use and its secret.
As for the other services, the pools and CephX user must be created on the
external Ceph cluster before attempting the deployment of the overcloud.
The user can be created with a command like the following::
ceph auth add client.manila mgr "allow *" mon "allow r, allow command 'auth del', allow command 'auth caps', allow command 'auth get', allow command 'auth get-or-create'" mds "allow *" osd "allow rw"
Compatibility Options
---------------------
As of the Train release TripleO will install Ceph Nautilus. If the
external Ceph cluster uses the Hammer release instead, pass the
following parameters to enable backward compatibility features::
parameter_defaults:
ExtraConfig:
ceph::profile::params::rbd_default_features: '1'
Deployment of an Overcloud with External Ceph
---------------------------------------------
Finally add the above environment files to the deploy commandline. For
Wallaby and newer use::
openstack overcloud deploy --templates -e /usr/share/openstack-tripleo-heat-templates/environments/external-ceph.yaml -e ~/my-additional-ceph-settings.yaml
For Train use::
openstack overcloud deploy --templates -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible-external.yaml -e ~/my-additional-ceph-settings.yaml
Standalone Ansible Roles for External Ceph
------------------------------------------
To configure an overcloud to use an external Ceph cluster, a directory
(e.g. /etc/ceph) in the overcloud containers should be populated with
Ceph configuration files and overcloud services (e.g. Nova) should be
configured to use those files. TripleO provides Ansible roles to do
this standalone without tripleo-heat-templates or config-download.
Single Ceph Cluster
^^^^^^^^^^^^^^^^^^^
The `tripleo_ceph_client_files` Ansible role copies files from a
source directory (`tripleo_ceph_client_files_source`) on the host
where Ansible is run to a destination directory
(`tripleo_ceph_client_config_home`) on the overcloud nodes.
The user must create and populate the
`tripleo_ceph_client_files_source` directory with actual Ceph
configuration and cephx key files before running the role. For
example::
$ ls -l /home/stack/ceph_files/
total 16
-rw-r--r--. 1 stack stack 245 Nov 14 13:40 ceph.client.openstack.keyring
-rw-r--r--. 1 stack stack 173 Nov 14 13:40 ceph.conf
If the above directory exists on the host where the `ansible-playbook`
command is run, then the `tripleo_ceph_client_files_source` parameter
should be set to `/home/stack/ceph_files/`. The optional parameter
`tripleo_ceph_client_config_home` defaults to
`/var/lib/tripleo-config/ceph` since OpenStack containers will bind
mount this directory to `/etc/ceph`. The `tripleo_nova_libvirt`
Ansible role will add a secret key to libvirt so that it uses the
cephx key put in place by the `tripleo_ceph_client_files` role; it
does this if either `tripleo_nova_libvirt_enable_rbd_backend` or
`tripleo_cinder_enable_rbd_backend` are true. When these roles
are used to configure a compute node the following `group_vars` should
be set::
tripleo_ceph_client_files_source: /home/stack/ceph_files
tripleo_ceph_client_config_home: /var/lib/tripleo-config/ceph
tripleo_nova_libvirt_enable_rbd_backend: true
tripleo_cinder_enable_rbd_backend: true
The `tripleo_ceph_client_files` role may then be included in a
playbook as follows in order to configure a standalone compute node to
use a single Ceph cluster::
- name: configure ceph client
import_role:
name: tripleo_ceph_client_files
In order for Nova to use the Ceph cluster, the `libvirt` section of
the `nova.conf` file should be configured. The `tripleo_nova_compute`
role `tripleo_nova_compute_config_overrides` variable may be set as
follows in the inventory to set the `libvirt` values along with
others::
Compute:
vars:
tripleo_nova_compute_config_overrides:
libvirt:
images_rbd_ceph_conf: /etc/ceph/ceph.conf
images_rbd_glance_copy_poll_interval: '15'
images_rbd_glance_copy_timeout: '600'
images_rbd_glance_store_name: default_backend
images_rbd_pool: vms
images_type: rbd
rbd_secret_uuid: 604c9994-1d82-11ed-8ae5-5254003d6107
rbd_user: openstack
TripleO's convention is to set the `rbd_secret_uuid` to the FSID of
the Ceph cluster. The FSID should be in the ceph.conf file. The
`tripleo_nova_libvirt` role will use `virsh secret-*` commands so that
libvirt can retrieve the cephx secret using the FSID as a key. This
can be confirmed after running Ansible with `podman exec
nova_virtsecretd virsh secret-get-value $FSID`.
The `tripleo_ceph_client_files` role only supports the _configure_
aspect of the standalone tripleo-ansible roles because it just
configures one or more pairs of files on its target nodes. Thus, the
`import_role` example above could be placed in a playbook file like
`deploy-tripleo-openstack-configure.yml`, before the roles for
`tripleo_nova_libvirt` and `tripleo_nova_compute` are imported.
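Putting the pieces together, a minimal playbook sketch might look like the
following (the host group name and inline variables are assumptions; in practice
the variables usually come from the inventory `group_vars` shown above)::

    - hosts: Compute
      become: true
      vars:
        tripleo_ceph_client_files_source: /home/stack/ceph_files
        tripleo_ceph_client_config_home: /var/lib/tripleo-config/ceph
      tasks:
        - name: configure ceph client
          import_role:
            name: tripleo_ceph_client_files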
Multiple Ceph Clusters
^^^^^^^^^^^^^^^^^^^^^^
To configure more than one Ceph backend include the
`tripleo_ceph_client_files` role from the single cluster example
above. Populate the `tripleo_ceph_client_files_source` directory with
all of the ceph configuration and cephx key files. For example::
$ ls -l /home/stack/ceph_files/
total 16
-rw-r--r--. 1 stack stack 213 Nov 14 13:41 ceph2.client.openstack.keyring
-rw-r--r--. 1 stack stack 228 Nov 14 13:41 ceph2.conf
-rw-r--r--. 1 stack stack 245 Nov 14 13:40 ceph.client.openstack.keyring
-rw-r--r--. 1 stack stack 173 Nov 14 13:40 ceph.conf
For multiple Ceph clusters, the `tripleo_nova_libvirt` role expects a
`tripleo_cinder_rbd_multi_config` Ansible variable like this::
tripleo_cinder_rbd_multi_config:
ceph2:
CephClusterName: ceph2
CephClientUserName: openstack
It is not necessary to put the default Ceph cluster (named "ceph" from
the single cluster example) in `tripleo_cinder_rbd_multi_config`. Only
the additional clusters (e.g. ceph2) need to be listed, and their keys must
be named so that they match the `CephClusterName`. In the above example, the
`CephClusterName` value "ceph2" matches the "ceph2.conf" and
"ceph2.client.openstack.keyring". Also, the `CephClientUserName` value
"openstack" matches "ceph2.client.openstack.keyring". The
`tripleo_nova_libvirt` Ansible role uses the
`tripleo_cinder_rbd_multi_config` map as a guide to know which libvirt
secrets to create and which cephx keys to make available within the
Nova containers.
If the combined examples above from the single cluster section for
the primary cluster "ceph" and this section for the secondary Ceph
cluster "ceph2" are used, then the directory defined by
`tripleo_ceph_client_config_home` will be populated with four files:
`ceph.conf`, `ceph2.conf`, `ceph.client.openstack.keyring` and
`ceph2.client.openstack.keyring`, which will be mounted into the Nova
containers, and two libvirt secrets will be created, one for each cephx
key. To add more Ceph clusters, extend the list
`tripleo_cinder_rbd_multi_config` and populate
`tripleo_ceph_client_files_source` with additional files.
.. _`ceph-ansible/group_vars`: https://github.com/ceph/ceph-ansible/tree/master/group_vars


@ -1,69 +0,0 @@
Configuring Cinder with a Custom Unmanaged Backend
==================================================
This guide assumes that your undercloud is already installed and ready to
deploy an overcloud.
Adding a custom backend to Cinder
---------------------------------
It is possible to provide the config settings to add an arbitrary and
unmanaged backend to Cinder at deployment time via Heat environment files.
Each backend is represented in `cinder.conf` with a ``stanza`` and a
reference to it from the `enabled_backends` key. The keys valid in the
backend ``stanza`` are dependent on the actual backend driver and
unknown to Cinder.
For example, to provision two additional backends in Cinder, one could
create a Heat environment file with the following contents::
parameter_defaults:
ExtraConfig:
cinder::config::cinder_config:
netapp1/volume_driver:
value: cinder.volume.drivers.netapp.common.NetAppDriver
netapp1/netapp_storage_family:
value: ontap_7mode
netapp1/netapp_storage_protocol:
value: iscsi
netapp1/netapp_server_hostname:
value: 1.1.1.1
netapp1/netapp_server_port:
value: 80
netapp1/netapp_login:
value: root
netapp1/netapp_password:
value: 123456
netapp1/volume_backend_name:
value: netapp_1
netapp2/volume_driver:
value: cinder.volume.drivers.netapp.common.NetAppDriver
netapp2/netapp_storage_family:
value: ontap_7mode
netapp2/netapp_storage_protocol:
value: iscsi
netapp2/netapp_server_hostname:
value: 2.2.2.2
netapp2/netapp_server_port:
value: 80
netapp2/netapp_login:
value: root
netapp2/netapp_password:
value: 123456
netapp2/volume_backend_name:
value: netapp_2
cinder_user_enabled_backends: ['netapp1','netapp2']
This will not interfere with the deployment of the other backends managed by
TripleO, like Ceph or NFS, and will just add these two to the list of
backends enabled in Cinder.
Remember to add such an environment file to the deploy command line::
openstack overcloud deploy [other overcloud deploy options] -e ~/my-backends.yaml
.. note::
The :doc:`extra_config` doc has more details on the usage of the different
ExtraConfig interfaces.


@ -1,60 +0,0 @@
Configuring Cinder with a NetApp Backend
========================================
This guide assumes that your undercloud is already installed and ready to
deploy an overcloud.
Deploying the Overcloud
-----------------------
.. note::
The :doc:`../deployment/template_deploy` doc has a more detailed explanation of the
following steps.
#. Copy the NetApp configuration file to your home directory::
sudo cp /usr/share/openstack-tripleo-heat-templates/environments/cinder-netapp-config.yaml ~
#. Edit the permissions (user is typically ``stack``)::
sudo chown $USER ~/cinder-netapp-config.yaml
sudo chmod 755 ~/cinder-netapp-config.yaml
#. Edit the parameters in this file to fit your requirements. Ensure that the following line is changed::
OS::TripleO::ControllerExtraConfigPre: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
#. Continue following the TripleO instructions for deploying an overcloud.
Before entering the command to deploy the overcloud, add the environment
file that you just configured as an argument::
openstack overcloud deploy --templates -e ~/cinder-netapp-config.yaml
#. Wait for the completion of the overcloud deployment process.
Creating a NetApp Volume
------------------------
.. note::
The following steps will refer to running commands as an admin user or a
tenant user. Sourcing the ``overcloudrc`` file will authenticate you as
the admin user. You can then create a tenant user and use environment
files to switch between them.
#. Create a new volume type that maps to the new NetApp backend [admin]::
cinder type-create [name]
cinder type-key [name] set volume_backend_name=tripleo_netapp
#. Create the volume [admin]::
cinder create --volume-type [type name] [size of volume]
#. Attach the volume to a server::
nova volume-attach <server> <volume> <device>
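For instance, with hypothetical names and a 10 GB volume, the sequence above might
look like::

    cinder type-create netapp
    cinder type-key netapp set volume_backend_name=tripleo_netapp
    cinder create --volume-type netapp 10
    nova volume-attach myserver $VOLUME_ID /dev/vdb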


@ -1,58 +0,0 @@
.. _composable_services:
Deploying with Composable Services
==================================
TripleO offers the option of deploying with a user-defined list of services
per role (where "role" means a group of nodes, e.g. "Controller", and "service"
refers to the individual services or configurations, e.g. "Nova API").
Deploying with custom service lists
-----------------------------------
Each role to be used in the deployment is defined in a `roles_data.yaml` file.
There is a sample file in `/usr/share/openstack-tripleo-heat-templates`, or the
tripleo-heat-templates_ git repository. Additional example roles are located in
the `/usr/share/openstack-tripleo-heat-templates/roles` directory and can be used
to create a custom `roles_data.yaml` file. See :doc:`custom_roles` for additional
usage details.
The data in `roles_data.yaml` is used to set the defaults for per-role parameters
e.g `ControllerServices`. These defaults can be overridden via environment
files, e.g::
cat > keystone_only_params.yaml << EOF
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::RabbitMQ
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::Keepalived
ComputeCount: 0
EOF
The example above overrides the default list of services, and instead deploys
Keystone and the services it requires. It also sets the ComputeCount to zero
to enable a minimal "keystone only" deployment on a single node.
You can then pass the environment file on deployment as follows::
openstack overcloud deploy -e keystone_only_params.yaml
The same approach can be used for any role.
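As another minimal sketch of the same pattern, the environment file below trims the
Compute role to a hand-picked service list; the service names are illustrative (not a
complete or recommended set) and should be taken from the ``ServicesDefault`` list in
your release's ``roles_data.yaml``::

    cat > compute_services_params.yaml << EOF
    parameter_defaults:
      ComputeServices:
        - OS::TripleO::Services::NovaCompute
        - OS::TripleO::Services::NovaLibvirt
        - OS::TripleO::Services::ComputeNeutronOvsAgent
    EOF

    openstack overcloud deploy -e compute_services_params.yaml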
.. warning::
While considerable flexibility is available regarding service placement with
these interfaces, the flexible placement of pacemaker managed services is only
available since the Ocata release.
.. warning::
In general moving control-plane services to the Compute role is not
recommended, as the compute nodes require a different upgrade lifecycle
and thus control-plane services on this role may present problems during
major upgrades between releases.
.. _tripleo-heat-templates: https://opendev.org/openstack/tripleo-heat-templates

View File

@ -1,83 +0,0 @@
Manage Virtual Persistent Memory (vPMEM)
=====================================================
Virtual Persistent Memory (vPMEM) is a Nova feature that allows exposing
Persistent Memory (PMEM) namespaces to guests using the libvirt compute driver.
This guide shows how the vPMEM feature is supported in the TripleO deployment
framework. For an in-depth description of Nova's vPMEM feature, check the Nova
documentation: `Attaching virtual persistent memory to guests
<https://docs.openstack.org/nova/latest/admin/virtual-persistent-memory.html>`_
.. warning::
The vPMEM feature is only available in Train (20.0.0) or later releases.
.. contents::
:depth: 3
:backlinks: none
Prerequisite
------------
Operators need to properly configure the PMEM hardware before deploying an overcloud
with vPMEM support. An example of such hardware is Intel Optane DC Persistent Memory.
Intel provides a tool (`ipmctl <https://software.intel.com/en-us/articles/quick-start-guide-configure-intel-optane-dc-persistent-memory-on-linux>`_)
to configure the PMEM hardware.
Operators need to configure the hardware in such a way that TripleO can create
`PMEM namespaces <http://pmem.io/ndctl/ndctl-create-namespace.html>`_ in **devdax** mode.
TripleO currently supports one backend NVDIMM region, so in case of multiple NVDIMMs
an Interleaved Region needs to be configured.
TripleO vPMEM parameters
------------------------
The following parameters are used within TripleO to configure vPMEM:
.. code::
NovaPMEMMappings:
type: string
description: >
PMEM namespace mappings as backend for vPMEM feature. This parameter
sets Nova's `pmem_namespaces` configuration options. PMEM namespaces
needs to be create manually or with conjunction with `NovaPMEMNamespaces`
parameter.
Requires format: $LABEL:$NSNAME[|$NSNAME][,$LABEL:$NSNAME[|$NSNAME]].
default: ""
tags:
- role_specific
NovaPMEMNamespaces:
type: string
description: >
Creates PMEM namespaces on the host server using `ndctl` tool
through Ansible.
Requires format: $SIZE:$NSNAME[,$SIZE:$NSNAME...].
$SIZE supports the suffixes "k" or "K" for KiB, "m" or "M" for MiB, "g"
or "G" for GiB and "t" or "T" for TiB.
NOTE: This requires properly configured NVDIMM regions and enough space
for requested namespaces.
default: ""
tags:
- role_specific
Both parameters are role specific and should be used with a custom role. Please check the documentation on
how to use `Role-Specific Parameters <https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/role_specific_parameters.html>`_.
Examples
--------
.. code::
parameter_defaults:
ComputePMEMParameters:
NovaPMEMMappings: "6GB:ns0|ns1|ns2,LARGE:ns3"
NovaPMEMNamespaces: "6G:ns0,6G:ns1,6G:ns2,100G:ns3"
The example above performs the following steps:
* ensure the **ndctl** tool is installed on hosts with the **ComputePMEM** role
* create PMEM namespaces as specified in the **NovaPMEMNamespaces** parameter:
  - ns0, ns1, ns2 with size 6GiB
  - ns3 with size 100GiB
* set the Nova parameter **pmem_namespaces** in nova.conf to map the created namespaces to vPMEM as specified in **NovaPMEMMappings**.
In this example the label '6GB' will map to one of the ns0, ns1 or ns2 namespaces and the label 'LARGE' will map to the ns3 namespace.
After deployment you need to configure flavors as described in the Nova documentation: `Nova: Configure a flavor <https://docs.openstack.org/nova/latest/admin/virtual-persistent-memory.html#configure-a-flavor>`_.
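A minimal sketch of that flavor configuration consuming the mappings above; the flavor
name and sizing are illustrative, and the ``hw:pmem`` extra spec is the one described
in the linked Nova documentation:

.. code-block:: bash

   # Create a flavor and request one vPMEM namespace with the '6GB' label
   # defined in NovaPMEMMappings (flavor name and sizing are placeholders).
   openstack flavor create --ram 4096 --disk 20 --vcpus 2 m1.pmem-small
   openstack flavor set --property hw:pmem='6GB' m1.pmem-small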

View File

@ -1,579 +0,0 @@
.. _custom_networks:
Deploying with Custom Networks
==============================
TripleO offers the option of deploying with a user-defined list of networks,
where each network can be enabled (or not) for each role (group of servers) in
the deployment.
Default networks
----------------
TripleO offers a default network topology when deploying with network isolation
enabled, and this is reflected in the default-network-isolation_ file in
tripleo-heat-templates_.
.. admonition:: Victoria and prior releases
In Victoria and prior releases the default network topology is reflected in
the network_data.yaml_ file in tripleo-heat-templates_.
These default networks are as follows:
* ``External`` - External network traffic (disabled by default for
Compute/Storage nodes)
* ``InternalApi`` - Internal API traffic, most intra-service traffic uses this
network by default
* ``Storage`` - Storage traffic
* ``StorageMgmt`` - Storage management traffic (such as replication traffic
between storage nodes)
* ``Tenant`` - Tenant networks for compute workloads running on the cloud
Deploying with custom networks
------------------------------
Each network is defined in the ``network_data`` YAML file. There are sample
files in ``/usr/share/openstack-tripleo-heat-templates/network-data-samples``,
or the tripleo-heat-templates_ git repository which can be copied and modified
as needed.
The ``network_data`` YAML file contains a list of networks, with definitions
like:
.. code-block:: yaml
- name: CustomNetwork
  vip: false
  name_lower: custom_network
  subnets:
    custom_network_subnet:
      ip_subnet: 172.16.6.0/24
      allocation_pools:
        - start: 172.16.6.4
          end: 172.16.6.250
      gateway_ip: 172.16.6.1
.. admonition:: Victoria and prior releases
Victoria and releases prior to it used a slightly different ``network_data``
YAML.
.. code-block:: yaml
- name: CustomNetwork
vip: false
name_lower: custom_network
ip_subnet: '172.16.6.0/24'
allocation_pools: [{'start': '172.16.6.4', 'end': '172.16.6.250'}]
gateway_ip: '172.16.6.1'
The data in the ``network_data`` YAML definition is used to create and update
the network and subnet API resources in Neutron on the undercloud. It is also
used to perform templating with jinja2_ such that arbitrary user-defined
networks may be added, and the default networks may be modified or removed.
The steps to define your custom networks are:
#. Copy one of the sample ``network_data`` YAML definitions provided by
tripleo-heat-templates_, for example::
cp /usr/share/openstack-tripleo-heat-templates/network-data-samples/default-network-isolation.yaml \
custom_network_data.yaml
.. admonition:: Victoria and prior releases
In Victoria and earlier releases the sample network data YAML was in a
different location.
::
cp /usr/share/openstack-tripleo-heat-templates/network_data.yaml custom_network_data.yaml
#. Modify the ``custom_network_data.yaml`` file as required. The network data
is a list of networks, where each network contains at least the
following items:
:name: Name of the network (mandatory)
:vip: Enable creation of a virtual IP on this network
:subnets: Dictionary of one or more subnet definition items, keyed by the
subnet name.
:subnet_name: Name of the subnet
:ip_subnet: IP/CIDR, e.g. ``'10.0.0.0/24'``
:allocation_pools: IP range list, e.g. ``[{'start':'10.0.0.4', 'end':'10.0.0.250'}]``
:gateway_ip: Gateway for the network
:vlan: Vlan ID for this network. (supported in Queens and later)
See `Network data YAML options`_ for a list of all documented options for
the ``network_data`` YAML network definition.
.. admonition:: Victoria and prior releases
Victoria and earlier releases require the first subnet definition **not**
to be in the *subnets* dictionary.
:name: Name of the network (mandatory)
:vip: Enable creation of a virtual IP on this network
:vlan: Vlan ID for this network. (supported in Queens and later)
:ip_subnet: IP/CIDR, e.g. ``'10.0.0.0/24'``
:allocation_pools: IP range list, e.g. ``[{'start':'10.0.0.4', 'end':'10.0.0.250'}]``
:gateway_ip: Gateway for the network
Other options are supported, see the documentation in the default
network_data.yaml_ for details.
.. warning::
Currently there is no validation of the network subnet and
allocation_pools, so care must be taken to ensure these are consistent
and do not conflict with any existing networks, otherwise your deployment
may fail or produce unexpected results.
#. Copy one of the sample ``vip_data`` YAML definitions provided by
tripleo-heat-templates_, for example::
cp /usr/share/openstack-tripleo-heat-templates/network-data-samples/vip-data-default-network-isolation.yaml \
custom_vip_data.yaml
.. admonition:: Victoria and prior releases
For Victoria and prior releases the Virtual IP resources are created as
part of the overcloud heat stack. This step is not valid for these
releases.
#. Modify the ``custom_vip_data.yaml`` file as required. The Virtual IP data
is a list of Virtual IP address definitions, each containing at a minimum
the name of the network where the IP address should be allocated.
See `Network Virtual IPs data YAML options`_ for a list of all documented
options for the ``vip_data`` YAML network Virtual IPs definition.
.. admonition:: Victoria and prior releases
For Victoria and prior releases the Virtual IP resources are created as
part of the overcloud heat stack. This step is not valid for these
releases.
#. Copy network configuration templates, add new networks.
Prior to Victoria, Heat templates were used to define nic configuration
templates. With the Victoria release, Ansible jinja2_ templates were
introduced, and replaced the heat templates.
The nic configuration examples in tripleo-heat-templates_ were ported to
Ansible jinja2_ templates located in the tripleo_network_config role in
tripleo-ansible_.
If one of the shipped examples matches, use it! If not, be inspired by the
shipped examples and create a set of custom Ansible jinja2 templates. Please
refer to the :ref:`creating_custom_interface_templates` documentation page
which provide a detailed guide on how to create custom Ansible jinja2
nic config templates.
For example, copy a sample template to a custom location::
cp -r /usr/share/ansible/roles/tripleo_network_config/templates/single_nic_vlans custom-single-nic-vlans
Modify the templates in custom-single-nic-vlans to match your needs.
.. admonition:: Ussuri and prior releases
Prior to Queens, the nic config templates were not dynamically generated,
so it was necessary to copy those that were in use, and add parameters for
any additional networks, for example::
cp -r /usr/share/openstack-tripleo-heat-templates/network/config/single-nic-vlans custom-single-nic-vlans
Each file in ``single-nic-vlans`` will require updating to add
parameters for each custom network. Copy those that exist for the
default networks, and rename to match the *name* field in
``custom_network_data.yaml``.
.. note::
Since Queens, the NIC config templates are dynamically
generated so this step is only necessary when creating custom NIC
config templates, not when just adding a custom network.
#. Set your environment overrides to enable your nic config templates.
Create or update an existing environment file and set the parameter values
to enable your custom nic config templates, for example create a file
``custom-net-single-nic-with-vlans.yaml`` with these parameter settings::
parameter_defaults:
ControllerNetworkConfigTemplate: '/path/to/custom-single-nic-vlans/single_nic_vlans.j2'
CephStorageNetworkConfigTemplate: '/path/to/custom-single-nic-vlans/single_nic_vlans_storage.j2'
ComputeNetworkConfigTemplate: '/path/to/custom-single-nic-vlans/single_nic_vlans.j2'
#. Create the networks on the undercloud and generate the
``networks-deployed-environment.yaml`` which will be used as an environment
file when deploying the overcloud.
::
openstack overcloud network provision \
--output networks-deployed-environment.yaml \
custom_network_data.yaml
.. admonition:: Victoria and prior releases
For Victoria and earlier releases *skip* this step.
There was no command ``openstack overcloud network provision`` in these
releases. Network resources was created as part of the overcloud heat
stack.
.. note:: This step is optional when using the ``--baremetal-deployment``
and ``--vip-data`` options with the ``overcloud deploy`` command.
The deploy command will detect the new format of the network data
YAML definition, run the workflow to create the networks and
include the ``networks-deployed-environment.yaml`` automatically.
#. Create the overcloud network Virtual IPs and generate the
``vip-deployed-environment.yaml`` which will be used as an environment file
when deploying the overcloud.
.. code-block:: bash
$ openstack overcloud network vip provision \
--output ~/templates/vip-deployed-environment.yaml \
~/templates/custom_vip_data.yaml
.. note:: This step is optional if using the ``--vip-data`` option with the
``overcloud deploy`` command. In that case the workflow to create the
Virtual IPs and include the environment is automated.
#. To deploy, pass the ``custom_network_data.yaml`` file via the ``-n``
option to the ``overcloud deploy`` command, for example:
.. code-block:: bash
openstack overcloud deploy --templates \
-n custom_network_data.yaml \
-e baremetal-deployed-environment.yaml \
-e networks-deployed-environment.yaml \
-e vip-deployed-environment.yaml \
-e custom-net-single-nic-with-vlans.yaml
.. note:: baremetal-deployed-environment.yaml refers to baremetal which
**has already been** deployed, as described in
:doc:`../provisioning/baremetal_provision`.
Alternatively include the network, Virtual IPs and baremetal provisioning
in the ``overcloud deploy`` command to do it all in one:
.. code-block:: bash
openstack overcloud deploy --templates \
--networks-file custom_network_data.yaml \
--vip-file custom_vip_data.yaml \
--baremetal-deployment baremetal_deployment.yaml \
--network-config \
-e custom-net-single-nic-with-vlans.yaml
.. note:: ``baremetal_deployment.yaml`` refers to baremetal which **will
be** deployed by the above command. Please refer to
:doc:`../provisioning/baremetal_provision` for a reference on the
``baremetal_deployment.yaml`` used in the above example.
.. admonition:: Victoria and prior releases
::
openstack overcloud deploy --templates \
-n custom_network_data.yaml \
-e custom-net-single-nic-with-vlans.yaml
.. note::
It is also possible to copy the entire tripleo-heat-templates tree, and
modify the ``network_data.yaml`` file in place, then deploy via
``--templates <copy of tht>``.
.. _network_definition_opts:
Network data YAML options
-------------------------
:name:
Name of the network
type: *string*
:name_lower:
*(optional)* Lower case name of the network
type: *string*
default: *name.lower()*
:dns_domain:
*(optional)* DNS domain name for the network
type: *string*
:mtu:
*(optional)* Set the maximum transmission unit (MTU) that is guaranteed to
pass through the data path of the segments in the network.
type: *number*
default: 1600
:service_net_map_replace:
*(optional)* If ``name_lower`` is set to a custom name, this should be set to the
original default name. This field is only necessary when changing the
default network names, not when adding a new custom network.
type: *string*
.. warning:: Please avoid using this option, the correct solution when
changing a *name_lower* of one of the default networks is to
also update the ``ServiceNetMap`` parameter to use the same
custom *name_lower*.
:ipv6:
*(optional)*
type: *boolean*
default: *false*
:vip:
*(optional)* Enable creation of a virtual IP on this network
type: *boolean*
default: *false*
:subnets:
A map of subnets for the network. The collection should contain keys which
define the subnet name. The value for each item is a subnet definition.
Example:
.. code-block:: yaml
subnets:
subnet_name_a:
ip_subnet: 192.0.2.0/24
allocation_pools:
- start: 192.0.2.50
end: 192.0.2.99
gateway_ip: 192.0.2.1
vlan: 102
subnet_name_b:
ip_subnet: 198.51.100.0/24
allocation_pools:
- start: 198.51.100.50
end: 198.51.100.99
gateway_ip: 198.51.100.1
vlan: 101
See `Options for network data YAML subnet definitions`_ for a list of all
documented sub-options for the subnet definitions.
type: *dictionary*
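As a quick reference, a minimal sketch combining several of the options above into a
``network_data`` entry; the network name, DNS domain, VLAN and addresses are
illustrative placeholders only:

.. code-block:: bash

   # Write an illustrative network definition (all values are placeholders).
   cat > custom_network_data.yaml << EOF
   - name: StorageBackup
     name_lower: storage_backup
     dns_domain: storagebackup.example.com.
     mtu: 1500
     vip: false
     subnets:
       storage_backup_subnet:
         ip_subnet: 172.16.10.0/24
         allocation_pools:
           - start: 172.16.10.4
             end: 172.16.10.250
         gateway_ip: 172.16.10.1
         vlan: 105
   EOF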
Options for network data YAML subnet definitions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:ip_subnet:
IPv4 CIDR block notation for this subnet. For example: ``192.0.2.0/24``
type: *string*
.. note:: Optional if ``ipv6_subnet`` is specified.
:ipv6_subnet:
IPv6 CIDR block notation for this subnet. For example:
``2001:db8:fd00:1000::/64``
type: *string*
.. note:: Optional if ``ip_subnet`` is specified.
:gateway_ip:
*(optional)* The gateway IPv4 address
type: *string*
:gateway_ipv6:
*(optional)* The gateway IPv6 address
:allocation_pools:
*(optional)* The start and end addresses for the subnet's IPv4 allocation
pools.
type: *list*
elements: *dictionary*
:suboptions:
:start: Start address for the allocation pool.
type: *string*
:end: End address for the allocation pool.
type: *string*
Example:
.. code-block:: yaml
allocation_pools:
- start: 192.0.2.50
end: 192.0.2.99
- start: 192.0.2.150
end: 192.0.2.199
:ipv6_allocation_pools:
*(optional)* The start and end addresses for the subnet's IPv6 allocation
pools.
type: *list*
elements: *dictionary*
:suboptions:
:start: Start address for the allocation pool.
type: *string*
:end: End address for the allocation pool.
type: *string*
Example:
.. code-block:: yaml
ipv6_allocation_pools:
  - start: 2001:db8:fd00:1000:100::1
    end: 2001:db8:fd00:1000:199::1
  - start: 2001:db8:fd00:1000:300::1
    end: 2001:db8:fd00:1000:399::1
:routes:
*(optional)* List of networks that should be routed via network gateway. A
single /16 supernet route could be used for 255 smaller /24 subnets.
type: *list*
elements: *dictionary*
:suboptions:
:destination: Destination network,
for example: ``198.51.100.0/24``
type: *string*
:nexthop: IP address of the router to use for the destination network,
for example: ``192.0.2.1``
type: *string*
Example:
.. code-block:: yaml
routes:
  - destination: 198.51.100.0/24
    nexthop: 192.0.2.1
  - destination: 203.0.113.0/24
    nexthop: 192.0.2.1
:routes_ipv6:
*(optional)* List of IPv6 networks that should be routed via network gateway.
type: *list*
elements: *dictionary*
:suboptions:
:destination: Destination network,
for example: ``2001:db8:fd00:2000::/64``
type: *string*
:nexthop: IP address of the router to use for the destination network,
for example: ``2001:db8:fd00:1000::1``
type: *string*
Example:
.. code-block:: yaml
routes_ipv6:
  - destination: 2001:db8:fd00:2000::/64
    nexthop: 2001:db8:fd00:1000:100::1
  - destination: 2001:db8:fd00:3000::/64
    nexthop: 2001:db8:fd00:1000:100::1
:vlan:
*(optional)* vlan ID for the network
type: *number*
.. _virtual_ips_definition_opts:
Network Virtual IPs data YAML options
-------------------------------------
:network:
Neutron Network name
type: *string*
:ip_address:
*(optional)* IP address, a pre-defined fixed IP address.
type: *string*
:subnet:
*(optional)* Neutron Subnet name, used to specify the subnet to use when
creating the Virtual IP neutron port.
This is required for deployments using routed networks, to ensure the Virtual
IP is allocated on the subnet where controller nodes are attached.
type: *string*
:dns_name:
*(optional)* DNS name, the hostname part of the FQDN (Fully Qualified Domain
Name)
type: *string*
default: overcloud
:name:
*(optional)* Virtual IP name
type: *string*
default: $network_name_virtual_ip
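As a quick reference, a minimal ``vip_data`` YAML sketch using these options; the
network names, subnet name and fixed IP address are illustrative placeholders only:

.. code-block:: bash

   # Write an illustrative Virtual IP definition (all values are placeholders).
   cat > custom_vip_data.yaml << EOF
   - network: internal_api
     dns_name: overcloud
   - network: external
     ip_address: 10.0.0.100
     subnet: external_subnet
   EOF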
.. _tripleo-heat-templates: https://opendev.org/openstack/tripleo-heat-templates
.. _default-network-isolation: https://opendev.org/openstack/tripleo-heat-templates/src/branch/master/network-data-samples/default-network-isolation.yaml
.. _network_data.yaml: https://opendev.org/openstack/tripleo-heat-templates/src/branch/master/network_data.yaml
.. _jinja2: http://jinja.pocoo.org/docs/dev/
.. _tripleo-ansible: https://opendev.org/openstack/tripleo-ansible/src/branch/master/tripleo_ansible/roles/tripleo_network_config/templates

View File

@ -1,141 +0,0 @@
.. _custom_roles:
Deploying with Custom Roles
===========================
TripleO offers the option of deploying with a user-defined list of roles,
each running a user defined list of services (where "role" means group of
nodes, e.g "Controller", and "service" refers to the individual services or
configurations e.g "Nova API").
See :doc:`composable_services` if you only wish to modify the default list of
deployed services, or see below if you wish to modify the deployed roles.
Provided example roles
----------------------
TripleO offers example roles provided in `openstack-tripleo-heat-templates`.
These roles can be listed using the `tripleoclient` by running::
openstack overcloud role list
With these provided roles, the user deploying the overcloud can generate a
`roles_data.yaml` file that contains the roles they would like to use for the
overcloud nodes. Additionally the user can manage their personal custom roles
in a similar manner by storing the individual files in a directory and using
the `tripleoclient` to generate their `roles_data.yaml`. For example, a user
can execute the following to create a `roles_data.yaml` containing only the
`Controller` and `Compute` roles::
openstack overcloud roles generate -o ~/roles_data.yaml Controller Compute
These provided roles can be generated with a different `name` in the
`roles_data.yaml` by using a format like, `Compute:ComputeHardwareA`, which
will add the role `Compute` to `roles_data.yaml` by modifying the `name` of
the role to `ComputeHardwareA`. This helps in associating nodes of a specific
hardware group with a role and targeting parameters specific to this hardware
group. The example command below generates a `roles_data.yaml` with two Compute
roles which can be targeted at specific hardware groups::
openstack overcloud roles generate -o ~/roles_data.yaml Controller \
Compute:ComputeHardwareA Compute:ComputeHardwareB
Deploying with custom roles
---------------------------
Each role is defined in the `roles_data.yaml` file. There is a sample file in
`/usr/share/openstack-tripleo-heat-templates`, or the tripleo-heat-templates_ git
repository.
The data in `roles_data.yaml` is used to perform templating with jinja2_ such
that arbitrary user-defined roles may be added, and the default roles may
be modified or removed.
The steps to define your custom roles configuration are:
1. Copy the default roles provided by `tripleo-heat-templates`::
mkdir ~/roles
cp /usr/share/openstack-tripleo-heat-templates/roles/* ~/roles
2. Create a new role file with your custom role.
Additional details about the format for the roles file can be found in the
`README.rst <https://opendev.org/openstack/tripleo-heat-templates/src/branch/master/roles/README.rst>`_
in the roles/ directory from `tripleo-heat-templates`. The filename should
match the name of the role. For example if adding a new role named `Galera`,
the role file name should be `Galera.yaml`. The file should at least contain
the following items:
* name: Name of the role e.g "CustomController", mandatory
* ServicesDefault: List of services, optional, defaults to an empty list
See the default roles_data.yaml or overcloud-resource-registry-puppet.j2.yaml
for the list of supported services. Both files can be found in the top
tripleo-heat-templates folder
Additional items like the ones below should be included as well:
* CountDefault: Default number of nodes, defaults to zero
* HostnameFormatDefault: Format string for hostname, optional
* Description: A few sentences describing the role and information
pertaining to the usage of the role.
The role file format is a basic yaml structure. The expectation is that there
is a single role per file. See the roles `README.rst` for additional details. For
example the following role might be used to deploy a pacemaker managed galera
cluster::
- name: Galera
HostnameFormatDefault: '%stackname%-galera-%index%'
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Timesync
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::FluentdClient
.. note::
In the example above, if you wanted to deploy the Galera role on specific nodes
you would either use predictable placement :doc:`../provisioning/node_placement` or add a custom
parameter called OvercloudGaleraFlavor::
parameter_defaults:
OvercloudGaleraFlavor: oooq_galera
.. warning::
When scaling your deployment out, you also need to set the role counts in the
``parameter_defaults`` section (see the sketch at the end of this section). The
``--control-scale`` and ``--compute-scale`` CLI args are hardcoded to the
"Control" and "Compute" role names, so they are in fact ignored when using
custom roles.
3. Create a `roles_data.yaml` file that contains the custom role in addition
to the other roles that will be deployed. For example::
openstack overcloud roles generate --roles-path ~/roles -o ~/my_roles_data.yaml Controller Compute Galera
4. Pass the modified roles_data on deployment as follows::
openstack overcloud deploy --templates -r ~/my_roles_data.yaml
.. note::
It is also possible to copy the entire tripleo-heat-templates tree, and modify
the roles_data.yaml file in place, then deploy via ``--templates <copy of tht>``
.. warning::
Note that in your custom roles you may not use any already predefined name.
So in practice you may not override the following roles: Controller, Compute,
BlockStorage, SwiftStorage and CephStorage. You need to use different names
instead.
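For example, a minimal sketch of an environment file setting explicit counts for the
roles used above; the counts are illustrative, and the ``GaleraCount`` name simply
follows the usual ``<RoleName>Count`` convention for the custom role::

    cat > role_counts.yaml << EOF
    parameter_defaults:
      ControllerCount: 3
      ComputeCount: 2
      GaleraCount: 3
    EOF

    openstack overcloud deploy --templates -r ~/my_roles_data.yaml -e role_counts.yaml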
.. _tripleo-heat-templates: https://opendev.org/openstack/tripleo-heat-templates
.. _jinja2: http://jinja.pocoo.org/docs/dev/

View File

@ -1,30 +0,0 @@
Deploy an additional nova cell v2
=================================
.. warning::
Multi cell support is only supported in Stein and later versions.
The different sections in this guide assume that you are ready to deploy a new
overcloud, or already have installed an overcloud (min Stein release).
.. note::
Starting with CentOS 8 and the TripleO Stein release, podman is the CONTAINERCLI
to be used in the following steps.
The minimum requirement for having multiple cells is to have a central OpenStack
controller cluster running all controller services. Additional cells will
have cell controllers running the cell DB, cell MQ and a nova cell conductor
service. In addition there are 1..n compute nodes. The central nova conductor
service acts as a super conductor of the whole environment.
For more details on the cells v2 layout check `Cells Layout (v2)
<https://docs.openstack.org/nova/latest/user/cellsv2-layout.html>`_
.. toctree::
deploy_cellv2_basic.rst
deploy_cellv2_advanced.rst
deploy_cellv2_routed.rst
deploy_cellv2_additional.rst
deploy_cellv2_manage_cell.rst

View File

@ -1,155 +0,0 @@
Additional cell considerations and features
===========================================
.. warning::
Multi cell support is only supported in Stein or later versions.
.. contents::
:depth: 3
:backlinks: none
.. _cell_availability_zone:
Availability Zones (AZ)
-----------------------
A nova AZ must be configured for each cell to make sure instances stay in the
cell when performing migration and to be able to target a cell when an instance
gets created. The central cell must also be configured as a specific AZ
(or multiple AZs) rather than the default.
Configuring AZs for Nova (compute)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's also possible to configure the AZ for a compute node by adding it to a
host aggregate after the deployment is completed. The following commands show
creating a host aggregate, an associated AZ, and adding compute nodes to a
`cell1` AZ:
.. code-block:: bash
source overcloudrc
openstack aggregate create cell1 --zone cell1
openstack aggregate add host cell1 hostA
openstack aggregate add host cell1 hostB
.. note::
Right now we can not use `OS::TripleO::Services::NovaAZConfig` to auto
create the AZ during the deployment as at this stage the initial cell
creation is not complete. Further work is needed to fully automate the
post cell creation steps before `OS::TripleO::Services::NovaAZConfig`
can be used.
Routed networks
---------------
A routed spine and leaf networking layout can be used to deploy the additional
cell nodes in a distributed nature. Not all nodes need to be co-located at the
same physical location or datacenter. See :ref:`routed_spine_leaf_network` for
more details.
Reusing networks from an already deployed stack
-----------------------------------------------
When deploying separate stacks it may be necessary to reuse networks, subnets,
and VIP resources between stacks if desired. Only a single Heat stack can own a
resource and be responsible for its creation and deletion, however the
resources can be reused in other stacks.
Usually the internal api network is shared between the split cell controller
and cell compute stacks.
To reuse network related resources between stacks, the following parameters
have been added to the network definitions in the `network_data.yaml` file
format:
.. code-block:: bash
external_resource_network_id: Existing Network UUID
external_resource_subnet_id: Existing Subnet UUID
external_resource_segment_id: Existing Segment UUID
external_resource_vip_id: Existing VIP UUID
These parameters can be set on each network definition in the
`network_data.yaml` file used for the deployment of the separate stack.
Not all networks need to be reused or shared across stacks. The
`external_resource_*` parameters can be set for only the networks that are
meant to be shared, while the other networks can be newly created and managed.
For example, to reuse the `internal_api` network from the cell controller stack
in the compute stack, run the following commands to show the UUIDs for the
related network resources:
.. code-block:: bash
openstack network show internal_api -c id -f value
openstack subnet show internal_api_subnet -c id -f value
openstack port show internal_api_virtual_ip -c id -f value
Save the values shown in the output of the above commands and add them to the
network definition for the `internal_api` network in the `network_data.yaml`
file for the separate stack.
In case the overcloud and the cell controller stack use the same internal
api network there are two ports with the name `internal_api_virtual_ip`.
In this case it is required to identify the correct port and use the id
instead of the name in the `openstack port show` command.
An example network definition would look like:
.. code-block:: bash
- name: InternalApi
external_resource_network_id: 93861871-7814-4dbc-9e6c-7f51496b43af
external_resource_subnet_id: c85c8670-51c1-4b17-a580-1cfb4344de27
external_resource_vip_id: 8bb9d96f-72bf-4964-a05c-5d3fed203eb7
name_lower: internal_api
vip: true
ip_subnet: '172.16.2.0/24'
allocation_pools: [{'start': '172.16.2.4', 'end': '172.16.2.250'}]
ipv6_subnet: 'fd00:fd00:fd00:2000::/64'
ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]
mtu: 1400
.. note::
When *not* sharing networks between stacks, each network defined in
`network_data.yaml` must have a unique name across all deployed stacks.
This requirement is necessary since regardless of the stack, all networks are
created in the same tenant in Neutron on the undercloud.
For example, the network name `internal_api` can't be reused between
stacks, unless the intent is to share the network between the stacks.
The network would need to be given a different `name` and `name_lower`
property such as `InternalApiCompute0` and `internal_api_compute_0`.
Configuring nova-metadata API per-cell
--------------------------------------
.. note::
Deploying nova-metadata API per-cell is only supported in Train
and later.
.. note::
NovaLocalMetadataPerCell is only tested with ovn metadata agent to
automatically forward requests to the nova metadata api.
It is possible to configure the nova-metadata API service to run locally per
cell. In this situation the cell controllers also host the nova-metadata API
service. The `NovaLocalMetadataPerCell` parameter, which defaults to
`false`, needs to be set to `true`.
Running the nova-metadata API service per cell can provide better performance
and data isolation in a multi-cell deployment. Whether to use this
configuration depends on how neutron is set up. If networks span cells, you
might need to run the nova-metadata API service centrally. If your networks
are segmented along cell boundaries, then you can run the nova-metadata API
service per cell.
.. code-block:: yaml
parameter_defaults:
NovaLocalMetadataPerCell: True
See also information on running nova-metadata API per cell as explained
in the cells v2 layout section `Local per cell <https://docs.openstack.org/nova/latest/user/cellsv2-layout.html#nova-metadata-api-service>`_

View File

@ -1,247 +0,0 @@
Example 2. - Split Cell controller/compute Architecture in Train release
========================================================================
.. warning::
Multi cell support is only supported in Stein or later versions.
This guide addresses Train release and later!
.. contents::
:depth: 3
:backlinks: none
This guide assumes that you are ready to deploy a new overcloud, or have
already installed an overcloud (min Train release).
.. note::
Starting with CentOS 8 and the TripleO Stein release, podman is the CONTAINERCLI
to be used in the following steps.
.. _advanced_cell_arch:
In this scenario the cell computes are split off into their own stack, e.g. to
manage the computes of each edge site in its own stack.
This section only explains the differences to the :doc:`deploy_cellv2_basic`.
Like before the following example uses six nodes and the split control plane method
to deploy a distributed cell deployment. The first Heat stack deploys the controller
cluster. The second Heat stack deploys the cell controller. The computes will then
again be split off into their own stack.
.. _cell_export_cell_controller_info:
Extract deployment information from the overcloud stack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Again like in :ref:`cell_export_overcloud_info` information from the control
plane stack needs to be exported:
.. code-block:: bash
source stackrc
mkdir cell1
export DIR=cell1
openstack overcloud cell export cell1-ctrl -o cell1/cell1-ctrl-input.yaml
Create roles file for the cell stack
____________________________________
The same roles get exported as in :ref:`cell_create_roles_file`.
Create cell parameter file for additional customization (e.g. cell1/cell1.yaml)
_______________________________________________________________________________
The cell parameter file remains the same as in :ref:`cell_parameter_file` with
the only difference that `ComputeCount` is set to 0. This is required because
the roles file contains both the `CellController` and `Compute` roles and the
default count for the `Compute` role is 1 (e.g. `cell1/cell1.yaml`):
.. code-block:: yaml
parameter_defaults:
...
# number of controllers/computes in the cell
CellControllerCount: 1
ComputeCount: 0
...
Create the network configuration for `cellcontroller` and add to environment file
_________________________________________________________________________________
Depending on the network configuration of the used hardware and network
architecture it is required to register a resource for the `CellController`
role.
.. code-block:: yaml
resource_registry:
OS::TripleO::CellController::Net::SoftwareConfig: single-nic-vlans/controller.yaml
.. note::
For details on network configuration consult :ref:`network_isolation` guide, chapter *Customizing the Interface Templates*.
Deploy the cell
^^^^^^^^^^^^^^^
Create new flavor used to tag the cell controller
_________________________________________________
Follow the instructions in :ref:`cell_create_flavor_and_tag` on how to create
a new flavor and tag the cell controller.
Run cell deployment
___________________
To deploy the cell controller stack we use the same `overcloud deploy`
command as it was used to deploy the `overcloud` stack and add the created
export environment files:
.. code-block:: bash
openstack overcloud deploy \
--templates /usr/share/openstack-tripleo-heat-templates \
-e ... additional environment files used for overcloud stack, like container
prepare parameters, or other specific parameters for the cell
...
    --stack cell1-ctrl \
    -r $HOME/$DIR/cell_roles_data.yaml \
    -e $HOME/$DIR/cell1-ctrl-input.yaml \
    -e $HOME/$DIR/cell1.yaml
Wait for the deployment to finish:
.. code-block:: bash
openstack stack list
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
| ID | Stack Name | Project | Stack Status | Creation Time | Updated Time |
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
| 890e4764-1606-4dab-9c2f-6ed853e3fed8 | cell1-ctrl | 2b303a97f4664a69ba2dbcfd723e76a4 | CREATE_COMPLETE | 2019-02-12T08:35:32Z | None |
| 09531653-1074-4568-b50a-48a7b3cc15a6 | overcloud | 2b303a97f4664a69ba2dbcfd723e76a4 | UPDATE_COMPLETE | 2019-02-09T09:52:56Z | 2019-02-11T08:33:37Z |
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
Create the cell
^^^^^^^^^^^^^^^
As in :ref:`cell_create_cell` create the cell, but we can skip the final host
discovery step as the computes are not yet deployed.
Extract deployment information from the cell controller stack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The cell compute stack again requires input information from both the control
plane stack (`overcloud`) and the cell controller stack (`cell1-ctrl`):
.. code-block:: bash
source stackrc
export DIR=cell1
Export EndpointMap, HostsEntry, AllNodesConfig, GlobalConfig and passwords information
______________________________________________________________________________________
As before the `openstack overcloud cell export` functionality of the tripleo-client
is used to export the required data from the cell controller stack.
.. code-block:: bash
openstack overcloud cell export cell1-cmp -o cell1/cell1-cmp-input.yaml -e cell1-ctrl
`cell1-cmp` is the chosen name for the new compute stack. This parameter is used to
set the default export file name, which is then stored in the current directory.
In this case a dedicated export file was set via `-o`.
In addition it is required to use the `--cell-stack <cell stack>` or `-e <cell stack>`
parameter to point the export command to the cell controller stack and indicate
that this is a compute child stack. This is required as the input information for
the cell controller and cell compute stack is not the same.
.. note::
If the export file already exists it can be forced to be overwritten using
`--force-overwrite` or `-f`.
.. note::
The services from the cell stacks use the same service passwords as the
control plane services.
Create cell compute parameter file for additional customization
_______________________________________________________________
A new parameter file is used to overwrite, or customize settings which are
different from the cell controller stack. Add the following content into
a parameter file for the cell compute stack, e.g. `cell1/cell1-cmp.yaml`:
.. code-block:: yaml
resource_registry:
# Since the compute stack deploys only compute nodes ExternalVIPPorts
# are not required.
OS::TripleO::Network::Ports::ExternalVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
parameter_defaults:
# number of controllers/computes in the cell
CellControllerCount: 0
ComputeCount: 1
The above file overwrites the values from `cell1/cell1.yaml` to not deploy
a controller in the cell compute stack. Since the cell compute stack uses
the same role file the default `CellControllerCount` is 1.
If there are other differences, like network config, parameters, ... for
the computes, add them here.
Deploy the cell computes
^^^^^^^^^^^^^^^^^^^^^^^^
Run cell deployment
___________________
To deploy the overcloud we can use the same `overcloud deploy` command as
it was used to deploy the `cell1-ctrl` stack and add the created export
environment files:
.. code-block:: bash
openstack overcloud deploy \
--templates /usr/share/openstack-tripleo-heat-templates \
-e ... additional environment files used for overcloud stack, like container
prepare parameters, or other specific parameters for the cell
...
    --stack cell1-cmp \
    -n $HOME/$DIR/cell1-cmp/network_data.yaml \
    -r $HOME/$DIR/cell_roles_data.yaml \
    -e $HOME/$DIR/cell1-ctrl-input.yaml \
    -e $HOME/$DIR/cell1-cmp-input.yaml \
    -e $HOME/$DIR/cell1.yaml \
    -e $HOME/$DIR/cell1-cmp.yaml
Wait for the deployment to finish:
.. code-block:: bash
openstack stack list
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
| ID | Stack Name | Project | Stack Status | Creation Time | Updated Time |
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
| 790e4764-2345-4dab-7c2f-7ed853e7e778 | cell1-cmp | 2b303a97f4664a69ba2dbcfd723e76a4 | CREATE_COMPLETE | 2019-02-12T08:35:32Z | None |
| 890e4764-1606-4dab-9c2f-6ed853e3fed8 | cell1-ctrl | 2b303a97f4664a69ba2dbcfd723e76a4 | CREATE_COMPLETE | 2019-02-12T08:35:32Z | None |
| 09531653-1074-4568-b50a-48a7b3cc15a6 | overcloud | 2b303a97f4664a69ba2dbcfd723e76a4 | UPDATE_COMPLETE | 2019-02-09T09:52:56Z | 2019-02-11T08:33:37Z |
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
Perform cell host discovery
___________________________
The final step is to discover the computes deployed in the cell. Run the host discovery
as explained in :ref:`cell_host_discovery`.
Create and add the node to an Availability Zone
_______________________________________________
After a cell is provisioned, it is required to create an availability zone for the
compute stack; it is not enough to just create an availability zone for the complete
cell. In this use case we want to make sure an instance created in the compute group
stays in it when performing a migration. Check :ref:`cell_availability_zone` for more
about how to create an availability zone and add the node.
After that the cell is deployed and can be used.
.. note::
Migrating instances between cells is not supported. To move an instance to
a different cell it needs to be re-created in the new target cell.

View File

@ -1,416 +0,0 @@
Example 1. - Basic Cell Architecture in Train release
=====================================================
.. warning::
Multi cell support is only supported in Stein or later versions.
This guide addresses Train release and later!
.. contents::
:depth: 3
:backlinks: none
This guide assumes that you are ready to deploy a new overcloud, or have
already installed an overcloud (min Train release).
.. note::
Starting with CentOS 8 and the TripleO Stein release, podman is the CONTAINERCLI
to be used in the following steps.
.. _basic_cell_arch:
The following example uses six nodes and the split control plane method to
deploy a distributed cell deployment. The first Heat stack deploys a controller
cluster and a compute. The second Heat stack deploys a cell controller and a
compute node:
.. code-block:: bash
openstack overcloud status
+-----------+---------------------+---------------------+-------------------+
| Plan Name | Created | Updated | Deployment Status |
+-----------+---------------------+---------------------+-------------------+
| overcloud | 2019-02-12 09:00:27 | 2019-02-12 09:00:27 | DEPLOY_SUCCESS |
+-----------+---------------------+---------------------+-------------------+
openstack server list -c Name -c Status -c Networks
+----------------------------+--------+------------------------+
| Name | Status | Networks |
+----------------------------+--------+------------------------+
| overcloud-controller-1 | ACTIVE | ctlplane=192.168.24.19 |
| overcloud-controller-2 | ACTIVE | ctlplane=192.168.24.11 |
| overcloud-controller-0 | ACTIVE | ctlplane=192.168.24.29 |
| overcloud-novacompute-0 | ACTIVE | ctlplane=192.168.24.15 |
+----------------------------+--------+------------------------+
The above deployed overcloud shows the nodes from the first stack.
.. note::
In this example the default cell and the additional cell use the
same network. When configuring another network scenario keep in
mind that it will be necessary for the systems to be able to
communicate with each other.
Extract deployment information from the overcloud stack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Any additional cell stack requires information from the overcloud Heat stack
where the central OpenStack services are located. The extracted parameters are
needed as input for additional cell stacks. To extract these parameters
into separate files in a directory (e.g. DIR=cell1) run the following:
.. code-block:: bash
source stackrc
mkdir cell1
export DIR=cell1
.. _cell_export_overcloud_info:
Export EndpointMap, HostsEntry, AllNodesConfig, GlobalConfig and passwords information
______________________________________________________________________________________
The tripleo-client in Train provides an `openstack overcloud cell export`
functionality to export the required data from the control plane stack which
then is used as an environment file passed to the cell stack.
.. code-block:: bash
openstack overcloud cell export cell1 -o cell1/cell1-cell-input.yaml
`cell1` is the chosen name for the new cell. This parameter is used to
set the default export file name, which is then stored in the current
directory.
In this case a dedicated export file was set via `-o`.
.. note::
If the export file already exists it can be forced to be overwritten using
`--force-overwrite` or `-f`.
.. note::
The services from the cell stacks use the same service passwords as the
control plane services.
.. _cell_create_roles_file:
Create roles file for the cell stack
____________________________________
Different roles are provided within tripleo-heat-templates, depending on
the configuration and desired services to be deployed.
The default compute role at roles/Compute.yaml can be used for cell computes
if that is sufficient for the use case.
A dedicated role, `roles/CellController.yaml`, is provided. This role includes
the necessary services for the cell controller, where the main services are
galera database, rabbitmq, nova-conductor, nova novnc proxy and nova metadata
in case `NovaLocalMetadataPerCell` is enabled.
Create the roles file for the cell:
.. code-block:: bash
openstack overcloud roles generate --roles-path \
/usr/share/openstack-tripleo-heat-templates/roles \
-o $DIR/cell_roles_data.yaml Compute CellController
.. _cell_parameter_file:
Create cell parameter file for additional customization (e.g. cell1/cell1.yaml)
_______________________________________________________________________________
Each cell has some mandatory parameters which need to be set using an
environment file.
Add the following content into a parameter file for the cell, e.g. `cell1/cell1.yaml`:
.. code-block::
resource_registry:
OS::TripleO::Network::Ports::OVNDBsVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Network::Ports::RedisVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
parameter_defaults:
# since the same networks are used in this example, the
# creation of the different networks is omitted
ManageNetworks: false
# CELL Parameter to reflect that this is an additional CELL
NovaAdditionalCell: True
# The DNS names for the VIPs for the cell
CloudName: cell1.ooo.test
CloudNameInternal: cell1.internalapi.ooo.test
CloudNameStorage: cell1.storage.ooo.test
CloudNameStorageManagement: cell1.storagemgmt.ooo.test
CloudNameCtlplane: cell1.ctlplane.ooo.test
# Flavors used for the cell controller and computes
OvercloudCellControllerFlavor: cellcontroller
OvercloudComputeFlavor: compute
# Number of controllers/computes in the cell
CellControllerCount: 1
ComputeCount: 1
  # Compute names need to be unique across cells. Make sure to have a unique
  # hostname format for cell nodes
  ComputeHostnameFormat: 'cell1-compute-%index%'
# default gateway
ControlPlaneStaticRoutes:
- ip_netmask: 0.0.0.0/0
next_hop: 192.168.24.1
default: true
DnsServers:
- x.x.x.x
The above file disables creating networks by setting the ``ManageNetworks`` parameter
to ``false`` so that the same ``network_data.yaml`` file from the overcloud stack
can be used. When ``ManageNetworks`` is set to false, ports will be created for
the nodes in the separate stacks on the existing networks that were already created
in the ``overcloud`` stack.
It also specifies that this will be an additional cell using parameter
`NovaAdditionalCell`.
.. note::
Compute hostnames need to be unique across cells. Make sure to use
`ComputeHostnameFormat` to have unique hostnames.
Create the network configuration for `cellcontroller` and add to environment file
_________________________________________________________________________________
Depending on the network configuration of the used hardware and network
architecture it is required to register a resource for the `CellController`
role.
.. code-block:: yaml
resource_registry:
OS::TripleO::CellController::Net::SoftwareConfig: single-nic-vlans/controller.yaml
OS::TripleO::Compute::Net::SoftwareConfig: single-nic-vlans/compute.yaml
.. note::
This example just reuses the existing network configs as it is a shared L2
network. For details on network configuration consult :ref:`network_isolation` guide,
chapter *Customizing the Interface Templates*.
Deploy the cell
^^^^^^^^^^^^^^^
.. _cell_create_flavor_and_tag:
Create new flavor used to tag the cell controller
_________________________________________________
Depending on the hardware create a flavor and tag the node to be used.
.. code-block:: bash
openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 cellcontroller
openstack flavor set --property "cpu_arch"="x86_64" \
--property "capabilities:boot_option"="local" \
--property "capabilities:profile"="cellcontroller" \
--property "resources:CUSTOM_BAREMETAL=1" \
--property "resources:DISK_GB=0" \
--property "resources:MEMORY_MB=0" \
--property "resources:VCPU=0" \
cellcontroller
The properties need to be modified to the needs of the environment.
Tag the node to match the new flavor using the following command:
.. code-block:: bash
baremetal node set --property \
capabilities='profile:cellcontroller,boot_option:local' <node id>
Verify the tagged cellcontroller:
.. code-block:: bash
openstack overcloud profiles list
Run cell deployment
___________________
To deploy the overcloud we can use the same `overcloud deploy` command as
it was used to deploy the `overcloud` stack and add the created export
environment files:
.. code-block:: bash
openstack overcloud deploy \
--templates /usr/share/openstack-tripleo-heat-templates \
-e ... additional environment files used for overcloud stack, like container
prepare parameters, or other specific parameters for the cell
...
    --stack cell1 \
    -r $HOME/$DIR/cell_roles_data.yaml \
    -e $HOME/$DIR/cell1-cell-input.yaml \
    -e $HOME/$DIR/cell1.yaml
Wait for the deployment to finish:
.. code-block:: bash
openstack stack list
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
| ID | Stack Name | Project | Stack Status | Creation Time | Updated Time |
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
| 890e4764-1606-4dab-9c2f-6ed853e3fed8 | cell1 | 2b303a97f4664a69ba2dbcfd723e76a4 | CREATE_COMPLETE | 2019-02-12T08:35:32Z | None |
| 09531653-1074-4568-b50a-48a7b3cc15a6 | overcloud | 2b303a97f4664a69ba2dbcfd723e76a4 | UPDATE_COMPLETE | 2019-02-09T09:52:56Z | 2019-02-11T08:33:37Z |
+--------------------------------------+--------------+----------------------------------+-----------------+----------------------+----------------------+
.. _cell_create_cell:
Create the cell and discover compute nodes (ansible playbook)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
An ansible role and playbook are available to automate the one-time tasks
to create a cell after the deployment steps have finished successfully. In
addition, :ref:`cell_create_cell_manual` explains the tasks being automated
by this ansible playbook.
.. note::
When using multiple additional cells, don't place all inventories of the cells
in one directory. The current version of the `create-nova-cell-v2.yaml` playbook
uses `CellController[0]` to get the `database_connection` and `transport_url`
to create the new cell. When all cell inventories get added to the same directory
`CellController[0]` might not be the correct cell controller for the new cell.
.. code-block:: bash
export CONTAINERCLI=podman #choose appropriate container cli here
source stackrc
mkdir inventories
for i in overcloud cell1; do \
/usr/bin/tripleo-ansible-inventory \
--static-yaml-inventory inventories/${i}.yaml --stack ${i}; \
done
ANSIBLE_HOST_KEY_CHECKING=False ANSIBLE_SSH_RETRIES=3 ansible-playbook -i inventories \
/usr/share/ansible/tripleo-playbooks/create-nova-cell-v2.yaml \
-e tripleo_cellv2_cell_name=cell1 \
-e tripleo_cellv2_containercli=${CONTAINERCLI}
The playbook requires two parameters: `tripleo_cellv2_cell_name` to provide
the name of the new cell and, until docker support is dropped, `tripleo_cellv2_containercli`
to specify whether podman or docker is used.
.. _cell_create_cell_manual:
Create the cell and discover compute nodes (manual way)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following describes the manual steps needed to finalize the
deployment of a new cell. These are the steps automated in the ansible
playbook mentioned in :ref:`cell_create_cell`.
Get control plane and cell controller IPs:
.. code-block:: bash
CTRL_IP=$(openstack server list -f value -c Networks --name overcloud-controller-0 | sed 's/ctlplane=//')
CELL_CTRL_IP=$(openstack server list -f value -c Networks --name cell1-cellcontrol-0 | sed 's/ctlplane=//')
Add cell information to overcloud controllers
_____________________________________________
On all central controllers add information on how to reach the cell controller
endpoint (usually internalapi) to `/etc/hosts`, from the undercloud:
.. code-block:: bash
CELL_INTERNALAPI_INFO=$(ssh heat-admin@${CELL_CTRL_IP} egrep \
cell1.*\.internalapi /etc/hosts)
ansible -i /usr/bin/tripleo-ansible-inventory Controller -b \
-m lineinfile -a "dest=/etc/hosts line=\"$CELL_INTERNALAPI_INFO\""
.. note::
Do this outside the `HEAT_HOSTS_START` .. `HEAT_HOSTS_END` block, or
add it to an `ExtraHostFileEntries` section of an environment file for the
central overcloud controller. Add the environment file to the next
`overcloud deploy` run.
Extract transport_url and database connection
_____________________________________________
Get the `transport_url` and database `connection` endpoint information
from the cell controller. This information is used to create the cell in the
next step:
.. code-block:: bash
CELL_TRANSPORT_URL=$(ssh heat-admin@${CELL_CTRL_IP} sudo \
crudini --get /var/lib/config-data/nova/etc/nova/nova.conf DEFAULT transport_url)
CELL_MYSQL_VIP=$(ssh heat-admin@${CELL_CTRL_IP} sudo \
crudini --get /var/lib/config-data/nova/etc/nova/nova.conf database connection \
| awk -F[@/] '{print $4}')
Create the cell
_______________
Log in to one of the central controllers and create the cell with reference to
the IP of the cell controller in the `database_connection` and the
`transport_url` extracted in the previous step, like:
.. code-block:: bash
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 create_cell --name computecell1 \
--database_connection "{scheme}://{username}:{password}@$CELL_MYSQL_VIP/nova?{query}" \
--transport-url "$CELL_TRANSPORT_URL"
.. note::
Templated cell transport URLs could be used if the same number of controllers
are in the default and add-on cells. For further information about templated
URLs for cell mappings check: `Template URLs in Cell Mappings
<https://docs.openstack.org/nova/stein/user/cells.html#template-urls-in-cell-mappings>`_
.. code-block:: bash
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 list_cells --verbose
After the cell is created, the nova services on all central controllers need to
be restarted.
Docker:
.. code-block:: bash
ansible -i /usr/bin/tripleo-ansible-inventory Controller -b -a \
"docker restart nova_api nova_scheduler nova_conductor"
Podman:
.. code-block:: bash
ansible -i /usr/bin/tripleo-ansible-inventory Controller -b -a \
"systemctl restart tripleo_nova_api tripleo_nova_conductor tripleo_nova_scheduler"
We now see the cell controller services registered:
.. code-block:: bash
(overcloud) [stack@undercloud ~]$ nova service-list
Perform cell host discovery
___________________________
The final step is to discover the computes deployed in the cell. Run the host discovery
as explained in :ref:`cell_host_discovery`.
Create and add the node to an Availability Zone
_______________________________________________
After a cell is provisioned, an availability zone must be created for the cell
to ensure that an instance created in the cell stays in the cell when it is
migrated. Check :ref:`cell_availability_zone` for more details on how to create an
availability zone and add the node.
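For illustration, a minimal sketch of creating such an availability zone via a host
aggregate and adding a cell compute to it (aggregate and zone names are examples,
not mandated by the referenced section):
.. code-block:: bash
source overcloudrc
# create an aggregate with an availability zone for the cell
openstack aggregate create --zone cell1-az cell1-aggregate
# add the cell compute host to it
openstack aggregate add host cell1-aggregate <cell compute host>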
After that the cell is deployed and can be used.
.. note::
Migrating instances between cells is not supported. To move an instance to
a different cell it needs to be re-created in the new target cell.

Managing the cell
-----------------
.. _cell_host_discovery:
Add a compute to a cell
~~~~~~~~~~~~~~~~~~~~~~~
To increase resource capacity of a running cell, you can start more servers of
a selected role. For more details on how to add nodes see :doc:`../post_deployment/scale_roles`.
After the node is deployed, log in to one of the overcloud controllers and run
the cell host discovery:
.. code-block:: bash
CTRL=overcloud-controller-0
CTRL_IP=$(openstack server list -f value -c Networks --name $CTRL | sed 's/ctlplane=//')
# CONTAINERCLI can be either docker or podman
export CONTAINERCLI='docker'
# run cell host discovery
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 discover_hosts --by-service --verbose
# verify the cell hosts
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 list_hosts
# add new node to the availability zone
source overcloudrc
(overcloud) $ openstack aggregate add host <cell name> <compute host>
.. note::
Optionally, the cell uuid can be passed to the `discover_hosts` and
`list_hosts` commands to target only a specific cell, as sketched below.
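For example, assuming the cell uuid is known from `nova-manage cell_v2 list_cells`,
the discovery could be limited to that cell (a sketch reusing the variables from above):
.. code-block:: bash
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 discover_hosts --cell_uuid <cell uuid> --verbose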
Delete a compute from a cell
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* As an initial step, migrate all instances off the compute.
* From one of the overcloud controllers, delete the computes from the cell:
.. code-block:: bash
source stackrc
CTRL=overcloud-controller-0
CTRL_IP=$(openstack server list -f value -c Networks --name $CTRL | sed 's/ctlplane=//')
# CONTAINERCLI can be either docker or podman
export CONTAINERCLI='docker'
# list the cell hosts
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 list_hosts
# delete a node from a cell
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 delete_host --cell_uuid <uuid> --host <compute>
* Delete the node from the cell stack
See :doc:`../post_deployment/delete_nodes`.
* Delete the resource providers from placement
This step is required because otherwise a compute node added later with the same
hostname will fail to register and update its resources with the placement
service:
.. code-block:: bash
sudo dnf install python3-osc-placement
source overcloudrc
openstack resource provider list
+--------------------------------------+---------------------------------------+------------+
| uuid | name | generation |
+--------------------------------------+---------------------------------------+------------+
| 9cd04a8b-5e6c-428e-a643-397c9bebcc16 | computecell1-novacompute-0.site1.test | 11 |
+--------------------------------------+---------------------------------------+------------+
openstack resource provider delete 9cd04a8b-5e6c-428e-a643-397c9bebcc16
Delete a cell
~~~~~~~~~~~~~
* As an initial step, delete all instances from the cell.
* From one of the overcloud controllers, delete all computes from the cell:
.. code-block:: bash
CTRL=overcloud-controller-0
CTRL_IP=$(openstack server list -f value -c Networks --name $CTRL | sed 's/ctlplane=//')
# CONTAINERCLI can be either docker or podman
export CONTAINERCLI='docker'
# list the cell hosts
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 list_hosts
# delete a node from a cell
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 delete_host --cell_uuid <uuid> --host <compute>
* On the cell controller delete all deleted instances from the database:
.. code-block:: bash
CELL_CTRL=cell1-cellcontrol-0
CELL_CTRL_IP=$(openstack server list -f value -c Networks --name $CELL_CTRL | sed 's/ctlplane=//')
# CONTAINERCLI can be either docker or podman
export CONTAINERCLI='docker'
ssh heat-admin@${CELL_CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_conductor \
nova-manage db archive_deleted_rows --until-complete --verbose
* From one of the overcloud controllers, delete the cell:
.. code-block:: bash
CTRL=overcloud-controller-0
CTRL_IP=$(openstack server list -f value -c Networks --name $CTRL | sed 's/ctlplane=//')
# CONTAINERCLI can be either docker or podman
export CONTAINERCLI='docker'
# list the cells
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 list_cells
# delete the cell
ssh heat-admin@${CTRL_IP} sudo ${CONTAINERCLI} exec -i -u root nova_api \
nova-manage cell_v2 delete_cell --cell_uuid <uuid>
* Delete the cell stack:
.. code-block:: bash
openstack stack delete <stack name> --wait --yes && openstack overcloud plan delete <stack name>
.. note::
If the cell consists of a controller and a compute stack, delete the
compute stack first and then the controller stack.
* From a system which can reach the placement endpoint, delete the resource providers from placement
This step is required because otherwise a compute node added later with the same
hostname will fail to register as a resource with the placement service.
.. code-block:: bash
sudo dnf install python3-osc-placement
source overcloudrc
openstack resource provider list
+--------------------------------------+---------------------------------------+------------+
| uuid | name | generation |
+--------------------------------------+---------------------------------------+------------+
| 9cd04a8b-5e6c-428e-a643-397c9bebcc16 | computecell1-novacompute-0.site1.test | 11 |
+--------------------------------------+---------------------------------------+------------+
openstack resource provider delete 9cd04a8b-5e6c-428e-a643-397c9bebcc16
Updating a cell
~~~~~~~~~~~~~~~
Each stack in a multi-stack cell deployment must be updated to perform a full minor
update across the entire deployment.
Cells can be updated just like the overcloud nodes following update procedure described
in :ref:`package_update` and using appropriate stack name for update commands.
The control plane and cell controller stack should be updated first by completing all
the steps from the minor update procedure.
Once the control plane stack is updated, re-run the export command to recreate the
required input files for each separate cell stack.
.. note::
Before re-running the export command, backup the previously used input file so that
the previous versions are not overwritten. In the event that a separate cell stack
needs a stack update operation performed prior to the minor update procedure, the
previous versions of the exported files should be used.
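For illustration, a minimal sketch of preserving the previous export before
regenerating it (file names are illustrative):
.. code-block:: bash
# keep a dated copy of the previously exported cell input file
cp cell1/cell1-ctrl-input.yaml cell1/cell1-ctrl-input.yaml.$(date +%Y%m%d)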

Example 3. - Advanced example using split cell controller/compute architecture and routed networks in Train release
===================================================================================================================
.. warning::
Multi cell support is only supported in Stein or later versions.
This guide addresses Train release and later!
.. contents::
:depth: 3
:backlinks: none
This guide assumes that you are ready to deploy a new overcloud, or have
already installed an overcloud (min Train release).
.. note::
Starting with CentOS 8 and the TripleO Stein release, podman is the CONTAINERCLI
to be used in the following steps.
In this example we use the :doc:`deploy_cellv2_advanced` using a routed spine and
leaf networking layout to deploy an additional cell. Not all nodes need
to be co-located at the same physical location or datacenter. See
:ref:`routed_spine_leaf_network` for more details.
The nodes deployed to the control plane, which are part of the overcloud stack,
use different networks than the cell stacks, which are separated into a cell
controller stack and a cell compute stack. The cell controller and cell compute
stacks use the same networks.
.. note::
In this example the routing for the different VLAN subnets is done by
the undercloud, which must _NOT_ be done in a production environment
as it is a single point of failure!
Used networks
^^^^^^^^^^^^^
The following provides an overview of the networks used and the subnet
details for this example:
.. code-block:: yaml
InternalApi
internal_api_subnet
vlan: 20
net: 172.16.2.0/24
route: 172.17.2.0/24 gw: 172.16.2.254
internal_api_cell1
vlan: 21
net: 172.17.2.0/24
gateway: 172.17.2.254
Storage
storage_subnet
vlan: 30
net: 172.16.1.0/24
route: 172.17.1.0/24 gw: 172.16.1.254
storage_cell1
vlan: 31
net: 172.17.1.0/24
gateway: 172.17.1.254
StorageMgmt
storage_mgmt_subnet
vlan: 40
net: 172.16.3.0/24
route: 172.17.3.0/24 gw: 172.16.3.254
storage_mgmt_cell1
vlan: 41
net: 172.17.3.0/24
gateway: 172.17.3.254
Tenant
tenant_subnet
vlan: 50
net: 172.16.0.0/24
External
external_subnet
vlan: 10
net: 10.0.0.0/24
external_cell1
vlan: 11
net: 10.0.1.0/24
gateway: 10.0.1.254
Prepare control plane for cell network routing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: bash
openstack overcloud status
+-----------+-------------------+
| Plan Name | Deployment Status |
+-----------+-------------------+
| overcloud | DEPLOY_SUCCESS |
+-----------+-------------------+
openstack server list -c Name -c Status -c Networks
+-------------------------+--------+------------------------+
| Name | Status | Networks |
+-------------------------+--------+------------------------+
| overcloud-controller-2 | ACTIVE | ctlplane=192.168.24.29 |
| overcloud-controller-0 | ACTIVE | ctlplane=192.168.24.18 |
| overcloud-controller-1 | ACTIVE | ctlplane=192.168.24.20 |
| overcloud-novacompute-0 | ACTIVE | ctlplane=192.168.24.16 |
+-------------------------+--------+------------------------+
The overcloud stack for the control plane is deployed using a `routes.yaml`
environment file to add the routing information for the new cell
subnets.
.. code-block:: yaml
parameter_defaults:
InternalApiInterfaceRoutes:
- destination: 172.17.2.0/24
nexthop: 172.16.2.254
StorageInterfaceRoutes:
- destination: 172.17.1.0/24
nexthop: 172.16.1.254
StorageMgmtInterfaceRoutes:
- destination: 172.17.3.0/24
nexthop: 172.16.3.254
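For illustration, passing this file during the control plane deployment might look
like the following (a sketch; `routes.yaml` and the other environment files are
whatever the deployment already uses):
.. code-block:: bash
openstack overcloud deploy \
--templates /usr/share/openstack-tripleo-heat-templates \
-e ... environment files already used for the overcloud stack ... \
-e routes.yaml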
Reuse networks and adding cell subnets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To prepare the `network_data` parameter file for the cell controller stack,
the file from the control plane is used as a base:
.. code-block:: bash
cp /usr/share/openstack-tripleo-heat-templates/network_data.yaml cell1/network_data-ctrl.yaml
When deploying a cell in separate stacks it may be necessary to reuse networks,
subnets, segments, and VIP resources between stacks. Only a single Heat stack
can own a resource and be responsible for its creation and deletion; however,
the resources can be reused in other stacks.
To reuse network related resources between stacks, the following parameters have
been added to the network definitions in the network_data.yaml file format:
.. code-block:: yaml
external_resource_network_id: Existing Network UUID
external_resource_subnet_id: Existing Subnet UUID
external_resource_segment_id: Existing Segment UUID
external_resource_vip_id: Existing VIP UUID
.. note::
The cell controllers use virtual IPs; therefore the existing VIPs from the
central overcloud stack should not be referenced. In case cell controllers
and cell computes get split into separate stacks, the cell compute stack
network_data file needs an external_resource_vip_id reference to the cell
controller's VIP resource.
These parameters can be set on each network definition in the `network_data-ctrl.yaml`
file used for the deployment of the separate stack.
Not all networks need to be reused or shared across stacks. The `external_resource_*`
parameters can be set for only the networks that are meant to be shared, while
the other networks can be newly created and managed.
In this example we reuse all networks, except the management network as it is
not being used at all.
The resulting storage network here looks like this:
.. code-block::
- name: Storage
  external_resource_network_id: 30e9d52d-1929-47ed-884b-7c6d65fa2e00
  external_resource_subnet_id: 11a3777a-8c42-4314-a47f-72c86e9e6ad4
  vip: true
  vlan: 30
  name_lower: storage
  ip_subnet: '172.16.1.0/24'
  allocation_pools: [{'start': '172.16.1.4', 'end': '172.16.1.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:3000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    storage_cell1:
      vlan: 31
      ip_subnet: '172.17.1.0/24'
      allocation_pools: [{'start': '172.17.1.10', 'end': '172.17.1.250'}]
      gateway_ip: '172.17.1.254'
We added the `external_resource_network_id` and `external_resource_subnet_id` of
the control plane stack as we want to reuse those resources:
.. code-block:: bash
openstack network show storage -c id -f value
openstack subnet show storage_subnet -c id -f value
In addition a new `storage_cell1` subnet is now added to the `subnets` section
to get it created in the cell controller stack for cell1:
.. code-block::
subnets:
storage_cell1:
vlan: 31
ip_subnet: '172.17.1.0/24'
allocation_pools: [{'start': '172.17.1.10', 'end': '172.17.1.250'}]
gateway_ip: '172.17.1.254'
.. note::
In this example no Management network is used, therefore it was removed.
Full networks data example:
.. code-block::
- name: Storage
external_resource_network_id: 30e9d52d-1929-47ed-884b-7c6d65fa2e00
  external_resource_subnet_id: 11a3777a-8c42-4314-a47f-72c86e9e6ad4
  vip: true
  vlan: 30
  name_lower: storage
  ip_subnet: '172.16.1.0/24'
  allocation_pools: [{'start': '172.16.1.4', 'end': '172.16.1.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:3000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    storage_cell1:
      vlan: 31
      ip_subnet: '172.17.1.0/24'
      allocation_pools: [{'start': '172.17.1.10', 'end': '172.17.1.250'}]
      gateway_ip: '172.17.1.254'
- name: StorageMgmt
  name_lower: storage_mgmt
  external_resource_network_id: 29e85314-2177-4cbd-aac8-6faf2a3f7031
  external_resource_subnet_id: 01c0a75e-e62f-445d-97ad-b98a141d6082
  vip: true
  vlan: 40
  ip_subnet: '172.16.3.0/24'
  allocation_pools: [{'start': '172.16.3.4', 'end': '172.16.3.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:4000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    storage_mgmt_cell1:
      vlan: 41
      ip_subnet: '172.17.3.0/24'
      allocation_pools: [{'start': '172.17.3.10', 'end': '172.17.3.250'}]
      gateway_ip: '172.17.3.254'
- name: InternalApi
  name_lower: internal_api
  external_resource_network_id: 5eb79743-7ff4-4f68-9904-6e9c36fbaaa6
  external_resource_subnet_id: dbc24086-0aa7-421d-857d-4e3956adec10
  vip: true
  vlan: 20
  ip_subnet: '172.16.2.0/24'
  allocation_pools: [{'start': '172.16.2.4', 'end': '172.16.2.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:2000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    internal_api_cell1:
      vlan: 21
      ip_subnet: '172.17.2.0/24'
      allocation_pools: [{'start': '172.17.2.10', 'end': '172.17.2.250'}]
      gateway_ip: '172.17.2.254'
- name: Tenant
  external_resource_network_id: ee83d0fb-3bf1-47f2-a02b-ef5dc277afae
  external_resource_subnet_id: 0b6030ae-8445-4480-ab17-dd4c7c8fa64b
  vip: false  # Tenant network does not use VIPs
  name_lower: tenant
  vlan: 50
  ip_subnet: '172.16.0.0/24'
  allocation_pools: [{'start': '172.16.0.4', 'end': '172.16.0.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:5000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:5000::10', 'end': 'fd00:fd00:fd00:5000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
- name: External
  external_resource_network_id: 89b7b481-f609-45e7-ad5e-e006553c1d3a
  external_resource_subnet_id: dd84112d-2129-430c-a8c2-77d2dee05af2
  vip: true
  name_lower: external
  vlan: 10
  ip_subnet: '10.0.0.0/24'
  allocation_pools: [{'start': '10.0.0.4', 'end': '10.0.0.250'}]
  gateway_ip: '10.0.0.1'
  ipv6_subnet: '2001:db8:fd00:1000::/64'
  ipv6_allocation_pools: [{'start': '2001:db8:fd00:1000::10', 'end': '2001:db8:fd00:1000:ffff:ffff:ffff:fffe'}]
  gateway_ipv6: '2001:db8:fd00:1000::1'
  mtu: 1500
  subnets:
    external_cell1:
      vlan: 11
      ip_subnet: '10.0.1.0/24'
      allocation_pools: [{'start': '10.0.1.10', 'end': '10.0.1.250'}]
      gateway_ip: '10.0.1.254'
.. note::
When not sharing networks between stacks, each network defined in `network_data*.yaml`
must have a unique name across all deployed stacks. This requirement is necessary
since regardless of the stack, all networks are created in the same tenant in
Neutron on the undercloud.
Export EndpointMap, HostsEntry, AllNodesConfig, GlobalConfig and passwords information
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Follow the steps as explained in :ref:`cell_export_overcloud_info` on how to
export the required data from the overcloud stack.
Cell roles
^^^^^^^^^^
Modify the cell roles file to use new subnets for `InternalApi`, `Storage`,
`StorageMgmt` and `External` for cell controller and compute:
.. code-block:: bash
openstack overcloud roles generate --roles-path \
/usr/share/openstack-tripleo-heat-templates/roles \
-o $DIR/cell_roles_data.yaml Compute CellController
For each role, modify the subnets to match what was defined in the previous step
in `cell1/network_data-ctrl.yaml`:
.. code-block::
- name: Compute
  description: |
    Basic Compute Node role
  CountDefault: 1
  # Create external Neutron bridge (unset if using ML2/OVS without DVR)
  tags:
    - external_bridge
  networks:
    InternalApi:
      subnet: internal_api_cell1
    Tenant:
      subnet: tenant_subnet
    Storage:
      subnet: storage_cell1
...
- name: CellController
  description: |
    CellController role for the nova cell_v2 controller services
  CountDefault: 1
  tags:
    - primary
    - controller
  networks:
    External:
      subnet: external_cell1
    InternalApi:
      subnet: internal_api_cell1
    Storage:
      subnet: storage_cell1
    StorageMgmt:
      subnet: storage_mgmt_cell1
    Tenant:
      subnet: tenant_subnet
Create the cell parameter file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Each cell has some mandatory parameters which need to be set using an
environment file.
Add the following content into a parameter file for the cell, e.g. `cell1/cell1.yaml`:
.. code-block:: yaml
parameter_defaults:
# new CELL Parameter to reflect that this is an additional CELL
NovaAdditionalCell: True
# The DNS names for the VIPs for the cell
CloudName: cell1.ooo.test
CloudNameInternal: cell1.internalapi.ooo.test
CloudNameStorage: cell1.storage.ooo.test
CloudNameStorageManagement: cell1.storagemgmt.ooo.test
CloudNameCtlplane: cell1.ctlplane.ooo.test
# Flavors used for the cell controller and computes
OvercloudCellControllerFlavor: cellcontroller
OvercloudComputeFlavor: compute
# number of controllers/computes in the cell
CellControllerCount: 3
ComputeCount: 0
# Compute names need to be unique, make sure to have a unique
# hostname format for cell nodes
ComputeHostnameFormat: 'cell1-compute-%index%'
# default gateway
ControlPlaneStaticRoutes:
- ip_netmask: 0.0.0.0/0
next_hop: 192.168.24.1
default: true
DnsServers:
- x.x.x.x
Virtual IP addresses
^^^^^^^^^^^^^^^^^^^^
The cell controller hosts VIPs (Virtual IP addresses) and does not use
the base subnet of one or more networks; therefore additional overrides to the
`VipSubnetMap` are required to ensure VIPs are created on the subnet associated
with the L2 network segment the controller nodes are connected to.
Add a `VipSubnetMap` override to `cell1/cell1.yaml` or a new parameter file to
point the VIPs to the correct subnet:
.. code-block:: yaml
parameter_defaults:
VipSubnetMap:
InternalApi: internal_api_cell1
Storage: storage_cell1
StorageMgmt: storage_mgmt_cell1
External: external_cell1
Create the network configuration for `cellcontroller` and add to environment file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Depending on the network configuration of the hardware used and the network
architecture, it is required to register a resource for the `CellController`
role in `cell1/cell1.yaml`.
.. code-block:: yaml
resource_registry:
OS::TripleO::CellController::Net::SoftwareConfig: cell1/single-nic-vlans/controller.yaml
OS::TripleO::Compute::Net::SoftwareConfig: cell1/single-nic-vlans/compute.yaml
.. note::
For details on network configuration consult :ref:`network_isolation` guide, chapter *Customizing the Interface Templates*.
Deploy the cell controllers
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Create new flavor used to tag the cell controller
_________________________________________________
Follow the instructions in :ref:`cell_create_flavor_and_tag` on how to create
a new flavor and tag the cell controller.
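As a rough sketch of what the referenced section covers, creating the `cellcontroller`
flavor and tagging a baremetal node with a matching profile might look like this
(values are illustrative; follow :ref:`cell_create_flavor_and_tag` for the
authoritative steps):
.. code-block:: bash
source stackrc
openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 cellcontroller
openstack flavor set --property "capabilities:boot_option"="local" \
--property "capabilities:profile"="cellcontroller" cellcontroller
openstack baremetal node set <node uuid> \
--property capabilities='profile:cellcontroller,boot_option:local'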
Run cell deployment
___________________
To deploy the cell controller stack we can use the same `overcloud deploy` command
that was used to deploy the `overcloud` stack and add the created export
environment files:
.. code-block:: bash
openstack overcloud deploy \
--templates /usr/share/openstack-tripleo-heat-templates \
-e ... additional environment files used for overcloud stack, like container
prepare parameters, or other specific parameters for the cell
...
--stack cell1-ctrl \
  -n $HOME/$DIR/network_data-ctrl.yaml \
  -r $HOME/$DIR/cell_roles_data.yaml \
-e $HOME/$DIR/cell1-ctrl-input.yaml \
-e $HOME/$DIR/cell1.yaml
Wait for the deployment to finish:
.. code-block:: bash
openstack stack list
+--------------------------------------+------------+----------------------------------+-----------------+----------------------+----------------------+
| ID | Stack Name | Project | Stack Status | Creation Time | Updated Time |
+--------------------------------------+------------+----------------------------------+-----------------+----------------------+----------------------+
| 6403ed94-7c8f-47eb-bdb8-388a5ac7cb20 | cell1-ctrl | f7736589861c47d8bbf1ecd29f02823d | CREATE_COMPLETE | 2019-08-15T14:46:32Z | None |
| 925a2875-fbbb-41fd-bb06-bf19cded2510 | overcloud | f7736589861c47d8bbf1ecd29f02823d | UPDATE_COMPLETE | 2019-08-13T10:43:20Z | 2019-08-15T10:13:41Z |
+--------------------------------------+------------+----------------------------------+-----------------+----------------------+----------------------+
Create the cell
^^^^^^^^^^^^^^^
As in :ref:`cell_create_cell`, create the cell, but we can skip the final host
discovery step as the computes are not yet deployed.
Extract deployment information from the cell controller stack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Follow the steps explained in :ref:`cell_export_cell_controller_info` on
how to export the required input data from the cell controller stack.
Create cell compute parameter file for additional customization
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Create the `cell1/cell1-cmp.yaml` parameter file to override settings
which differ from the cell controller stack.
.. code-block:: yaml
parameter_defaults:
# number of controllers/computes in the cell
CellControllerCount: 0
ComputeCount: 1
The above file overrides the values from `cell1/cell1.yaml` so that no
controller is deployed in the cell compute stack. Since the cell compute stack uses
the same roles file, the default `CellControllerCount` is 1.
Reusing networks from control plane and cell controller stack
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For the cell compute stack we reuse the networks from the control plane
stack and the subnet from the cell controller stack. Therefore references
to the external resources for network, subnet, segment and vip are required:
.. code-block:: bash
cp cell1/network_data-ctrl.yaml cell1/network_data-cmp.yaml
The storage network definition in `cell1/network_data-cmp.yaml` looks
like this:
.. code-block::
- name: Storage
  external_resource_network_id: 30e9d52d-1929-47ed-884b-7c6d65fa2e00
  external_resource_subnet_id: 11a3777a-8c42-4314-a47f-72c86e9e6ad4
  external_resource_vip_id: 4ed73ea9-4cf6-42c1-96a5-e32b415c738f
  vip: true
  vlan: 30
  name_lower: storage
  ip_subnet: '172.16.1.0/24'
  allocation_pools: [{'start': '172.16.1.4', 'end': '172.16.1.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:3000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    storage_cell1:
      vlan: 31
      ip_subnet: '172.17.1.0/24'
      allocation_pools: [{'start': '172.17.1.10', 'end': '172.17.1.250'}]
      gateway_ip: '172.17.1.254'
      external_resource_subnet_id: 7930635d-d1d5-4699-b318-00233c73ed6b
      external_resource_segment_id: 730769f8-e78f-42a3-9dd4-367a212e49ff
Previously we already added the `external_resource_network_id` and `external_resource_subnet_id`
for the network in the upper level of the hierarchy.
In addition we add the `external_resource_vip_id` of the VIP of the stack which
should be reused for this network (Storage).
It is important that the `external_resource_vip_id` for the InternalApi network points to
the VIP of the cell controller stack!
.. code-block:: bash
openstack port show <id storage_virtual_ip overcloud stack> -c id -f value
In the `storage_cell1` subnet section we add the `external_resource_subnet_id`
and `external_resource_segment_id` of the cell controller stack:
.. code-block:: yaml
storage_cell1:
vlan: 31
ip_subnet: '172.17.1.0/24'
allocation_pools: [{'start': '172.17.1.10', 'end': '172.17.1.250'}]
gateway_ip: '172.17.1.254'
external_resource_subnet_id: 7930635d-d1d5-4699-b318-00233c73ed6b
external_resource_segment_id: 730769f8-e78f-42a3-9dd4-367a212e49ff
.. code-block:: bash
openstack subnet show storage_cell1 -c id -f value
openstack network segment show storage_storage_cell1 -c id -f value
Full networks data example for the compute stack:
.. code-block::
- name: Storage
  external_resource_network_id: 30e9d52d-1929-47ed-884b-7c6d65fa2e00
  external_resource_subnet_id: 11a3777a-8c42-4314-a47f-72c86e9e6ad4
  external_resource_vip_id: 4ed73ea9-4cf6-42c1-96a5-e32b415c738f
  vip: true
  vlan: 30
  name_lower: storage
  ip_subnet: '172.16.1.0/24'
  allocation_pools: [{'start': '172.16.1.4', 'end': '172.16.1.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:3000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:3000::10', 'end': 'fd00:fd00:fd00:3000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    storage_cell1:
      vlan: 31
      ip_subnet: '172.17.1.0/24'
      allocation_pools: [{'start': '172.17.1.10', 'end': '172.17.1.250'}]
      gateway_ip: '172.17.1.254'
      external_resource_subnet_id: 7930635d-d1d5-4699-b318-00233c73ed6b
      external_resource_segment_id: 730769f8-e78f-42a3-9dd4-367a212e49ff
- name: StorageMgmt
  name_lower: storage_mgmt
  external_resource_network_id: 29e85314-2177-4cbd-aac8-6faf2a3f7031
  external_resource_subnet_id: 01c0a75e-e62f-445d-97ad-b98a141d6082
  external_resource_segment_id: 4b4f6f83-f031-4495-84c5-7422db1729d5
  vip: true
  vlan: 40
  ip_subnet: '172.16.3.0/24'
  allocation_pools: [{'start': '172.16.3.4', 'end': '172.16.3.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:4000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:4000::10', 'end': 'fd00:fd00:fd00:4000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    storage_mgmt_cell1:
      vlan: 41
      ip_subnet: '172.17.3.0/24'
      allocation_pools: [{'start': '172.17.3.10', 'end': '172.17.3.250'}]
      gateway_ip: '172.17.3.254'
      external_resource_subnet_id: de9233d4-53a3-485d-8433-995a9057383f
      external_resource_segment_id: 2400718d-7fbd-4227-8318-245747495241
- name: InternalApi
  name_lower: internal_api
  external_resource_network_id: 5eb79743-7ff4-4f68-9904-6e9c36fbaaa6
  external_resource_subnet_id: dbc24086-0aa7-421d-857d-4e3956adec10
  external_resource_vip_id: 1a287ad7-e574-483a-8288-e7c385ee88a0
  vip: true
  vlan: 20
  ip_subnet: '172.16.2.0/24'
  allocation_pools: [{'start': '172.16.2.4', 'end': '172.16.2.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:2000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:2000::10', 'end': 'fd00:fd00:fd00:2000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
  subnets:
    internal_api_cell1:
      external_resource_subnet_id: 16b8cf48-6ca1-4117-ad90-3273396cb41d
      external_resource_segment_id: b310daec-7811-46be-a958-a05a5b0569ef
      vlan: 21
      ip_subnet: '172.17.2.0/24'
      allocation_pools: [{'start': '172.17.2.10', 'end': '172.17.2.250'}]
      gateway_ip: '172.17.2.254'
- name: Tenant
  external_resource_network_id: ee83d0fb-3bf1-47f2-a02b-ef5dc277afae
  external_resource_subnet_id: 0b6030ae-8445-4480-ab17-dd4c7c8fa64b
  vip: false  # Tenant network does not use VIPs
  name_lower: tenant
  vlan: 50
  ip_subnet: '172.16.0.0/24'
  allocation_pools: [{'start': '172.16.0.4', 'end': '172.16.0.250'}]
  ipv6_subnet: 'fd00:fd00:fd00:5000::/64'
  ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:5000::10', 'end': 'fd00:fd00:fd00:5000:ffff:ffff:ffff:fffe'}]
  mtu: 1500
- name: External
  external_resource_network_id: 89b7b481-f609-45e7-ad5e-e006553c1d3a
  external_resource_subnet_id: dd84112d-2129-430c-a8c2-77d2dee05af2
  external_resource_vip_id: b7a0606d-f598-4dc6-9e85-e023c64fd20b
  vip: true
  name_lower: external
  vlan: 10
  ip_subnet: '10.0.0.0/24'
  allocation_pools: [{'start': '10.0.0.4', 'end': '10.0.0.250'}]
  gateway_ip: '10.0.0.1'
  ipv6_subnet: '2001:db8:fd00:1000::/64'
  ipv6_allocation_pools: [{'start': '2001:db8:fd00:1000::10', 'end': '2001:db8:fd00:1000:ffff:ffff:ffff:fffe'}]
  gateway_ipv6: '2001:db8:fd00:1000::1'
  mtu: 1500
  subnets:
    external_cell1:
      vlan: 11
      ip_subnet: '10.0.1.0/24'
      allocation_pools: [{'start': '10.0.1.10', 'end': '10.0.1.250'}]
      gateway_ip: '10.0.1.254'
      external_resource_subnet_id: 81ac9bc2-4fbe-40be-ac0e-9aa425799626
      external_resource_segment_id: 8a877c1f-cb47-40dd-a906-6731f042e544
Deploy the cell computes
^^^^^^^^^^^^^^^^^^^^^^^^
Run cell deployment
___________________
To deploy the cell compute stack we can use the same `overcloud deploy` command
that was used to deploy the `cell1-ctrl` stack and add the created export
environment files:
.. code-block:: bash
openstack overcloud deploy \
--templates /usr/share/openstack-tripleo-heat-templates \
-e ... additional environment files used for overcloud stack, like container
prepare parameters, or other specific parameters for the cell
...
--stack cell1-cmp \
-r $HOME/$DIR/cell_roles_data.yaml \
-n $HOME/$DIR/network_data-cmp.yaml \
-e $HOME/$DIR/cell1-ctrl-input.yaml \
-e $HOME/$DIR/cell1-cmp-input.yaml \
-e $HOME/$DIR/cell1.yaml \
-e $HOME/$DIR/cell1-cmp.yaml
Wait for the deployment to finish:
.. code-block:: bash
openstack stack list
+--------------------------------------+------------+----------------------------------+--------------------+----------------------+----------------------+
| ID | Stack Name | Project | Stack Status | Creation Time | Updated Time |
+--------------------------------------+------------+----------------------------------+--------------------+----------------------+----------------------+
| 12e86ea6-3725-482a-9b05-b283378dcf30 | cell1-cmp | f7736589861c47d8bbf1ecd29f02823d | CREATE_COMPLETE | 2019-08-15T15:57:19Z | None |
| 6403ed94-7c8f-47eb-bdb8-388a5ac7cb20 | cell1-ctrl | f7736589861c47d8bbf1ecd29f02823d | CREATE_COMPLETE | 2019-08-15T14:46:32Z | None |
| 925a2875-fbbb-41fd-bb06-bf19cded2510 | overcloud | f7736589861c47d8bbf1ecd29f02823d | UPDATE_COMPLETE | 2019-08-13T10:43:20Z | 2019-08-15T10:13:41Z |
+--------------------------------------+------------+----------------------------------+--------------------+----------------------+----------------------+
Perform cell host discovery
___________________________
The final step is to discover the computes deployed in the cell. Run the host discovery
as explained in :ref:`cell_host_discovery`.
Create and add the node to an Availability Zone
_______________________________________________
After a cell is provisioned, an availability zone must be created for the
compute stack; it is not enough to just create an availability zone for the complete
cell. In this use case we want to make sure an instance created in the compute group
stays in it when performing a migration. Check :ref:`cell_availability_zone` for more
details on how to create an availability zone and add the node.
After that the cell is deployed and can be used.
.. note::
Migrating instances between cells is not supported. To move an instance to
a different cell it needs to be re-created in the new target cell.

Deploying Manila in the Overcloud
=================================
This guide assumes that your undercloud is already installed and ready to
deploy an overcloud with Manila enabled.
Deploying the Overcloud with the Internal Ceph Backend
------------------------------------------------------
Ceph deployed by TripleO can be used as a Manila share backend. Make sure that
Ceph, Ceph MDS and Manila Ceph environment files are included when deploying the
Overcloud::
openstack overcloud deploy --templates \
-e /usr/share/openstack-tripleo-heat-templates/environments/cephadm/cephadm.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/cephadm/ceph-mds.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/manila-cephfsnative-config.yaml
.. note::
These and any other environment files or options passed to the overcloud
deploy command are referenced below as the "full environment". We assume
the ``--plan`` flag is not what we want to use for this example.
Network Isolation
~~~~~~~~~~~~~~~~~
When mounting a ceph share from a user instance, the user instance needs access
to the Ceph public network, which in TripleO maps to the Overcloud storage
network. In an Overcloud which uses isolated networks the tenant network and
storage network are isolated from one another, so user instances cannot reach
the Ceph public network unless the cloud administrator creates a provider
network in neutron that maps to the storage network and exposes access to it.
Before deploying Overcloud make sure that there is a bridge for storage network
interface. If single NIC with VLANs network configuration is used (as in
``/usr/share/openstack-tripleo-heat-templates/network/config/single-nic-vlans/``)
then by default ``br-ex`` bridge is used for storage network and no additional
customization is required for Overcloud deployment. If a dedicated interface is
used for storage network (as in
``/usr/share/openstack-tripleo-heat-templates/network/config/multiple-nics/``)
then update storage interface for each node type (controller, compute, ceph) to
use bridge. The following interface definition::
- type: interface
name: nic2
use_dhcp: false
addresses:
- ip_netmask:
get_param: StorageIpSubnet
should be replaced with::
- type: ovs_bridge
name: br-storage
use_dhcp: false
addresses:
- ip_netmask:
get_param: StorageIpSubnet
members:
- type: interface
name: nic2
use_dhcp: false
primary: true
Then pass the following parameters when deploying the Overcloud to allow Neutron to map
provider networks to the storage bridge::
parameter_defaults:
NeutronBridgeMappings: datacentre:br-ex,storage:br-storage
NeutronFlatNetworks: datacentre,storage
If the storage network uses VLAN, include storage network in
``NeutronNetworkVLANRanges`` parameter. For example::
NeutronNetworkVLANRanges: 'datacentre:100:1000,storage:30:30'
.. warning::
If network isolation is used, make sure that storage provider network
subnet doesn't overlap with IP allocation pool used for Overcloud storage
nodes (controlled by ``StorageAllocationPools`` heat parameter).
``StorageAllocationPools`` is by default set to
``[{'start': '172.16.1.4', 'end': '172.16.1.250'}]``. It may be necessary
to shrink this pool, for example::
StorageAllocationPools: [{'start': '172.16.1.4', 'end': '172.16.1.99'}]
When Overcloud is deployed, create a provider network which can be used to
access storage network.
* If single NIC with VLANs is used, then the provider network is mapped
to the default datacentre network::
neutron net-create storage --shared --provider:physical_network \
datacentre --provider:network_type vlan --provider:segmentation_id 30
neutron subnet-create --name storage-subnet \
--allocation-pool start=172.16.1.100,end=172.16.1.120 \
--enable-dhcp storage 172.16.1.0/24
* If a custom bridge was used for storage network interface (``br-storage`` in
the example above) then provider network is mapped to the network specified
by ``NeutronBridgeMappings`` parameter (``storage`` network in the example
above)::
neutron net-create storage --shared --provider:physical_network storage \
--provider:network_type flat
neutron subnet-create --name storage-subnet \
--allocation-pool start=172.16.1.200,end=172.16.1.220 --enable-dhcp \
storage 172.16.1.0/24 --no-gateway
.. note::
Allocation pool should not overlap with storage network
pool used for storage nodes (``StorageAllocationPools`` parameter).
You may also need to shrink storage nodes pool size to reserve more IPs
for tenants using the provider network.
.. note::
Make sure that the subnet CIDR matches the storage network CIDR (``StorageNetCidr``
parameter) and that the segmentation_id matches the VLAN ID for the storage
network traffic (``StorageNetworkVlanID`` parameter).
Then Ceph shares can be accessed from a user instance by adding the provider
network to the instance.
.. note::
Cloud-init by default configures only the first network interface to use DHCP,
which means that user instances will not have the network interface for the
storage network autoconfigured. You can configure it manually or use
`dhcp-all-interfaces <https://docs.openstack.org/diskimage-builder/elements/dhcp-all-interfaces/README.html>`_.
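For example, on a typical Linux guest the storage interface could be brought up
manually with DHCP (the interface name is illustrative)::
sudo ip link set eth1 up
sudo dhclient eth1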
Deploying Manila in the overcloud with CephFS through NFS and a composable network
----------------------------------------------------------------------------------
The CephFS through NFS back end is composed of Ceph metadata servers (MDS),
NFS Ganesha (the NFS gateway), and the Ceph cluster service components.
The manila CephFS NFS driver uses NFS-Ganesha gateway to provide NFSv4 protocol
access to CephFS shares.
The Ceph MDS service maps the directories and file names of the file system
to objects that are stored in RADOS clusters.
The NFS-Ganesha service runs on the Controller nodes with the Ceph services.
CephFS with NFS-Ganesha deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CephFS through NFS deployments use an extra isolated network, StorageNFS.
This network is deployed so users can mount shares over NFS on that network
without accessing the Storage or Storage Management networks which are
reserved for infrastructure traffic.
The ControllerStorageNFS custom role configures the isolated StorageNFS network.
This role is similar to the default `Controller.yaml` role file with the addition
of the StorageNFS network and the CephNfs service, indicated by the `OS::TripleO::Services:CephNfs`
service.
#. To create the ControllerStorageNfs role, used later in the process by the
overcloud deploy command, run::
openstack overcloud roles generate --roles-path /usr/share/openstack-tripleo-heat-templates/roles \
-o /home/stack/roles_data.yaml ControllerStorageNfs Compute CephStorage
#. Run the overcloud deploy command including the new generated `roles_data.yaml`
and the `network_data_ganesha.yaml` file that will trigger the generation of
this new network. The final overcloud command must look like the following::
openstack overcloud deploy \
--templates /usr/share/openstack-tripleo-heat-templates \
-n /usr/share/openstack-tripleo-heat-templates/network_data_ganesha.yaml \
-r /home/stack/roles_data.yaml \
-e /home/stack/containers-default-parameters.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
-e /home/stack/network-environment.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/cephadm/cephadm.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/cephadm/ceph-mds.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/manila-cephfsganesha-config.yaml
.. note::
The network_data_ganesha.yaml file contains an additional section that defines
the isolated StorageNFS network. Although the default settings work for most
installations, you must edit the YAML file to add your network settings,
including the VLAN ID, subnet, and other settings::
name: StorageNFS
enabled: true
vip: true
name_lower: storage_nfs
vlan: 70
ip_subnet: '172.16.4.0/24'
allocation_pools: [{'start': '172.16.4.4', 'end': '172.16.4.149'}]
ipv6_subnet: 'fd00:fd00:fd00:7000::/64'
ipv6_allocation_pools: [{'start': 'fd00:fd00:fd00:7000::10', 'end': 'fd00:fd00:fd00:7000:ffff:ffff:ffff:fffe'}]
Configure the StorageNFS network
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After the overcloud deployment is complete, create a corresponding `StorageNFSSubnet` on
the neutron-shared provider network.
The subnet must match the storage_nfs network definition in `network_data_ganesha.yaml`,
and the allocation range for the StorageNFS subnet must not overlap with that of the
corresponding undercloud subnet.
.. note::
No gateway is required because the StorageNFS subnet is dedicated to serving NFS shares.
In order to create the storage_nfs subnet, run::
openstack subnet create --allocation-pool start=172.16.4.150,end=172.16.4.250 \
--dhcp --network StorageNFS --subnet-range 172.16.4.0/24 \
--gateway none StorageNFSSubnet
#. Replace the `start=172.16.4.150,end=172.16.4.250` IP values with the IP
values for your network.
#. Replace the `172.16.4.0/24` subnet range with the subnet range for your
network.
Deploying the Overcloud with an External Backend
------------------------------------------------
.. note::
The :doc:`../deployment/template_deploy` doc has a more detailed explanation of the
following steps.
#. Copy the Manila driver-specific configuration file to your home directory:
- Dell-EMC Isilon driver::
sudo cp /usr/share/openstack-tripleo-heat-templates/environments/manila-isilon-config.yaml ~
- Dell-EMC Unity driver::
sudo cp /usr/share/openstack-tripleo-heat-templates/environments/manila-unity-config.yaml ~
- Dell-EMC Vmax driver::
sudo cp /usr/share/openstack-tripleo-heat-templates/environments/manila-vmax-config.yaml ~
- Dell-EMC VNX driver::
sudo cp /usr/share/openstack-tripleo-heat-templates/environments/manila-vnx-config.yaml ~
- NetApp driver::
sudo cp /usr/share/openstack-tripleo-heat-templates/environments/manila-netapp-config.yaml ~
#. Edit the permissions (user is typically ``stack``)::
sudo chown $USER ~/manila-*-config.yaml
sudo chmod 755 ~/manila-*-config.yaml
#. Edit the parameters in this file to fit your requirements.
- Fill in or override the values of parameters for your back end.
- Since you have copied the file out of its original location,
replace relative paths in the resource_registry with absolute paths
based on ``/usr/share/openstack-tripleo-heat-templates``.
#. Continue following the TripleO instructions for deploying an overcloud.
Before entering the command to deploy the overcloud, add the environment
file that you just configured as an argument. For example::
openstack overcloud deploy --templates \
-e <full environment> -e ~/manila-[isilon or unity or vmax or vnx or netapp]-config.yaml
#. Wait for the completion of the overcloud deployment process.
Creating the Share
------------------
.. note::
The following steps will refer to running commands as an admin user or a
tenant user. Sourcing the ``overcloudrc`` file will authenticate you as
the admin user. You can then create a tenant user and use environment
files to switch between them.
#. Create a share network to host the shares:
- Create the overcloud networks. The :doc:`../deployment/install_overcloud`
doc has a more detailed explanation about creating the network
and subnet. Note that you may also need to perform the following
steps to get Manila working::
neutron router-create router1
neutron router-interface-add router1 [subnet id]
- List the networks and subnets [tenant]::
neutron net-list && neutron subnet-list
- Create a share network (typically using the private default-net net/subnet)
[tenant]::
manila share-network-create --neutron-net-id [net] --neutron-subnet-id [subnet]
#. Create a new share type (yes/no is for specifying if the driver handles
share servers) [admin]::
manila type-create [name] [yes/no]
#. Create the share [tenant]::
manila create --share-network [share net ID] --share-type [type name] [nfs/cifs] [size of share]
Accessing the Share
-------------------
#. To access the share, create a new VM on the same Neutron network that was
used to create the share network::
nova boot --image [image ID] --flavor [flavor ID] --nic net-id=[network ID] [name]
#. Allow access to the VM you just created::
manila access-allow [share ID] ip [IP address of VM]
#. Run ``manila list`` and ensure that the share is available.
#. Log into the VM::
ssh [user]@[IP]
.. note::
You may need to configure Neutron security rules to access the
VM. That is not in the scope of this document, so it will not be covered
here.
5. In the VM, execute::
sudo mount [export location] [folder to mount to]
6. Ensure the share is mounted by looking at the bottom of the output of the
``mount`` command.
7. That's it - you're ready to start using Manila!

Deploy and Scale Swift in the Overcloud
=======================================
This guide assumes that you are ready to deploy a new overcloud. To ensure
that Swift nodes are all using the same Ring, some manual steps are required.
Initial Deploy
--------------
To correctly deploy Swift, we need to manually manage the Swift Rings. This
can be achieved by disabling the Ring building process in TripleO by setting
the ``SwiftRingBuild`` and ``RingBuild`` parameters both to ``false``. For
example::
parameter_defaults:
SwiftRingBuild: false
RingBuild: false
.. note::
If this is saved in a file named ``deploy-parameters.yaml`` then it can
be deployed with ``openstack overcloud deploy --templates -e
deploy-parameters.yaml``.
After the deploy is completed, you will need to ssh onto the overcloud node as
the ``heat-admin`` user and switch to the root user with ``sudo -i``. The IP
addresses are available in the output of ``openstack server list``. Once
connected, in the ``/etc/swift/`` directory follow the instructions in the
`Swift documentation <http://docs.openstack.org/mitaka/install-guide-rdo
/swift-initial-rings.html>`_ to create the Rings.
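For example, an initial set of rings with 3 replicas could be built roughly like this
(partition power, IP addresses, ports and device names are illustrative; see the linked
documentation for the authoritative procedure)::
swift-ring-builder account.builder create 10 3 1
swift-ring-builder container.builder create 10 3 1
swift-ring-builder object.builder create 10 3 1
# add one device per storage node to each ring, using the matching service port, e.g.:
swift-ring-builder object.builder add r1z1-192.168.24.22:6000/d1 100
# once all devices are added, rebalance each ring
swift-ring-builder object.builder rebalance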
After this is completed you will need to copy the ``/etc/swift/*.ring.gz`` and
``/etc/swift/*.builder`` files from the controller to all other controllers and
Swift storage nodes. These files will also be used when adding additional Swift
nodes. You should have six files::
/etc/swift/account.builder
/etc/swift/account.ring.gz
/etc/swift/container.builder
/etc/swift/container.ring.gz
/etc/swift/object.builder
/etc/swift/object.ring.gz
.. note::
These files will be updated each time a new node is added with
swift-ring-builder.
Scaling Swift
-------------
TripleO doesn't currently automatically update and scale Swift Rings. This
needs to be done manually, with similar steps to the above initial
deployment. First we need to define how many dedicated Swift nodes we want to
deploy with the ``ObjectStorageCount`` parameter. In this example we are
adding two Swift nodes::
parameter_defaults:
SwiftRingBuild: false
RingBuild: false
ObjectStorageCount: 2
After we have deployed again with this new environment we will have two Swift
nodes that need to be added to the ring we created during the initial
deployment. Follow the instructions on `Managing the Rings
<https://docs.openstack.org/swift/admin_guide.html#managing-the-rings>`_
to add the new devices to the rings and copy the new rings to *all* nodes in
the Swift cluster.
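A rough sketch of adding one new device and distributing the updated rings
(IP addresses and device names are illustrative)::
swift-ring-builder /etc/swift/object.builder add r1z1-192.168.24.30:6000/d1 100
swift-ring-builder /etc/swift/object.builder rebalance
# repeat for account.builder and container.builder, then copy the results,
# moving them into /etc/swift/ as root on each node afterwards
for ip in 192.168.24.22 192.168.24.24 192.168.24.6 192.168.24.30; do
    scp /etc/swift/*.ring.gz /etc/swift/*.builder heat-admin@$ip:/tmp/
done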
.. note::
Also read the section on `Scripting ring creation
<https://docs.openstack.org/swift/admin_guide.html#scripting-ring-creation>`_
to automate this process of scaling the Swift cluster.
Viewing the Ring
----------------
The swift ring can be viewed on each node with the ``swift-ring-builder``
command. It can be executed against all of the ``*.builder`` files. Its
output will display all the nodes in the Ring like this::
$ swift-ring-builder /etc/swift/object.builder
/etc/swift/object.builder, build version 4
1024 partitions, 3.000000 replicas, 1 regions, 1 zones, 3 devices, 0.00 balance, 0.00 dispersion
The minimum number of hours before a partition can be reassigned is 1
The overload factor is 0.00% (0.000000)
Devices: id region zone ip address port replication ip replication port name weight partitions balance meta
0 1 1 192.168.24.22 6000 192.168.24.22 6000 d1 100.00 1024 0.00
1 1 1 192.168.24.24 6000 192.168.24.24 6000 d1 100.00 1024 0.00
2 1 1 192.168.24.6 6000 192.168.24.6 6000 d1 100.00 1024 0.00
Ring configuration can be verified by checking the hash of the ``*.ring.gz``
files. It should be the same on all nodes in the ring::
$ sha1sum /etc/swift/*.ring.gz
d41c1b4f93a98a693a6ede074a1b78585af2dc89 /etc/swift/account.ring.gz
1d10d8cb826308a058c7089fdedfeca122426da9 /etc/swift/container.ring.gz
f26639938660ee0111e4e7bc1b45f28a0b9f6079 /etc/swift/object.ring.gz
You can also check this by using the ``swift-recon`` command on one of the
overcloud nodes. It will query all other servers, compare all checksums and
print a summary like this::
[root@overcloud-controller-0 ~]# swift-recon --md5
===============================================================================
--> Starting reconnaissance on 3 hosts (object)
===============================================================================
[2016-10-14 12:37:11] Checking ring md5sums
3/3 hosts matched, 0 error[s] while checking hosts.
===============================================================================
[2016-10-14 12:37:11] Checking swift.conf md5sum
3/3 hosts matched, 0 error[s] while checking hosts.
===============================================================================

.. _deployed_server:
Using Already Deployed Servers
==============================
TripleO can be used with servers that have already been deployed and
provisioned with a running operating system.
In this deployment scenario, Ironic from the Undercloud is not used
to do any server deployment, installation, or power management. An existing
provisioning tool external to TripleO is expected to have already
installed an operating system on the servers that are intended to be used as
nodes in the Overcloud.
Additionally, the use of Neutron is optional.
.. note::
It's an all or nothing approach when using already deployed servers. Mixing
deployed servers with servers provisioned with Nova and Ironic is not
currently possible.
Benefits to using this feature include not requiring a dedicated provisioning
network, and being able to use a custom partitioning scheme on the already
deployed servers.
Deployed Server Requirements
----------------------------
Networking
^^^^^^^^^^
Network interfaces
__________________
It's recommended that each server have a dedicated management NIC with
externally configured connectivity so that the servers are reachable outside of
any networking configuration done by the OpenStack deployment.
A separate interface, or set of interfaces should then be used for the
OpenStack deployment itself, configured in the typical fashion with a set of
NIC config templates during the Overcloud deployment. See
:doc:`../features/network_isolation` for more information on configuring networking.
.. note::
When configuring network isolation be sure that the configuration does not
result in a loss of network connectivity from the deployed servers to the
undercloud. The interface(s) that are being used for this connectivity should
be excluded from the NIC config templates so that the configuration does not
unintentionally drop all networking access to the deployed servers.
Undercloud
__________
Neutron in the Undercloud is not used for providing DHCP services for the
Overcloud nodes, hence a dedicated provisioning network with L2 connectivity is
not a requirement in this scenario. Neutron is however still used for IPAM for
the purposes of assigning IP addresses to the port resources created by
tripleo-heat-templates.
Network L3 connectivity is still a requirement between the Undercloud and
Overcloud nodes. The undercloud will need to be able to connect over a routable
IP to the overcloud nodes for software configuration with ansible.
Overcloud
_________
Configure the deployed servers that will be used as nodes in the overcloud with
L3 connectivity from the Undercloud as needed. The configuration could be done
via static or DHCP IP assignment.
Further networking configuration of Overcloud nodes is the same as in a typical
TripleO deployment, except for:
* Initial configuration of L3 connectivity from the undercloud to the
overcloud.
* No requirement for dedicating a separate L2 network for provisioning
Testing Connectivity
____________________
Test connectivity from the undercloud to the overcloud nodes using SSH over the configured IP
address on the deployed servers. This should be the IP address that is
configured on ``--overcloud-ssh-network`` as passed to the ``openstack overcloud
deploy`` command. The key and user to use with the test should be the same as
used with ``--overcloud-ssh-key`` and ``--overcloud-ssh-user`` with the
deployment command.
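For example, assuming the same key and user that will be passed to the deployment
command, a quick check could be (the IP address is illustrative)::
ssh -i <overcloud-ssh-key> <overcloud-ssh-user>@192.168.100.2 hostname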
Package repositories
^^^^^^^^^^^^^^^^^^^^
The servers will need to already have the appropriately enabled yum repositories
as packages will be installed on the servers during the Overcloud deployment.
The enabling of repositories on the Overcloud nodes is the same as it is for
other areas of TripleO, such as Undercloud installation. See
:doc:`../repositories` for the detailed steps on how to
enable the standard repositories for TripleO.
Deploying the Overcloud
-----------------------
Provision networks and ports if using Neutron
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If using Neutron for resource management, Network resources for the deployment
still must be provisioned with the ``openstack overcloud network provision``
command as documented in :ref:`custom_networks`.
Port resources for the deployment still must be provisioned with the
``openstack overcloud node provision`` command as documented in
:ref:`baremetal_provision`.
Set the ``managed`` key to false either in the ``defaults`` dictionary for each
role, or on each instance's dictionary in the baremetal provision configuration
file, as sketched below.
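A minimal sketch of such a configuration file, written here from the shell (the file
name, role names and counts are illustrative)::
cat > ~/baremetal_deployment.yaml <<'EOF'
- name: Controller
  count: 3
  defaults:
    managed: false
- name: Compute
  count: 1
  defaults:
    managed: false
EOF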
The generated file must then be passed to the ``openstack overcloud deploy``
command.
Deployment Command
^^^^^^^^^^^^^^^^^^
With generated baremetal and network environments
_________________________________________________
Include the generated environment files with the deployment command::
openstack overcloud deploy \
--deployed-server \
-e ~/overcloud-networks-deployed.yaml \
-e ~/overcloud-baremetal-deployed.yaml \
<other arguments>
Without generated environments (no Neutron)
___________________________________________
The following command would be used when the ``openstack overcloud network
provision`` and ``openstack overcloud node provision`` commands were not used.
Additional environment files need to be passed to the deployment command::
openstack overcloud deploy \
--deployed-server \
-e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/deployed-networks.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/deployed-ports.yaml \
-e ~/hostnamemap.yaml \
-e ~/deployed-server-network-environment.yaml \
<other arguments>
The environment file ``deployed-server-environment.yaml`` contains the necessary
``resource_registry`` mappings to disable Nova management of overcloud servers
so that deployed servers are used instead.
``deployed-networks.yaml`` and ``deployed-ports.yaml`` enable the necessary
mappings to disable the Neutron management of network resources.
``hostnamemap.yaml`` is optional and should define the ``HostnameMap``
parameter if the actual server hostnames do not match the default role hostname
format. For example::
parameter_defaults:
HostnameMap:
overcloud-controller-0: controller-00-rack01
overcloud-controller-1: controller-01-rack02
overcloud-controller-2: controller-02-rack03
overcloud-novacompute-0: compute-00-rack01
overcloud-novacompute-1: compute-01-rack01
overcloud-novacompute-2: compute-02-rack01
``deployed-server-network-environment.yaml`` should define at a minimum the
following parameters::
NodePortMap
DeployedNetworkEnvironment
ControlPlaneVipData
VipPortMap
OVNDBsVirtualFixedIPs
RedisVirtualFixedIPs
EC2MetadataIp
ControlPlaneDefaultRoute
The following is a sample environment file that shows setting these values:
.. code-block:: yaml
parameter_defaults:
NodePortMap:
controller0:
ctlplane:
ip_address: 192.168.100.2
ip_address_uri: 192.168.100.2
ip_subnet: 192.168.100.0/24
external:
ip_address: 10.0.0.10
ip_address_uri: 10.0.0.10
ip_subnet: 10.0.0.10/24
internal_api:
ip_address: 172.16.2.10
ip_address_uri: 172.16.2.10
ip_subnet: 172.16.2.10/24
management:
ip_address: 192.168.1.10
ip_address_uri: 192.168.1.10
ip_subnet: 192.168.1.10/24
storage:
ip_address: 172.16.1.10
ip_address_uri: 172.16.1.10
ip_subnet: 172.16.1.10/24
storage_mgmt:
ip_address: 172.16.3.10
ip_address_uri: 172.16.3.10
ip_subnet: 172.16.3.10/24
tenant:
ip_address: 172.16.0.10
ip_address_uri: 172.16.0.10
ip_subnet: 172.16.0.10/24
compute0:
ctlplane:
ip_address: 192.168.100.3
ip_address_uri: 192.168.100.3
ip_subnet: 192.168.100.0/24
external:
ip_address: 10.0.0.110
ip_address_uri: 10.0.0.110
ip_subnet: 10.0.0.110/24
internal_api:
ip_address: 172.16.2.110
ip_address_uri: 172.16.2.110
ip_subnet: 172.16.2.110/24
management:
ip_address: 192.168.1.110
ip_address_uri: 192.168.1.110
ip_subnet: 192.168.1.110/24
storage:
ip_address: 172.16.1.110
ip_address_uri: 172.16.1.110
ip_subnet: 172.16.1.110/24
storage_mgmt:
ip_address: 172.16.3.110
ip_address_uri: 172.16.3.110
ip_subnet: 172.16.3.110/24
tenant:
ip_address: 172.16.0.110
ip_address_uri: 172.16.0.110
ip_subnet: 172.16.0.110/24
ControlPlaneVipData:
fixed_ips:
- ip_address: 192.168.100.1
name: control_virtual_ip
network:
tags: []
subnets:
- ip_version: 4
VipPortMap:
external:
ip_address: 10.0.0.100
ip_address_uri: 10.0.0.100
ip_subnet: 10.0.0.100/24
internal_api:
ip_address: 172.16.2.100
ip_address_uri: 172.16.2.100
ip_subnet: 172.16.2.100/24
storage:
ip_address: 172.16.1.100
ip_address_uri: 172.16.1.100
ip_subnet: 172.16.1.100/24
storage_mgmt:
ip_address: 172.16.3.100
ip_address_uri: 172.16.3.100
ip_subnet: 172.16.3.100/24
RedisVirtualFixedIPs:
- ip_address: 192.168.100.10
use_neutron: false
OVNDBsVirtualFixedIPs:
- ip_address: 192.168.100.11
use_neutron: false
DeployedNetworkEnvironment:
net_attributes_map:
external:
network:
dns_domain: external.tripleodomain.
mtu: 1400
name: external
tags:
- tripleo_network_name=External
- tripleo_net_idx=0
- tripleo_vip=true
subnets:
external_subnet:
cidr: 10.0.0.0/24
dns_nameservers: []
gateway_ip: null
host_routes: []
ip_version: 4
name: external_subnet
tags:
- tripleo_vlan_id=10
internal_api:
network:
dns_domain: internalapi.tripleodomain.
mtu: 1400
name: internal_api
tags:
- tripleo_net_idx=1
- tripleo_vip=true
- tripleo_network_name=InternalApi
subnets:
internal_api_subnet:
cidr: 172.16.2.0/24
dns_nameservers: []
gateway_ip: null
host_routes: []
ip_version: 4
name: internal_api_subnet
tags:
- tripleo_vlan_id=20
management:
network:
dns_domain: management.tripleodomain.
mtu: 1400
name: management
tags:
- tripleo_net_idx=5
- tripleo_network_name=Management
subnets:
management_subnet:
cidr: 192.168.1.0/24
dns_nameservers: []
gateway_ip: 192.168.1.1
host_routes: []
ip_version: 4
name: management_subnet
tags:
- tripleo_vlan_id=60
storage:
network:
dns_domain: storage.tripleodomain.
mtu: 1400
name: storage
tags:
- tripleo_net_idx=3
- tripleo_vip=true
- tripleo_network_name=Storage
subnets:
storage_subnet:
cidr: 172.16.1.0/24
dns_nameservers: []
gateway_ip: null
host_routes: []
ip_version: 4
name: storage_subnet
tags:
- tripleo_vlan_id=30
storage_mgmt:
network:
dns_domain: storagemgmt.tripleodomain.
mtu: 1400
name: storage_mgmt
tags:
- tripleo_net_idx=4
- tripleo_vip=true
- tripleo_network_name=StorageMgmt
subnets:
storage_mgmt_subnet:
cidr: 172.16.3.0/24
dns_nameservers: []
gateway_ip: null
host_routes: []
ip_version: 4
name: storage_mgmt_subnet
tags:
- tripleo_vlan_id=40
tenant:
network:
dns_domain: tenant.tripleodomain.
mtu: 1400
name: tenant
tags:
- tripleo_net_idx=2
- tripleo_network_name=Tenant
subnets:
tenant_subnet:
cidr: 172.16.0.0/24
dns_nameservers: []
gateway_ip: null
host_routes: []
ip_version: 4
name: tenant_subnet
tags:
- tripleo_vlan_id=50
net_cidr_map:
external:
- 10.0.0.0/24
internal_api:
- 172.16.2.0/24
management:
- 192.168.1.0/24
storage:
- 172.16.1.0/24
storage_mgmt:
- 172.16.3.0/24
tenant:
- 172.16.0.0/24
net_ip_version_map:
external: 4
internal_api: 4
management: 4
storage: 4
storage_mgmt: 4
tenant: 4
.. note::
Beginning in Wallaby, the above parameter values from
``deployed-server-network-environment.yaml`` and the
``deployed-networks.yaml`` and ``deployed-ports.yaml`` environments replace the use of the
``DeployedServerPortMap`` parameter, the
``environments/deployed-server-deployed-neutron-ports.yaml`` environment, and
the ``deployed-neutron-port.yaml`` template.
The previous parameters and environments can still be used with the
exception that no resources can be mapped to any Neutron native Heat
resources (resources starting with ``OS::Neutron::*``) when using
:doc:`ephemeral Heat <../deployment/ephemeral_heat>` as there is no Heat
and Neutron API communication.
Note that the following resources may be mapped to ``OS::Neutron::*``
resources in environment files used prior to Wallaby, and these mappings
should be removed from Wallaby onward::
OS::TripleO::Network::Ports::ControlPlaneVipPort
OS::TripleO::Network::Ports::RedisVipPort
OS::TripleO::Network::Ports::OVNDBsVipPort
.. admonition:: Victoria and prior releases
The ``DeployedServerPortMap`` parameter can be used to assign fixed IP's
from either the ctlplane network or the IP address range for the
overcloud.
If the deployed servers were preconfigured with IP addresses from the ctlplane
network for the initial undercloud connectivity, then the same IP addresses can
be reused during the overcloud deployment. Add the following to a new
environment file and specify the environment file as part of the deployment
command::
resource_registry:
OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/deployed-neutron-port.yaml
parameter_defaults:
DeployedServerPortMap:
controller0-ctlplane:
fixed_ips:
- ip_address: 192.168.24.9
subnets:
- cidr: 192.168.24.0/24
network:
tags:
- 192.168.24.0/24
compute0-ctlplane:
fixed_ips:
- ip_address: 192.168.24.8
subnets:
      - cidr: 192.168.24.0/24
network:
tags:
- 192.168.24.0/24
The value of the DeployedServerPortMap variable is a map. The keys correspond
to the ``<short hostname>-ctlplane`` of the deployed servers. Specify the ip
addresses and subnet CIDR to be assigned under ``fixed_ips``.
In the case where the ctlplane is not routable from the deployed
servers, the virtual IPs on the ControlPlane, as well as the virtual IPs
for services (Redis and OVNDBs) must be statically assigned.
Use ``DeployedServerPortMap`` to assign an IP address from any CIDR::
resource_registry:
OS::TripleO::DeployedServer::ControlPlanePort: /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-neutron-port.yaml
OS::TripleO::Network::Ports::ControlPlaneVipPort: /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-neutron-port.yaml
# Set VIP's for redis and OVN to noop to default to the ctlplane VIP
# The ctlplane VIP is set with control_virtual_ip in
# DeployedServerPortMap below.
#
# Alternatively, these can be mapped to deployed-neutron-port.yaml as
# well and redis_virtual_ip and ovn_dbs_virtual_ip added to the
# DeployedServerPortMap value to set fixed IP's.
OS::TripleO::Network::Ports::RedisVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
OS::TripleO::Network::Ports::OVNDBsVipPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
parameter_defaults:
NeutronPublicInterface: eth1
EC2MetadataIp: 192.168.100.1
ControlPlaneDefaultRoute: 192.168.100.1
DeployedServerPortMap:
control_virtual_ip:
fixed_ips:
- ip_address: 192.168.100.1
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
controller0-ctlplane:
fixed_ips:
- ip_address: 192.168.100.2
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
compute0-ctlplane:
fixed_ips:
- ip_address: 192.168.100.3
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
In the above example, notice how ``RedisVipPort`` and ``OVNDBsVipPort`` are
mapped to ``network/ports/noop.yaml``. This mapping is needed because these
VIP addresses come from the ctlplane by default, and they will use the same
VIP address that is used for ``ControlPlanePort``. Alternatively, these VIPs
can be mapped to their own fixed IPs, in which case a VIP will be created for
each. In that case, the following mappings and values would be added to the
above example::
resource_registry:
OS::TripleO::Network::Ports::RedisVipPort: /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-neutron-port.yaml
OS::TripleO::Network::Ports::OVNDBsVipPort: /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-neutron-port.yaml
parameter_defaults:
DeployedServerPortMap:
redis_virtual_ip:
fixed_ips:
- ip_address: 192.168.100.10
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
ovn_dbs_virtual_ip:
fixed_ips:
- ip_address: 192.168.100.11
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
Use ``DeployedServerPortMap`` to assign a ControlPlane Virtual IP address from
any CIDR, and the ``RedisVirtualFixedIPs`` and ``OVNDBsVirtualFixedIPs``
parameters to assign the ``RedisVip`` and ``OVNDBsVip``::
resource_registry:
OS::TripleO::DeployedServer::ControlPlanePort: /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-neutron-port.yaml
OS::TripleO::Network::Ports::ControlPlaneVipPort: /usr/share/openstack-tripleo-heat-templates/deployed-server/deployed-neutron-port.yaml
parameter_defaults:
NeutronPublicInterface: eth1
EC2MetadataIp: 192.168.100.1
ControlPlaneDefaultRoute: 192.168.100.1
# Set VIP's for redis and OVN
RedisVirtualFixedIPs:
- ip_address: 192.168.100.10
use_neutron: false
OVNDBsVirtualFixedIPs:
- ip_address: 192.168.100.11
use_neutron: false
DeployedServerPortMap:
control_virtual_ip:
fixed_ips:
- ip_address: 192.168.100.1
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
controller0-ctlplane:
fixed_ips:
- ip_address: 192.168.100.2
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
compute0-ctlplane:
fixed_ips:
- ip_address: 192.168.100.3
subnets:
- cidr: 192.168.100.0/24
network:
tags:
- 192.168.100.0/24
Scaling the Overcloud
---------------------
Scaling Up
^^^^^^^^^^
When scaling out compute nodes, the steps to be completed by the
user are as follows:
#. Prepare the new deployed server(s) as shown in `Deployed Server
Requirements`_.
#. Start the scale out command, as sketched below. See :doc:`../post_deployment/scale_roles` for reference.
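As a hedged sketch (the parameter name follows the role being scaled and the
count shown is only an example), scaling out usually means raising the role
count in an environment file and re-running the original deployment command
with that file added::
    $ cat ~/scale-out.yaml
    parameter_defaults:
      ComputeCount: 3
    $ openstack overcloud deploy --templates \
        -e <existing environment files> \
        -e ~/scale-out.yaml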
Scaling Down
^^^^^^^^^^^^
Starting with Train, `openstack overcloud node delete` can take a list of
server hostnames instead of instance ids. However, the two cannot be mixed
in the same command: if you use hostnames, they must be used for all of the
nodes being deleted, as in the sketch below.
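For example, assuming the default stack name of ``overcloud`` and the
illustrative hostnames used in the earlier ``HostnameMap`` example::
    openstack overcloud node delete --stack overcloud \
      compute-01-rack01 compute-02-rack01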
.. admonition:: Victoria and prior releases
:class: victoria
The following instructions should be used when the cloud is deployed on
Victoria or a prior release.
When scaling down the Overcloud, follow the scale down instructions as normal
as shown in :doc:`../post_deployment/delete_nodes`, however use the following
command to get the uuid values to pass to `openstack overcloud node delete`
instead of using `nova list`::
openstack stack resource list overcloud -n5 --filter type=OS::TripleO::<RoleName>Server
Replace `<RoleName>` in the above command with the actual name of the role that
you are scaling down. The `stack_name` column in the command output can be used
to identify the uuid associated with each node. The `stack_name` will include
the integer value of the index of the node in the Heat resource group. For
example, in the following sample output::
$ openstack stack resource list overcloud -n5 --filter type=OS::TripleO::ComputeDeployedServerServer
+-----------------------+--------------------------------------+------------------------------------------+-----------------+----------------------+-------------------------------------------------------------+
| resource_name | physical_resource_id | resource_type | resource_status | updated_time | stack_name |
+-----------------------+--------------------------------------+------------------------------------------+-----------------+----------------------+-------------------------------------------------------------+
| ComputeDeployedServer | 66b1487c-51ee-4fd0-8d8d-26e9383207f5 | OS::TripleO::ComputeDeployedServerServer | CREATE_COMPLETE | 2017-10-31T23:45:18Z | overcloud-ComputeDeployedServer-myztzg7pn54d-0-pixawichjjl3 |
| ComputeDeployedServer | 01cf59d7-c543-4f50-95df-6562fd2ed7fb | OS::TripleO::ComputeDeployedServerServer | CREATE_COMPLETE | 2017-10-31T23:45:18Z | overcloud-ComputeDeployedServer-myztzg7pn54d-1-ooCahg1vaequ |
| ComputeDeployedServer | 278af32c-c3a4-427e-96d2-3cda7e706c50 | OS::TripleO::ComputeDeployedServerServer | CREATE_COMPLETE | 2017-10-31T23:45:18Z | overcloud-ComputeDeployedServer-myztzg7pn54d-2-xooM5jai2ees |
+-----------------------+--------------------------------------+------------------------------------------+-----------------+----------------------+-------------------------------------------------------------+
The index 0, 1, or 2 can be seen in the `stack_name` column. These indices
correspond to the order of the nodes in the Heat resource group. Pass the
corresponding uuid value from the `physical_resource_id` column to `openstack
overcloud node delete` command.
The physical deployed servers that have been removed from the deployment need
to be powered off. In a deployment not using deployed servers, this would
typically be done with Ironic. When using deployed servers, it must be done
manually, or by whatever existing power management solution is already in
place. If the nodes are not powered down, they will continue to be operational
and could remain functional as part of the deployment, since there are no steps
to unconfigure, uninstall software, or stop services on nodes when scaling
down.
Once the nodes are powered down and all needed data has been saved from the
nodes, it is recommended that they be reprovisioned back to a base operating
system configuration so that they do not unintentionally join the deployment in
the future if they are powered back on.
.. note::
Do not attempt to reuse nodes that were previously removed from the
deployment without first reprovisioning them using whatever provisioning tool
is in place.
Deleting the Overcloud
----------------------
When deleting the Overcloud, the Overcloud nodes need to be manually powered
off; otherwise, the cloud will still be active and accepting user requests.
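As a hedged sketch (assuming the default stack name of ``overcloud``), removing
the stack itself uses the standard delete command; note that this does not
power off or unconfigure the deployed servers::
    openstack overcloud delete overcloud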
After archiving the important data that needs to be saved from the deployment
(log files, saved configurations, database files), it is recommended to
reprovision the nodes to a clean base operating system. The reprovisioning
ensures that they do not start serving user requests, or interfere with
future deployments, should they be powered back on in the future.
.. note::
As with scaling down, do not attempt to reuse nodes that were previously part
of a now deleted deployment in a new deployment without first reprovisioning
them using whatever provisioning tool is in place.
View File
@ -1,62 +0,0 @@
Deploying DNSaaS (Designate)
============================
Because some aspects of a Designate deployment are specific to the environment
in which it is deployed, there is some additional configuration required
beyond just including an environment file. The following instructions will
explain this configuration.
First, make a copy of the ``designate-config.yaml`` environment.
.. note:: For HA deployments, there is a separate ``designate-config-ha.yaml``
file that should be used instead.
::
cp /usr/share/openstack-tripleo-heat-templates/environments/designate-config.yaml .
This file contains a sample pool configuration which must be edited to match
the intended environment. Each section has comments that explain how to
configure it; an illustrative sketch follows the list below.
.. TODO(bnemec): Include these notes in the sample environments, or figure
out how to pull these values from the Heat stack and populate
the file automatically.
* ``ns_records``: There should be one of these for each node running designate,
and they should point at the public IP of the node.
* ``nameservers``: There should be one of these for each node running BIND.
The ``host`` value should be the public IP of the node.
* ``targets``: There should be one of these for each node running BIND. Each
target has the following attributes which need to be configured:
* ``masters``: There should be one of these for each node running
designate-mdns. The ``host`` value should be the public IP of the node.
* ``options``: This specifies where the target BIND instance will be
listening. ``host`` should be the public IP of the node, and
``rndc_host`` should be the internal_api IP of the node.
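The following is an illustrative sketch of how these pieces typically fit
together for a single node in the pool definition; every name, IP address and
path below is a placeholder, and the copied environment file remains the
authoritative reference for the exact structure::
    - name: default
      description: Default BIND9 pool
      ns_records:
        - hostname: ns1.example.com.
          priority: 1
      nameservers:
        - host: 10.0.0.51
          port: 53
      targets:
        - type: bind9
          description: BIND9 instance on the first controller
          masters:
            - host: 10.0.0.51
              port: 5354
          options:
            host: 10.0.0.51
            port: 53
            rndc_host: 172.17.0.251
            rndc_port: 953
            rndc_key_file: /etc/rndc.key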
Because this configuration requires the node IPs to be known ahead of time, it
is necessary to use predictable IPs. Full details on configuring those can be
found at :doc:`../provisioning/node_placement`.
Only the external (public) and internal_api networks need to be predictable
for Designate. The following is an example of the addresses that need to be
set::
parameter_defaults:
ControllerIPs:
external:
- 10.0.0.51
- 10.0.0.52
- 10.0.0.53
internal_api:
- 172.17.0.251
- 172.17.0.252
- 172.17.0.253
Include ``enable-designate.yaml``, ``ips-from-pool.yaml``, and either
``designate-config.yaml`` or ``designate-config-ha.yaml`` in the deploy
command::
openstack overcloud deploy --templates -e /usr/share/openstack-tripleo-heat-templates/environments/enable-designate.yaml -e ips-from-pool.yaml -e designate-config.yaml [...]
View File
@ -1,28 +0,0 @@
Disable Telemetry
=================
This guide assumes that your undercloud is already installed and ready to
deploy an overcloud without Telemetry services.
Deploy your overcloud without Telemetry services
------------------------------------------------
If you don't need or don't want Telemetry services (Ceilometer, Gnocchi,
Panko and Aodh), you can disable the services by adding this environment
file when deploying the overcloud::
openstack overcloud deploy --templates \
-e /usr/share/openstack-tripleo-heat-templates/environments/disable-telemetry.yaml
Disabling Notifications
~~~~~~~~~~~~~~~~~~~~~~~
When Telemetry is disabled, OpenStack Notifications will be disabled as well, and
the driver will be set to 'noop' for all OpenStack services.
If you would like to restore notifications, you would need to set NotificationDriver to
'messagingv2' in your environment.
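For example, a minimal environment file to restore notifications could look
like::
    parameter_defaults:
      NotificationDriver: 'messagingv2'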
.. Warning::
   The ``NotificationDriver`` parameter only supports 'noop' and 'messagingv2' for now.
   Also note that the 'messaging' driver is obsolete and isn't supported by TripleO.
File diff suppressed because it is too large
View File
@ -1,295 +0,0 @@
Domain-specific LDAP Backends
=============================
It is possible to configure keystone to use one or more LDAP backends for the
identity resources as described in the `OpenStack Identity documentation`_.
This will result in an LDAP backend per keystone domain.
Setup
-----
To configure LDAP backends, set the ``KeystoneLDAPDomainEnable`` flag to
``true``. Enabling this will set the ``domain_specific_drivers_enabled`` option
in keystone in the ``identity`` configuration group. By default the domain
configurations are stored in the **/etc/keystone/domains** directory on the
controller nodes. You can override this directory by setting the
``keystone::domain_config_directory`` hiera key, and setting that via the
``ExtraConfig`` parameter in an environment file. For instance, to set this in
the controller nodes, one would do the following::
parameter_defaults:
ControllerExtraConfig:
keystone::domain_config_directory: /etc/another/directory
The LDAP backend configuration should be provided via the
``KeystoneLDAPBackendConfigs`` parameter in tripleo-heat-templates. It's a
dictionary mapping the LDAP domain names to options that take the following
keys:
* **identity_driver**: Identity backend driver. Defaults to 'ldap'
* **url**: URL for connecting to the LDAP server.
* **user**: User BindDN to query the LDAP server.
* **password**: Password for the BindDN to query the LDAP server.
* **suffix**: LDAP server suffix
* **query_scope**: The LDAP scope for queries, this can be either "one"
(onelevel/singleLevel which is the default in keystone) or "sub"
(subtree/wholeSubtree).
* **page_size**: Maximum results per page; a value of zero ("0") disables
paging. (integer value)
* **user_tree_dn**: Search base for users.
* **user_filter**: LDAP search filter for users.
* **user_objectclass**: LDAP objectclass for users.
* **user_id_attribute**: LDAP attribute mapped to user id. **WARNING**: must
not be a multivalued attribute. (string value)
* **user_name_attribute**: LDAP attribute mapped to user name.
* **user_mail_attribute**: LDAP attribute mapped to user email.
* **user_enabled_attribute**: LDAP attribute mapped to user enabled flag.
* **user_enabled_mask**: Bitmask integer to indicate the bit that the enabled
value is stored in if the LDAP server represents "enabled" as a bit on an
integer rather than a boolean. A value of "0" indicates the mask is not used.
If this is not set to "0" the typical value is "2". This is typically used
when "user_enabled_attribute = userAccountControl". (integer value)
* **user_enabled_default**: Default value to enable users. This should match an
appropriate int value if the LDAP server uses non-boolean (bitmask) values
to indicate if a user is enabled or disabled. If this is not set to "True"
the typical value is "512". This is typically used when
"user_enabled_attribute = userAccountControl".
* **user_enabled_invert**: Invert the meaning of the boolean enabled values.
Some LDAP servers use a boolean lock attribute where "true" means an account
is disabled. Setting "user_enabled_invert = true" will allow these lock
attributes to be used. This setting will have no effect if
"user_enabled_mask" or "user_enabled_emulation" settings are in use.
(boolean value)
* **user_attribute_ignore**: List of attributes stripped off the user on
update. (list value)
* **user_default_project_id_attribute**: LDAP attribute mapped to
default_project_id for users.
* **user_pass_attribute**: LDAP attribute mapped to password.
* **user_enabled_emulation**: If true, Keystone uses an alternative method to
determine if a user is enabled or not by checking if they are a member of
the "user_enabled_emulation_dn" group. (boolean value)
* **user_enabled_emulation_dn**: DN of the group entry to hold enabled users
when using enabled emulation.
* **user_additional_attribute_mapping**: List of additional LDAP attributes
used for mapping additional attribute mappings for users. Attribute mapping
format is <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the
LDAP entry and user_attr is the Identity API attribute. (list value)
* **group_tree_dn**: Search base for groups.
* **group_filter**: LDAP search filter for groups.
* **group_objectclass**: LDAP objectclass for groups.
* **group_id_attribute**: LDAP attribute mapped to group id.
* **group_name_attribute**: LDAP attribute mapped to group name.
* **group_member_attribute**: LDAP attribute mapped to show group membership.
* **group_desc_attribute**: LDAP attribute mapped to group description.
* **group_attribute_ignore**: List of attributes stripped off the group on
update. (list value)
* **group_additional_attribute_mapping**: Additional attribute mappings for
groups. Attribute mapping format is <ldap_attr>:<user_attr>, where ldap_attr
is the attribute in the LDAP entry and user_attr is the Identity API
attribute. (list value)
* **chase_referrals**: Whether or not to chase returned referrals. Note that
it's possible that your client or even your backend do this for you already.
All this does is try to override the client configuration. If your client
doesn't support this, you might want to enable *chaining* on your LDAP server
side. (boolean value)
* **use_tls**: Enable TLS for communicating with LDAP servers. Note that you
might also enable this by using a TLS-enabled scheme in the URL (e.g.
"ldaps"). However, if you configure this via the URL, this option is not
needed. (boolean value)
* **tls_cacertfile**: CA certificate file path for communicating with LDAP
servers.
* **tls_cacertdir**: CA certificate directory path for communicating with LDAP
servers.
* **tls_req_cert**: Valid options for tls_req_cert are demand, never, and allow.
* **use_pool**: Enable LDAP connection pooling. (boolean value and defaults to
true)
* **pool_size**: Connection pool size. (integer value and defaults to '10')
* **pool_retry_max**: Maximum count of reconnect trials. (integer value and
  defaults to '3')
* **pool_retry_delay**: Time span in seconds to wait between two reconnect
trials. (floating point value and defaults to '0.1')
* **pool_connection_timeout**: Connector timeout in seconds. Value -1
indicates indefinite wait for response. (integer value and defaults to '-1')
* **pool_connection_lifetime**: Connection lifetime in seconds. (integer value
and defaults to '600')
* **use_auth_pool**: Enable LDAP connection pooling for end user authentication.
If use_pool is disabled, then this setting is meaningless and is not used at
all. (boolean value and defaults to true)
* **auth_pool_size**: End user auth connection pool size. (integer value and
defaults to '100')
* **auth_pool_connection_lifetime**: End user auth connection lifetime in
seconds. (integer value and defaults to '60')
An example of an environment file with LDAP configuration for the keystone
domain called ``tripleodomain`` would look as follows::
parameter_defaults:
KeystoneLDAPDomainEnable: true
KeystoneLDAPBackendConfigs:
tripleodomain:
url: ldap://192.0.2.250
user: cn=openstack,ou=Users,dc=tripleo,dc=example,dc=com
password: Secrete
suffix: dc=tripleo,dc=example,dc=com
user_tree_dn: ou=Users,dc=tripleo,dc=example,dc=com
user_filter: "(memberOf=cn=OSuser,ou=Groups,dc=tripleo,dc=example,dc=com)"
user_objectclass: person
user_id_attribute: cn
This will create a file named **keystone.tripleodomain.conf** in the default
domain directory **/etc/keystone/domains**, using the given attributes to
build the configuration.
Please note that both the ``KeystoneLDAPDomainEnable`` flag and the
configuration ``KeystoneLDAPBackendConfigs`` must be set.
One can also specify several domains. For instance::
KeystoneLDAPBackendConfigs:
tripleodomain1:
url: ldap://tripleodomain1.example.com
user: cn=openstack,ou=Users,dc=tripleo,dc=example,dc=com
password: Secrete1
...
tripleodomain2:
url: ldaps://tripleodomain2.example.com
user: cn=openstack,ou=Users,dc=tripleo,dc=example,dc=com
password: Secrete2
...
This will add two domains, called ``tripleodomain1`` and ``tripleodomain2``,
with their own configurations.
Post-deployment setup
---------------------
After the overcloud deployment is done, you'll need to give the admin user a
role in the newly created domain.
1. Source the overcloudrc.v3 file::
source overcloudrc.v3
2. Grant admin user on your domain::
openstack role add --domain $(openstack domain show tripleodomain -f value -c id)\
--user $(openstack user show admin --domain default -f value -c id) \
$(openstack role show admin -c id -f value)
3. Test LDAP domain in listing users::
openstack user list --domain tripleodomain
FreeIPA as an LDAP backend
--------------------------
Before configuring the domain, there needs to be a user that will query
FreeIPA. In this case, we'll create an account called ``keystone`` in FreeIPA,
and we'll use its credentials in our configuration. On the FreeIPA side and
with proper credentials loaded, we'll do the following::
ipa user-add keystone --cn="keystone user" --first="keystone" \
--last="user" --password
This will create the user and we'll be prompted to write the password for it.
Configuring FreeIPA as an LDAP backend for a domain can be done by using the
following template as a configuration::
parameter_defaults:
KeystoneLDAPDomainEnable: true
KeystoneLDAPBackendConfigs:
freeipadomain:
url: ldaps://$FREEIPA_SERVER
user: uid=keystone,cn=users,cn=accounts,$SUFFIX
password: $SOME_PASSWORD
suffix: $SUFFIX
user_tree_dn: cn=users,cn=accounts,$SUFFIX
user_objectclass: inetOrgPerson
user_id_attribute: uid
user_name_attribute: uid
user_mail_attribute: mail
group_tree_dn: cn=groups,cn=accounts,$SUFFIX
group_objectclass: groupOfNames
group_id_attribute: cn
group_name_attribute: cn
group_member_attribute: member
group_desc_attribute: description
user_enabled_attribute: nsAccountLock
user_enabled_default: False
user_enabled_invert: true
* $FREEIPA_SERVER will contain the FQDN that points to your FreeIPA server.
Remember that it needs to be available from some network (most likely the
ctlplane network) in TripleO
* You should also make sure that the LDAP ports are accessible. In this case,
  we need port 636 available since we're using the ``ldaps`` scheme. However,
  if you are using the ``use_tls`` configuration option or if you are not using
  TLS at all (not recommended), you might also need port 389.
* To use TLS, the FreeIPA server's certificate must also be trusted by the
openldap client libraries. If you're using novajoin (and
:doc:`tls-everywhere`) this is easily achieved since all the nodes in your
overcloud are enrolled in FreeIPA. If you're not using this setup, you should
then follow the 'Getting the overcloud to trust CAs' section in the
:doc:`ssl` document.
* $SUFFIX will be the domain for your users. Given a domain, the suffix DN can
be created with the following snippet::
suffix=`echo $DOMAIN | sed -e 's/^/dc=/' -e 's/\./,dc=/g'`
Given the domain ``example.com`` the suffix will be ``dc=example,dc=com``.
* In this configuration, we configure this backend as read-only. So you'll need
to create your OpenStack users on the FreeIPA side.
.. References
.. _`OpenStack Identity documentation`: https://docs.openstack.org/admin-guide/identity-integrate-with-ldap.html
View File
@ -1,279 +0,0 @@
Node customization and Third-Party Integration
==============================================
It is possible to enable additional configuration during one of the
following deployment phases:
* firstboot - run once config (performed on each node by cloud-init)
* per-node - run after the node is initially created but before services are deployed and configured (e.g. by puppet)
* post-deploy - run after the services have been deployed and configured (e.g. by puppet)
.. note::
This documentation assumes some knowledge of heat HOT_ template
syntax, and makes use of heat environment_ files. See the upstream
heat documentation_ for further information.
.. _HOT: https://docs.openstack.org/heat/template_guide/hot_guide.html
.. _environment: https://docs.openstack.org/heat/template_guide/environment.html
.. _documentation: https://docs.openstack.org/heat/template_guide/index.html
Firstboot extra configuration
-----------------------------
Firstboot configuration is optional, and is performed on *all* nodes on initial
deployment.
Any configuration possible via cloud-init may be performed at this point,
either by applying cloud-config yaml or running arbitrary additional
scripts.
The heat templates used for deployment provide the `OS::TripleO::NodeUserData`
resource as the interface to enable this configuration. A basic example of its
usage is provided below, followed by some notes related to real world
usage.
The script snippet below shows how to create a simple example containing two
scripts, combined via the MultipartMime_ resource::
mkdir firstboot
cat > firstboot/one_two.yaml << EOF
heat_template_version: 2014-10-16
resources:
userdata:
type: OS::Heat::MultipartMime
properties:
parts:
- config: {get_resource: one_config}
- config: {get_resource: two_config}
one_config:
type: OS::Heat::SoftwareConfig
properties:
config: |
#!/bin/bash
echo "one" > /tmp/one
two_config:
type: OS::Heat::SoftwareConfig
properties:
config: |
#!/bin/bash
echo "two" > /tmp/two
outputs:
OS::stack_id:
value: {get_resource: userdata}
EOF
.. _MultipartMime: https://docs.openstack.org/heat/template_guide/openstack.html#OS::Heat::MultipartMime
.. note::
The stack must expose an `OS::stack_id` output which references an
OS::Heat::MultipartMime resource.
This template is then mapped to the `OS::TripleO::NodeUserData` resource type
via a heat environment file::
cat > userdata_env.yaml << EOF
resource_registry:
OS::TripleO::NodeUserData: firstboot/one_two.yaml
EOF
You may then deploy your overcloud referencing the additional environment file::
openstack overcloud deploy --templates \
-e <full environment> -e userdata_env.yaml
.. note::
Make sure you pass the same environment parameters that were used at
deployment time in addition to your customization environments at the
end (`userdata_env.yaml`).
.. note::
The userdata is applied to *all* nodes in the deployment. If you need role
specific logic, the userdata scripts can contain conditionals which use
e.g the node hostname to determine the role.
.. note::
OS::TripleO::NodeUserData is only applied on initial node deployment,
not on any subsequent stack update, because cloud-init only processes the
nova user-data once, on first boot. If you need to add custom configuration
that runs on all stack creates and updates, see the
`Post-Deploy extra configuration`_ section below.
For a more complete example, which creates an additional user and configures
SSH keys by accessing the nova metadata server, see
`/usr/share/openstack-tripleo-heat-templates/firstboot/userdata_example.yaml`
on the undercloud node or the tripleo-heat-templates_ repo.
.. _tripleo-heat-templates: https://opendev.org/openstack/tripleo-heat-templates
Per-node extra configuration
----------------------------
This configuration happens after any "firstboot" configuration is applied,
but before any Post-Deploy configuration takes place.
Typically these interfaces are suitable for preparing each node for service
deployment, such as registering nodes with a content repository, or creating
additional data to be consumed by the post-deploy phase. They may also be suitable
integration points for additional third-party services, drivers or plugins.
.. note::
If you only need to provide some additional data to the existing service
configuration, see :ref:`node_config` as this may provide a simpler solution.
.. note::
   The per-node interfaces only enable *individual* nodes to be configured;
   if cluster-wide configuration is required, the Post-Deploy interfaces should
   be used instead.
The following interfaces are available:
* `OS::TripleO::ControllerExtraConfigPre`: Controller node additional configuration
* `OS::TripleO::ComputeExtraConfigPre`: Compute node additional configuration
* `OS::TripleO::CephStorageExtraConfigPre` : CephStorage node additional configuration
* `OS::TripleO::NodeExtraConfig`: additional configuration applied to all nodes (all roles).
Below is an example of a per-node configuration template that shows additional node configuration
via standard heat SoftwareConfig_ resources::
mkdir -p extraconfig/per-node
cat > extraconfig/per-node/example.yaml << EOF
heat_template_version: 2014-10-16
parameters:
server:
description: ID of the controller node to apply this config to
type: string
resources:
NodeConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: |
#!/bin/sh
echo "Node configured" > /root/per-node
NodeDeployment:
type: OS::Heat::SoftwareDeployment
properties:
config: {get_resource: NodeConfig}
server: {get_param: server}
outputs:
deploy_stdout:
description: Deployment reference, used to trigger post-deploy on changes
value: {get_attr: [NodeDeployment, deploy_stdout]}
EOF
The "server" parameter must be specified in all per-node ExtraConfig templates,
this is the server to apply the configuration to, and is provided by the parent
template. Optionally additional implementation specific parameters may also be
provided by parameter_defaults, see below for more details.
Any resources may be defined in the template, but the outputs must define a "deploy_stdout"
value, which is an identifier used to detect if the configuration applied has changed,
hence when any post-deploy actions (such as re-applying puppet manifests on update)
may need to be performed.
For a more complete example showing how to apply a personalized map of per-node configuration
to each node, see `/usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/pre_deploy/per_node.yaml`
or the tripleo-heat-templates_ repo.
.. _SoftwareConfig: https://docs.openstack.org/heat/template_guide/software_deployment.html
Post-Deploy extra configuration
-------------------------------
Post-deploy additional configuration is possible via the
`OS::TripleO::NodeExtraConfigPost` interface, which is applied after any
per-node configuration has completed.
.. note::
The `OS::TripleO::NodeExtraConfigPost` applies configuration to *all* nodes,
there is currently no per-role NodeExtraConfigPost interface.
Below is an example of a post-deployment configuration template::
mkdir -p extraconfig/post-deploy/
cat > extraconfig/post-deploy/example.yaml << EOF
heat_template_version: 2014-10-16
parameters:
servers:
type: json
EndpointMap:
default: {}
type: json
# Optional implementation specific parameters
some_extraparam:
type: string
resources:
ExtraConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
template: |
#!/bin/sh
echo "extra _APARAM_" > /root/extra
params:
_APARAM_: {get_param: some_extraparam}
ExtraDeployments:
type: OS::Heat::SoftwareDeploymentGroup
properties:
servers: {get_param: servers}
config: {get_resource: ExtraConfig}
actions: ['CREATE'] # Only do this on CREATE
EOF
The "servers" parameter must be specified in all NodeExtraConfigPost
templates, this is the server list to apply the configuration to,
and is provided by the parent template.
Optionally, you may define additional parameters which are consumed by the
implementation. These may then be provided via parameter_defaults in the
environment which enables the configuration.
.. note::
If the parameter_defaults approach is used, care must be used to avoid
unintended reuse of parameter names between multiple templates, because
parameter_defaults is applied globally.
The "actions" property of the `OS::Heat::SoftwareDeploymentGroup` resource may be
used to specify when the configuration should be applied, e.g only on CREATE,
only on DELETE etc. If this is omitted, the heat default is to apply the
config on CREATE and UPDATE, e.g on initial deployment and every subsequent
update.
The extra config may be enabled via an environment file::
cat > post_config_env.yaml << EOF
resource_registry:
OS::TripleO::NodeExtraConfigPost: extraconfig/post-deploy/example.yaml
parameter_defaults:
some_extraparam: avalue123
EOF
You may then deploy your overcloud referencing the additional environment file::
openstack overcloud deploy --templates \
-e <full environment> -e post_config_env.yaml
View File
@ -1,18 +0,0 @@
Configuring High Availability
=============================
|project| supports high availability of the controller services using
Pacemaker. To enable this feature, you need at least three controller
nodes, and you must enable Pacemaker as the resource manager and specify an
NTP server.
Create the following environment file::
$ cat ~/environment.yaml
parameter_defaults:
ControllerCount: 3
And add the following arguments to your `openstack overcloud deploy`
command to deploy with HA::
-e environment.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/docker-ha.yaml --ntp-server pool.ntp.org
View File
@ -1,51 +0,0 @@
Feature Configurations
======================
Documentation on additional features for |project|.
.. toctree::
api_policies
backends
baremetal_overcloud
composable_services
custom_networks
custom_roles
compute_nvdimm
deploy_cellv2
deploy_swift
deployed_server
designate
disable_telemetry
distributed_compute_node
distributed_multibackend_storage
extra_config
tolerated_failure
high_availability
instance_ha
ipsec
keystone_security_compliance
lvmfilter
multiple_overclouds
network_isolation
network_isolation_virt
node_config
node_specific_hieradata
octavia
ops_tools
oslo_messaging_config
ovs_dpdk_config
sriov_deployment
rhsm
role_specific_parameters
routed_spine_leaf_network
server_blacklist
security_hardening
split_stack
ssl
tls-introduction
tls-everywhere
tuned
undercloud_minion
vdpa_deployment
pre_network_config
View File
@ -1,81 +0,0 @@
Configuring Instance High Availability
======================================
|project|, starting with the Queens release, supports a form of instance
high availability when the overcloud is deployed in a specific way.
In order to activate instance high-availability (also called ``IHA``)
the following steps are needed:
1. Add the following environment file to your overcloud deployment command. Make sure you are deploying an HA overcloud::
-e /usr/share/openstack-tripleo-heat-templates/environments/compute-instanceha.yaml
2. Instead of using the ``Compute`` role use the ``ComputeInstanceHA`` role for your compute plane. The ``ComputeInstanceHA`` role has the following additional services when compared to the ``Compute`` role::
- OS::TripleO::Services::ComputeInstanceHA
- OS::TripleO::Services::PacemakerRemote
3. Make sure that fencing is configured for the whole overcloud (controllers and computes). You can do so by adding an environment file to your deployment command that contains the necessary fencing information. For example::
parameter_defaults:
EnableFencing: true
FencingConfig:
devices:
- agent: fence_ipmilan
host_mac: 00:ec:ad:cb:3c:c7
params:
login: admin
ipaddr: 192.168.24.1
ipport: 6230
passwd: password
lanplus: 1
- agent: fence_ipmilan
host_mac: 00:ec:ad:cb:3c:cb
params:
login: admin
ipaddr: 192.168.24.1
ipport: 6231
passwd: password
lanplus: 1
- agent: fence_ipmilan
host_mac: 00:ec:ad:cb:3c:cf
params:
login: admin
ipaddr: 192.168.24.1
ipport: 6232
passwd: password
lanplus: 1
- agent: fence_ipmilan
host_mac: 00:ec:ad:cb:3c:d3
params:
login: admin
ipaddr: 192.168.24.1
ipport: 6233
passwd: password
lanplus: 1
- agent: fence_ipmilan
host_mac: 00:ec:ad:cb:3c:d7
params:
login: admin
ipaddr: 192.168.24.1
ipport: 6234
passwd: password
lanplus: 1
Once the deployment is completed, the overcloud should show a stonith device for each compute node and for each controller node, as well as a GuestNode for every compute node. The expected behavior is that if a compute node dies, it will be fenced and the VMs that were running on it will be evacuated (i.e. restarted) on another compute node.
In case it is necessary to limit which VMs are to be resuscitated on another compute node it is possible to tag with ``evacuable`` either the image::
openstack image set --tag evacuable 0c305437-89eb-48bc-9997-e4e4ea77e449
the flavor::
nova flavor-key bb31d84a-72b3-4425-90f7-25fea81e012f set evacuable=true
or the VM::
nova server-tag-add 89b70b07-8199-46f4-9b2d-849e5cdda3c2 evacuable
At the moment this last method should be avoided for a significant reason: setting the tag on a single VM means that just *that* instance will be evacuated, while tagging no VM implies that *all* the servers on the compute node will be resuscitated. In a partial tagging situation, if a compute node runs only untagged VMs, the cluster will evacuate all of them, ignoring the overall tag status.
View File
@ -1,170 +0,0 @@
.. _ipsec:
Deploying with IPSec
====================
Since the Queens release, it is possible to encrypt communications within the
internal network by setting up IPSec tunnels configured by TripleO.
There are several options that TripleO provides deployers whose requirements call
for encrypting everything in the network. For example, TLS Everywhere has been
supported since the Pike release. This method requires the deployer
to procure a CA server on a separate node. FreeIPA is recommended for this.
However, there are cases where a deployer's authorized CA does not have an
interface that can automatically request certificates. Furthermore, it may
not be possible to add another node to the network for various other reasons.
For these cases, IPSec is a viable alternative solution.
.. note:: For more information on TLS Everywhere, please see
:doc:`tls-everywhere`.
IPSec thus provides an alternative to TLS Everywhere. With IPSec, the
encryption happens at the IP layer, and not over TCP (as happens in TLS). As a
result, the services will communicate with each other over standard 'http',
and do not actually know that the underlying traffic is being encrypted. This
means that the services do not require any extra configuration.
Solution Overview
-----------------
The current IPSec solution relies on `Libreswan`_, which is already available
in RHEL and CentOS, and is driven and configured via Ansible.
There are two types of tunnels configured in the overcloud:
* **node-to-node tunnels**: These tunnels are a simple 1-to-1 tunnel between the ip
addresses of two nodes on the same network. This results in a tunnel to each node
in the overcloud for each network that the node is connected to.
* **Virtual IP tunnels**: These are tunnels between each Virtual IP address and
  each node that can contact it. The node hosting the VIP will open a tunnel
  for any host in the specific network that can properly authenticate. This
  makes the configuration simpler, allows for easier scaling, and assists
  deployers in securely communicating with the Virtual IP from hosts
  or services that are not necessarily managed by TripleO.
Authentication is currently done via a Pre-Shared Key (PSK) which all the nodes
share. However, future iterations will add more authentication methods to the
deployment.
Currently, the default encryption method is AES using GCM with a block size of
128 bits. Changing this default will be talked about in a further section.
To handle the moving of a Virtual IP from one node to another (VIP failover),
we also deploy a pacemaker resource agent per VIP. This resource agent is in
charge of creating the tunnel when the VIP is set in a certain node, and
removing the tunnel when it moves to another node.
.. note:: One important thing to note is that we set tunnels for every network
          except the control plane network. The reason for this is that, in our
          testing, setting up tunnels for this network cuts off the
          communication between the overcloud nodes and the undercloud. We thus
          rely on the fact that Ansible uses SSH to communicate with the
          overcloud nodes, still giving the deployment secure communications.
Deployment
----------
.. note:: Please note that the IPSec deployment depends on Ansible being used
for the overcloud deployment. For more information on this, please
see :doc:`../deployment/ansible_config_download`
.. note:: Also note that the IPSec deployment assumes that you're using network
isolation. For more information on this, please see
:doc:`network_isolation`
To enable IPSec tunnels for the overcloud, you need to use the following
environment file::
/usr/share/openstack-tripleo-heat-templates/environments/ipsec.yaml
With this, your deployment command will be similar to this::
openstack overcloud deploy \
...
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
-e /home/stack/templates/network-environment.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ipsec.yaml
To change the default encryption algorithm, you can use an environment file
that looks as follows::
parameter_defaults:
IpsecVars:
ipsec_algorithm: 'aes_gcm256-null'
The ``IpsecVars`` option is able to change any parameter in the tripleo-ipsec
ansible role.
.. note:: For more information on the algorithms that Libreswan supports,
please check the `Libreswan documentation`_
.. note:: For more information on the available parameters, check the README
file in the `tripleo-ipsec repository`_.
Verification
------------
To verify that the IPSec tunnels were setup correctly after the overcloud
deployment is done, you'll need to do several things:
* Log into each node
* In each node, check the output of ``ipsec status`` with sudo or root
privileges. This will show you the status of all the tunnels that are set up
in the node.
- The line starting with "Total IPsec connections" should show
that there are active connections.
- The Security Associations should be all authenticated::
000 IKE SAs: total(23), half-open(0), open(0), authenticated(23), anonymous(0)
000 IPsec SAs: total(37), authenticated(37), anonymous(0)
Note that this number will vary depending on the number of networks and
nodes you have.
* The configuration files generated can be found in the ``/etc/ipsec.d``
directory.
- They conveniently all start with the prefix **overcloud-** and
you could list them with the following command::
ls /etc/ipsec.d/overcloud-*.conf
- The PSKs can be found with the following command::
ls /etc/ipsec.d/overcloud-*.secrets
- You can find the connection names from the ``*.conf`` files.
- To view the status of a certain connection, you can use the aforementioned
``ipsec status`` command, and filter the result, searching for the specific
connection name. For instance, in the node that's hosting the Internal API
VIP, you can view the status of the tunnels for that VIP with the following
command::
ipsec status | grep overcloud-internal_api-vip-tunnel
* To view the status of the resource agents, you can use ``pcs status``.
- The IPSEC-related agents will have a name with the **tripleo-ipsec**
prefix.
- Note that the resource agents for the tunnels are collocated with the IP
resource agents. This is enforced through a collocation rule in pacemaker.
You can verify this by running the ``pcs constraint`` command.
.. note:: To get further explanations for understanding the output of the
``ipsec status`` command, you can read the `Libreswan wiki entry on
the subject`_.
.. References
.. _Libreswan: https://libreswan.org/
.. _Libreswan documentation: https://libreswan.org/man/ipsec.conf.5.html
.. _Libreswan wiki entry on the subject: https://libreswan.org/wiki/How_to_read_status_output
.. _tripleo-ipsec repository: https://github.com/openstack/tripleo-ipsec/blob/master/README.md
View File
@ -1,50 +0,0 @@
Keystone Security Compliance
============================
Keystone has several configuration options available in order to comply with
standards such as Payment Card Industry - Data Security Standard (PCI-DSS)
v3.1.
TripleO exposes these features via Heat parameters, which are listed below; an example environment file follows the list:
* ``KeystoneChangePasswordUponFirstUse``: Enabling this option requires users
to change their password when the user is created, or upon administrative
reset.
* ``KeystoneDisableUserAccountDaysInactive``: The maximum number of days a user
can go without authenticating before being considered "inactive" and
automatically disabled (locked).
* ``KeystoneLockoutDuration``: The number of seconds a user account will be
locked when the maximum number of failed authentication attempts (as
specified by ``KeystoneLockoutFailureAttempts``) is exceeded.
* ``KeystoneLockoutFailureAttempts``: The maximum number of times that a user
can fail to authenticate before the user account is locked for the number of
seconds specified by ``KeystoneLockoutDuration``.
* ``KeystoneMinimumPasswordAge``: The number of days that a password must be
used before the user can change it. This prevents users from changing their
passwords immediately in order to wipe out their password history and reuse
an old password.
* ``KeystonePasswordExpiresDays``: The number of days for which a password will
be considered valid before requiring it to be changed.
* ``KeystonePasswordRegex``: The regular expression used to validate password
strength requirements.
* ``KeystonePasswordRegexDescription``: Describe your password regular
expression here in language for humans.
* ``KeystoneUniqueLastPasswordCount``: This controls the number of previous
user password iterations to keep in history, in order to enforce that newly
created passwords are unique.
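The following is an example environment file setting a few of these parameters;
the values are illustrative only and should be chosen to match your actual
compliance requirements::
    parameter_defaults:
      KeystoneLockoutFailureAttempts: '6'
      KeystoneLockoutDuration: '1800'
      KeystonePasswordExpiresDays: '90'
      KeystoneUniqueLastPasswordCount: '5'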
.. note:: All of the aforementioned options only apply to the SQL backend. For
other identity backends like LDAP, these configuration settings
should be applied on that backend's side.
.. note:: All of these parameters are defined as type ``string`` in heat. As
per the implementation, if left unset, they will not be configured at
all in the keystone configuration.
View File
@ -1,38 +0,0 @@
Enable LVM2 filtering on overcloud nodes
========================================
While by default the overcloud image will not use LVM2 volumes, it is
possible that with some Cinder backends, for example remote iSCSI or FC,
the remote LUNs hosting OpenStack volumes will be visible on the nodes
hosting cinder-volume or nova-compute containers.
In that case, should the OpenStack guest create LVM2 volumes inside its
additional disks, those volumes will be scanned by the LVM2 tools
installed on the hosting node.
To prevent that, it is possible to configure an LVM2 global_filter when
deploying or updating the overcloud. The feature is disabled by default and
can be enabled by passing `LVMFilterEnabled: true` in a Heat environment
file.
When enabled, a global_filter will be computed from the list of physical
devices hosting active LVM2 volumes. This list can be extended further,
manually, listing any additional block device via `LVMFilterAllowlist`
parameter, which supports regexp. A deny list can be configured as well,
via `LVMFilterDenylist` parameter; it defaults to ['.*'] so that any
block device which isn't in the allow list will be ignored by the LVM2
tools by default.
Any of the template parameters can be set per-role; for example, to enable
the feature only on Compute nodes and add `/dev/sdb` to the deny list use::
$ cat ~/environment.yaml
parameter_defaults:
ComputeParameters:
LVMFilterEnabled: true
LVMFilterDenylist:
- /dev/sdb
Then add the following argument to your `openstack overcloud deploy` command::
-e environment.yaml
View File
@ -1,132 +0,0 @@
Multiple Overclouds from a Single Undercloud
============================================
TripleO can be used to deploy multiple Overclouds from a single Undercloud
node.
In this scenario, a single Undercloud deploys and manages multiple Overclouds
as unique Heat stacks, with no stack resources shared between them. This can
be useful for environments where having a 1:1 ratio of Underclouds and
Overclouds creates an unmanageable amount of overhead, such as edge cloud
deployments.
Requirements
------------
All Overclouds must be deployed in the same tenant (admin) on the Undercloud.
If using Ironic for baremetal provisioning, all Overclouds must be on the same
provisioning network.
Undercloud Deployment
---------------------
Deploy the Undercloud :doc:`as usual <../deployment/install_undercloud>`.
First Overcloud
---------------
The first Overcloud can be deployed as usual using the :doc:`cli <../deployment/install_overcloud>`.
Deploying Additional Overclouds
-------------------------------
Additional Overclouds can be deployed by specifying a new stack name and any
necessary parameters in a new deployment plan. Networks for additional
overclouds must be defined as :doc:`custom networks <./custom_networks>`
with ``name_lower`` and ``service_net_map_replace`` directives for each
overcloud to have unique networks in the resulting stack.
If your first cloud was named ``overcloud`` and had the following
``network_data.yaml``::
cat overcloud/network_data.yaml
- name: InternalApi
name_lower: internal_api_cloud_1
service_net_map_replace: internal_api
vip: true
vlan: 201
ip_subnet: '172.17.0.0/24'
allocation_pools: [{'start': '172.17.0.4', 'end': '172.17.0.250'}]
You would create a new ``network_data.yaml`` with unique ``name_lower`` values
and VLANs for each network, making sure to specify ``service_net_map_replace``::
cat overcloud-two/network_data.yaml
- name: InternalApi
name_lower: internal_api_cloud_2
service_net_map_replace: internal_api
vip: true
vlan: 301
ip_subnet: '172.21.0.0/24'
allocation_pools: [{'start': '172.21.0.4', 'end': '172.21.0.250'}]
Then deploy the second Overcloud as::
openstack overcloud deploy --templates ~/overcloud-two/templates/ \
--stack overcloud-two \
-n ~/overcloud-two/network_data.yaml
Managing Heat Templates
-----------------------
If the Heat templates will be customized for any of the deployed clouds
(undercloud, or any overclouds) they should be copied from
/usr/share/openstack-tripleo-heat-templates to a new location before being
modified. Then the location would be specified to the deploy command using
the --templates flag.
The templates could be managed using separate directories for each deployed
cloud::
~stack/undercloud-templates
~stack/overcloud-templates
~stack/overcloud-two-templates
Or by creating a repository in a version control system for the templates
and making a branch for each deployment. For example, using git::
~stack/tripleo-heat-templates $ git branch
* master
undercloud
overcloud
overcloud-two
To deploy to a specific cloud, ensure you are using the correct branch first::
cd ~stack/tripleo-heat-templates ;\
git checkout overcloud-two ;\
openstack overcloud deploy --templates ~stack/tripleo-heat-templates --stack overcloud-two -e $ENV_FILES
Using Pre-Provisioned Nodes
---------------------------
Deploying multiple overclouds with the Ironic baremetal installer currently
requires a shared provisioning network. If this is not possible, you may use
the :ref:`Deployed Servers <deployed_server>` method with routed networks. Ensure that the values
in the ``HostnameMap`` match the stack name being used for each Overcloud.
For example:
``hostnamemap.yaml`` for stack ``overcloud``::
parameter_defaults:
HostnameMap:
overcloud-controller-0: controller-00-rack01
overcloud-controller-1: controller-01-rack02
overcloud-controller-2: controller-02-rack03
overcloud-novacompute-0: compute-00-rack01
overcloud-novacompute-1: compute-01-rack01
overcloud-novacompute-2: compute-02-rack01
``hostnamemap.yaml`` for stack ``overcloud-two``::
parameter_defaults:
HostnameMap:
overcloud-two-controller-0: controller-00-rack01
overcloud-two-controller-1: controller-01-rack02
overcloud-two-controller-2: controller-02-rack03
overcloud-two-novacompute-0: compute-00-rack01
overcloud-two-novacompute-1: compute-01-rack01
overcloud-two-novacompute-2: compute-02-rack01
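The ``hostnamemap.yaml`` file for each stack is then passed to the matching
deploy command as an extra environment file; a sketch for the second stack
(paths are illustrative)::

   openstack overcloud deploy --templates ~/overcloud-two/templates/ \
     --stack overcloud-two \
     -n ~/overcloud-two/network_data.yaml \
     -e ~/overcloud-two/hostnamemap.yaml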
@ -1,73 +0,0 @@
Configuring Network Isolation in Virtualized Environments
=========================================================
Introduction
------------
This document describes how to configure a virtualized development
environment for use with network isolation. To make things as easy as
possible we will use the ``single-nic-with-vlans`` network isolation
templates to create isolated VLANs on top of the single NIC already
used for the provisioning/``ctlplane``.
The ``single_nic_vlans.j2`` template works well for many virtualized environments
because it does not require adding any extra NICs. Additionally, Open vSwitch
automatically trunks VLANs for us, so there is no extra switch configuration
required.
Create an External VLAN on Your Undercloud
------------------------------------------
By default all instack undercloud machines have a ``br-ctlplane`` bridge which
is used as the provisioning network. We want to add an interface
on the 10.0.0.0/24 network which is used as the default "external"
(public) network for the overcloud. The default VLAN for the external
network is ``vlan10`` so we create an interface file to do this. Create
the following file ``/etc/sysconfig/network-scripts/ifcfg-vlan10``::
DEVICE=vlan10
ONBOOT=yes
HOTPLUG=no
TYPE=OVSIntPort
OVS_BRIDGE=br-ctlplane
OVS_OPTIONS="tag=10"
BOOTPROTO=static
IPADDR=10.0.0.1
PREFIX=24
NM_CONTROLLED=no
And then run ``ifup vlan10`` on your undercloud.
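To check that the new interface came up as an OVS internal port on
``br-ctlplane``, the standard tools can be used; for example (output will vary
by environment)::

   ip addr show vlan10
   sudo ovs-vsctl show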
Create a Custom Environment File
--------------------------------
When using network isolation most of the network/config templates configure
static IPs for the ``ctlplane``. To ensure connectivity with Heat and Ec2
metadata, we need to specify a couple of extra Heat parameters. Create a file
called ``/home/stack/custom.yaml`` with the following contents::
parameter_defaults:
EC2MetadataIp: 192.168.24.1
ControlPlaneDefaultRoute: 192.168.24.1
Note that the specified IP addresses ``192.168.24.1`` are the same as the
undercloud IP address.
Modify Your Overcloud Deploy to Enable Network Isolation
--------------------------------------------------------
At this point we are ready to create the overcloud using the network
isolation defaults. The example command below demonstrates how to enable
network isolation by using Heat templates for network isolation, a
custom set of network config templates (single NIC VLANs), and our
``custom.yaml`` config file from above::
TEMPLATES=/path/to/openstack-tripleo-heat-templates
openstack overcloud deploy \
--templates=$TEMPLATES \
-e $TEMPLATES/environments/network-isolation.yaml \
-e $TEMPLATES/environments/net-single-nic-with-vlans.yaml \
-e /home/stack/custom.yaml
After creating the stack you should now have a working virtualized
development environment with network isolation enabled.
@ -1,99 +0,0 @@
.. _node_config:
Modifying default node configuration
====================================
Many service configuration options are already exposed via parameters in the
top-level `overcloud.yaml` template, and these options should
be used wherever available to influence overcloud configuration.
However in the event the service configuration required is not exposed
as a top-level parameter, there are flexible interfaces which enable passing
arbitrary additional configuration to the nodes on deployment.
Making ansible variable changes
-------------------------------
Since the Train release, it is possible to override any Ansible variable
via group vars.
For example, to override the `chrony_role_action` variable used in
ansible-role-chrony for all the Compute roles, we would do the following::
cat > compute_params.yaml << EOF
parameter_defaults:
ComputeExtraGroupVars:
chrony_role_action: config
EOF
openstack overcloud deploy -e compute_params.yaml
Any variable can be set via this interface, and it will take precedence over
values set somewhere else (e.g. in the composable service).
For any custom roles (defined via roles_data.yaml) the parameter name will
be RoleNameExtraGroupVars where RoleName is the name specified in
roles_data.yaml.
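For example, a minimal sketch for a custom role named ``ComputeSriov`` defined
in ``roles_data.yaml`` (the role name and variable are only illustrative)::

   parameter_defaults:
     ComputeSriovExtraGroupVars:
       chrony_role_action: config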
Making puppet configuration changes
-----------------------------------
If you want to make a configuration change, either prior to initial deployment,
or subsequently via an update, you can pass additional data to puppet via hiera
data, using either the global "ExtraConfig" parameter, or one of the role-specific
parameters, e.g. using `ComputeExtraConfig` to set the reserved_host_memory
value for compute nodes::
cat > compute_params.yaml << EOF
parameter_defaults:
ComputeExtraConfig:
nova::compute::reserved_host_memory: some_value
EOF
openstack overcloud deploy -e compute_params.yaml
The parameters available are:
* `ExtraConfig`: Apply the data to all nodes, e.g. all roles
* `ComputeExtraConfig`: Apply the data only to Compute nodes
* `ControllerExtraConfig`: Apply the data only to Controller nodes
* `BlockStorageExtraConfig`: Apply the data only to BlockStorage nodes
* `ObjectStorageExtraConfig`: Apply the data only to ObjectStorage nodes
* `CephStorageExtraConfig`: Apply the data only to CephStorage nodes
For any custom roles (defined via roles_data.yaml) the parameter name will
be RoleNameExtraConfig where RoleName is the name specified in roles_data.yaml.
.. note::
Previously the parameter for Controller nodes was named
`controllerExtraConfig` (note the inconsistent capitalization). If
you are updating a deployment which used the old parameter, all
values previously passed to `controllerExtraConfig` should be
passed to `ControllerExtraConfig` instead, and
`controllerExtraConfig: {}` should be explicitly set in
`parameter_defaults`, to ensure that values from the old parameter
will not be used anymore. Also ComputeExtraConfig was previously
named NovaComputeExtraConfig, so a similar update should be performed
where the old naming is used.
.. note::
Passing data via the ExtraConfig parameters will override any statically
defined values in the Hiera data files included as part of tripleo-heat-templates,
e.g those located in `puppet/hieradata` directory.
.. note::
If you set a configuration of a puppet class which is not being included
yet, make sure you include it in the ExtraConfig definition, for example
if you want to change the Max IOPS per host setting::
parameter_defaults:
ComputeExtraConfig:
'nova::scheduler::filter::max_io_ops_per_host': '4.0'
Compute_classes:
- '::nova::scheduler::filter'
The Compute_classes data is included via the hiera_include in the
overcloud_common.pp puppet manifest.
@ -1,113 +0,0 @@
Provisioning of node-specific Hieradata
=======================================
This guide assumes that your undercloud is already installed and ready to
deploy an overcloud.
It is possible to provide some node-specific hieradata via Heat environment
files and as such customize one or more settings for a specific node,
regardless of the Heat `ResourceGroup` to which it belongs.
As a sample use case, we will distribute a node-specific disks configuration
for a particular CephStorage node, which by default runs the `ceph-osd` service.
Collecting the node UUID
------------------------
The node-specific hieradata is provisioned based on the node UUID, which is
hardware dependent and immutable across reboots/reinstalls.
First make sure the introspection data is available for the target node; if it
isn't, one may run introspection for a particular node as described in
:doc:`../provisioning/introspect_single_node`. If the `undercloud.conf` does not have
`inspection_extras = true` prior to undercloud installation/upgrade
and introspection, then the machine unique UUID will not be in the
Ironic database.
Then extract the machine unique UUID for the target node with a command like::
baremetal introspection data save NODE-ID | jq .extra.system.product.uuid | tr '[:upper:]' '[:lower:]'
where `NODE-ID` is the target node Ironic UUID. The value returned by the above
command will be a unique and immutable machine UUID which isn't related to the
Ironic node UUID. For the next step, we'll assume the output was
`32e87b4c-c4a7-41be-865b-191684a6883b`.
Creating the Heat environment file
----------------------------------
Assuming we want to use `/dev/sdc` as a data disk for `ceph-osd` on our target
node, we'll create a yaml file, e.g. `my-node-settings.yaml`, with the
following content, depending on whether ceph-ansible (Pike and newer)
or puppet-ceph (Ocata and older) is used.
For ceph-ansible use::
parameter_defaults:
NodeDataLookup: {"32e87b4c-c4a7-41be-865b-191684a6883b": {"devices": ["/dev/sdc"]}}
For puppet-ceph use::
resource_registry:
OS::TripleO::CephStorageExtraConfigPre: /path/to/tripleo-heat-templates/puppet/extraconfig/pre_deploy/per_node.yaml
parameter_defaults:
NodeDataLookup: {"32e87b4c-c4a7-41be-865b-191684a6883b": {"ceph::profile::params::osds": {"/dev/sdc": {}}}}
In the above example we're customizing only a single key for a single node, but
the structure is that of a UUID-mapped hash so it is possible to customize
multiple and different keys for multiple nodes.
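For instance, a sketch of a ceph-ansible style map covering two nodes (the
UUIDs and devices are illustrative)::

   parameter_defaults:
     NodeDataLookup:
       32e87b4c-c4a7-41be-865b-191684a6883b: {"devices": ["/dev/sdc"]}
       00000000-0000-0000-0000-000000000001: {"devices": ["/dev/sdc", "/dev/sdd"]}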
Generating the Heat environment file for Ceph devices
-----------------------------------------------------
The tools directory of tripleo-heat-templates
(`/usr/share/openstack-tripleo-heat-templates/tools/`) contains a
utility called `make_ceph_disk_list.py` which can be used to create
a valid JSON Heat environment file automatically from Ironic's
introspection data.
Export the introspection data from Ironic for the Ceph nodes to be
deployed::
baremetal introspection data save oc0-ceph-0 > ceph0.json
baremetal introspection data save oc0-ceph-1 > ceph1.json
...
Copy the utility to the stack user's home directory on the undercloud
and then use it to generate a `node_data_lookup.json` file which may
be passed during openstack overcloud deployment::
./make_ceph_disk_list.py -i ceph*.json -o node_data_lookup.json -k by_path
Pass the introspection data file from `baremetal introspection data save` for
all nodes hosting Ceph OSDs to the utility as you may only define
`NodeDataLookup` once during a deployment. The `-i` option can take an
expression like `*.json` or a list of files as input.
The `-k` option defines the key of the ironic disk data structure to use
to identify the disk to be used as an OSD. Using `name` is not
recommended as it will produce a file of devices like `/dev/sdd` which
may not always point to the same device on reboot. Thus, `by_path` is
recommended and is the default if `-k` is not specified.
Ironic will have one of the available disks on the system reserved as
the root disk. The utility will always exclude the root disk from the
list of devices generated.
Use `./make_ceph_disk_list.py --help` to see other available options.
Deploying with NodeDataLookup
-----------------------------
Add the environment file described in the previous section to the
deploy commandline::
openstack overcloud deploy [other overcloud deploy options] -e ~/my-node-settings.yaml
or::
openstack overcloud deploy [other overcloud deploy options] -e ~/node_data_lookup.json
JSON is the recommended format (instead of JSON embedded in YAML)
because you may use `jq` to validate the entire file before deployment.
@ -1,77 +0,0 @@
.. _deploy-octavia:
Deploying Octavia in the Overcloud
==================================
This guide assumes that your undercloud is already installed and ready to
deploy an overcloud with Octavia enabled. Please note that only container
deployments are supported.
Preparing to deploy
-------------------
TripleO can upload an Octavia Amphora image to the overcloud if one is
available when deploying.
Configuring the amphora image
-----------------------------
If the Octavia Amphora image is available when deploying, it should be placed
in a readable path, with the default location being a good choice. On CentOS,
the default location is::
/usr/share/openstack-octavia-amphora-images/amphora-x64-haproxy.qcow2
If deploying on Red Hat Enterprise Linux, the default location is::
/usr/share/openstack-octavia-amphora-images/octavia-amphora.qcow2
On Red Hat Enterprise Linux, downloading an image may be unnecessary as the
amphora image may already be installed.
If using a non-default location, make sure to specify the location through the
``OctaviaAmphoraImageFilename`` variable in an environment file. For example::
parameter_defaults:
OctaviaAmphoraImageFilename: /usr/share/openstack-images/amphora-image.qcow2
.. warning:: Home directories are typically not readable by the workflow
tasks that upload the file image to Glance. Please use a generally
accessible path.
Deploying the overcloud with the octavia services
-------------------------------------------------
To deploy Octavia services in the overcloud, include the sample environment
file provided. For example::
openstack overcloud deploy --templates \
-e /usr/share/openstack-tripleo-heat-templates/environments/services/octavia.yaml \
-e ~/containers-default-parameters.yaml
.. note:: Don't forget to include any additional environment files containing
parameters such as those for the amphora image file.
Uploading/Updating the amphora image after deployment
-----------------------------------------------------
Uploading a new amphora image to Glance in the overcloud can be done after
deployment. This may be required if the amphora image was not available at the
time of deployment or the image needs to be updated.
There are two Octavia specific requirements::
 - The image must be tagged in Glance (default value 'amphora-image')
 - The image must belong to the 'service' project
To upload an amphora image into glance::
openstack image create --disk-format qcow2 --container-format bare \
--tag 'amphora-image' --file [amphora image filename] \
--project service new-amphora-image
.. note:: The amphora image tag name can be customized by setting the
``OctaviaAmphoraImageTag`` variable. Note that if this is changed
after deployment, Octavia will not be able to use any previously
uploaded images until they are retagged.
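As a sketch, assuming the default tag name, an existing image can be checked
for the tag and retagged with the standard image commands (the image name is
illustrative)::

   openstack image list --tag amphora-image
   openstack image set --tag amphora-image my-new-amphora-image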
@ -1,173 +0,0 @@
Deploying Operational Tools
===========================
TripleO comes with an optional suite of tools designed to help operators
maintain an OpenStack environment. The tools perform the following functions:
- Availability Monitoring
- Centralized Logging
- Performance Monitoring
This document describes these tools and their installation.
Architecture
------------
#. Operational Tool Server:
- Monitoring Relay/proxy (RabbitMQ_)
- Monitoring Controller/Server (Sensu_)
- Data Store (Redis_)
- API/Presentation Layer (Uchiwa_)
- Log relay/transformer (Fluentd_)
- Data store (Elastic_)
- API/Presentation Layer (Kibana_)
- Performance receptor (Collectd_)
- Aggregator/Relay (Graphite_)
- An API/Presentation Layer (Grafana_)
#. Undercloud:
   - There are no operational tools installed by default on the undercloud
#. Overcloud:
- Monitoring Agent (Sensu_)
- Log Collection Agent (Fluentd_)
- Performance Collector Agent (Collectd_)
.. _RabbitMQ: https://www.rabbitmq.com
.. _Sensu: http://sensuapp.org
.. _Redis: https://redis.io
.. _Uchiwa: https://uchiwa.io
.. _Fluentd: http://www.fluentd.org
.. _Elastic: https://www.elastic.co
.. _Kibana: https://www.elastic.co/products/kibana
.. _Collectd: https://collectd.org
.. _Graphite: https://graphiteapp.org
.. _Grafana: https://grafana.com
Deploying the Operational Tool Server
-------------------------------------
There is an Ansible project called opstools-ansible (OpsTools_) on GitHub that helps to install the Operational Tool Server; further documentation of the installation can be found at OpsToolsDoc_.
.. _OpsTools: https://github.com/centos-opstools/opstools-ansible
.. _OpsToolsDoc: https://github.com/centos-opstools/opstools-doc
Deploying the Undercloud
------------------------
As there is nothing to install on the undercloud, nothing needs to be done.
Before deploying the Overcloud
------------------------------
.. note::
The :doc:`../deployment/template_deploy` document has a more detailed explanation of the
following steps.
1. Install client packages on overcloud-full image:
- Mount the image and create a chroot::
temp_dir=$(mktemp -d)
sudo tripleo-mount-image -a /path/to/overcloud-full.qcow2 -m $temp_dir
sudo mount -o bind /dev $temp_dir/dev/
sudo cp /etc/resolv.conf $temp_dir/etc/resolv.conf
sudo chroot $temp_dir /bin/bash
- Install the packages inside the chroot::
dnf install -y centos-release-opstools
dnf install -y sensu fluentd collectd
exit
- Unmount the image::
sudo rm $temp_dir/etc/resolv.conf
sudo umount $temp_dir/dev
sudo tripleo-unmount-image -m $temp_dir
- Upload new image to undercloud image registry::
openstack overcloud image upload --update-existing
2. Operational tools configuration files:
The files have some documentation about the parameters that need to be configured
- Availability Monitoring::
/usr/share/openstack-tripleo-heat-templates/environments/monitoring-environment.yaml
- Centralized Logging::
/usr/share/openstack-tripleo-heat-templates/environments/logging-environment.yaml
- Performance Monitoring::
/usr/share/openstack-tripleo-heat-templates/environments/collectd-environment.yaml
3. Configure the environment
   The easiest way to configure the environment is to create a parameter file,
   for example ``parameters.yaml``, with all the parameters defined.
- Availability Monitoring::
      MonitoringRabbitHost: server_ip # Server where the rabbitmq was installed
MonitoringRabbitPort: 5672 # Rabbitmq port
MonitoringRabbitUserName: sensu_user # the rabbitmq user to be used by sensu
MonitoringRabbitPassword: sensu_password # The password of the sensu user
MonitoringRabbitUseSSL: false # Set to false
MonitoringRabbitVhost: "/sensu_vhost" # The virtual host of the rabbitmq
- Centralized Logging::
LoggingServers: # The servers
- host: server_ip # The ip of the server
port: 24224 # Port to send the logs [ 24224 plain & 24284 SSL ]
LoggingUsesSSL: false # Plain or SSL connections
# If LoggingUsesSSL is set to false the following lines can
# be deleted
LoggingSharedKey: secret # The key
LoggingSSLCertificate: | # The content of the SSL Certificate
-----BEGIN CERTIFICATE-----
...contents of server.pem here...
-----END CERTIFICATE-----
- Performance Monitoring::
CollectdServer: collectd0.example.com # Collectd server, where the data is going to be sent
CollectdServerPort: 25826 # Collectd port
# CollectdSecurityLevel: None # Security by default None the other values are
# Encrypt & Sign, but the two following parameters
# need to be set too
# CollectdUsername: user # User to connect to the server
# CollectdPassword: password # Password to connect to the server
# Collectd, by default, comes with several plugins
      # extra plugins can be added via this parameter
CollectdExtraPlugins:
- disk # disk plugin
- df # df plugin
ExtraConfig: # If the plugins need to be set, this is the location
collectd::plugin::disk::disks:
- "/^[vhs]d[a-f][0-9]?$/"
collectd::plugin::df::mountpoints:
- "/"
collectd::plugin::df::ignoreselected: false
4. Continue following the TripleO instructions for deploying an overcloud::
openstack overcloud deploy --templates \
[-e /usr/share/openstack-tripleo-heat-templates/environments/monitoring-environment.yaml] \
[-e /usr/share/openstack-tripleo-heat-templates/environments/logging-environment.yaml] \
[-e /usr/share/openstack-tripleo-heat-templates/environments/collectd-environment.yaml] \
-e parameters.yaml
5. Wait for the completion of the overcloud deployment process.
@ -1,107 +0,0 @@
Configuring Messaging RPC and Notifications
===========================================
TripleO can configure oslo.messaging RPC and Notification services and
deploy the corresponding messaging backends for the undercloud and
overcloud. The roles OsloMessagingRPC and OsloMessagingNotify have been
added in place of the RabbitMQ Server. Having independent roles for RPC
and Notify allows for the separation of messaging backends as well as
the deployment of different messaging backend intermediaries that are
supported by oslo.messaging drivers::
+----------------+-----------+-----------+-----+--------+-----------+
| Oslo.Messaging | Transport | Backend | RPC | Notify | Messaging |
| Driver | Protocol | Server | | | Type |
+================+===========+===========+=====+========+===========+
| rabbit | AMQP V0.9 | rabbitmq | yes | yes | queue |
+----------------+-----------+-----------+-----+--------+-----------+
| amqp | AMQP V1.0 | qdrouterd | yes | | direct |
+----------------+-----------+-----------+-----+--------+-----------+
| kafka | kafka | kafka | | yes | queue |
| (experimental) | binary | | | | (stream) |
+----------------+-----------+-----------+-----+--------+-----------+
Standard Deployment of RabbitMQ Server Backend
----------------------------------------------
A single RabbitMQ backend (e.g. server or cluster) is the default
deployment for TripleO. This messaging backend provides the services
for both RPC and Notification communications through its integration
with the oslo.messaging rabbit driver.
The example `standard messaging`_ environment file depicts the
resource association for this de facto deployment configuration::
# *******************************************************************
# This file was created automatically by the sample environment
# generator. Developers should use `tox -e genconfig` to update it.
# Users are recommended to make changes to a copy of the file instead
# of the original, if any customizations are needed.
# *******************************************************************
# title: Share single rabbitmq backend for rpc and notify messaging backend
# description: |
# Include this environment to enable a shared rabbitmq backend for
# oslo.messaging rpc and notification services
parameter_defaults:
# The network port for messaging backend
# Type: number
RpcPort: 5672
resource_registry:
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
The `rabbitmq-messaging-rpc-container-puppet.yaml`_ instantiates the rabbitmq server backend
while `rabbitmq-messaging-notify-container-puppet.yaml`_ sets up the notification
transport configuration to use the same shared rabbitmq server.
Deployment of Separate RPC and Notify Messaging Backends
--------------------------------------------------------
Separate messaging backends can be deployed for RPC and Notification
communications. For this TripleO deployment, the Apache Qpid Dispatch
Router (qdrouterd) can be deployed for the RPC messaging backend using
the oslo.messaging AMQP 1.0 driver.
The example `hybrid messaging`_ environment file can be used for an
overcloud deployment::
# *******************************************************************
# This file was created automatically by the sample environment
# generator. Developers should use `tox -e genconfig` to update it.
# Users are recommended to make changes to a copy of the file instead
# of the original, if any customizations are needed.
# *******************************************************************
# title: Hybrid qdrouterd for rpc and rabbitmq for notify messaging backend
# description: |
# Include this environment to enable hybrid messaging backends for
# oslo.messaging rpc and notification services
parameter_defaults:
# The network port for messaging Notify backend
# Type: number
NotifyPort: 5672
# The network port for messaging backend
# Type: number
RpcPort: 31459
resource_registry:
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/messaging/rpc-qdrouterd-container-puppet.yaml
The above will instantiate qdrouterd server(s) and configure them for
use as the RPC transport and will also instantiate the rabbitmq backend
and configure it for use as the Notification transport. It should
be noted that the RPC and Notify ports must be distinct to prevent the
qdrouterd and rabbitmq servers from simultaneously using the amqp
standard port (5672).
Add the following arguments to your `openstack overcloud deploy`
command to deploy with separate messaging backends::
openstack overcloud deploy --templates -e /usr/share/openstack-tripleo-heat-templates/environments/messaging/rpc-qdrouterd-notify-rabbitmq-hybrid.yaml
.. _`standard messaging`: https://github.com/openstack/tripleo-heat-templates/blob/master/environments/messaging/rpc-rabbitmq-notify-rabbitmq-shared.yaml
.. _`rabbitmq-messaging-rpc-container-puppet.yaml`: https://github.com/openstack/tripleo-heat-templates/blob/master/deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml
.. _`rabbitmq-messaging-notify-container-puppet.yaml`: https://github.com/openstack/tripleo-heat-templates/blob/master/deployment/rabbitmq/rabbitmq-messaging-notify-container-puppet.yaml
.. _`hybrid messaging`: https://github.com/openstack/tripleo-heat-templates/blob/master/environments/messaging/rpc-qdrouterd-notify-rabbitmq-hybrid.yaml
@ -1,98 +0,0 @@
Deploying with OVS DPDK Support
===============================
TripleO can deploy Overcloud nodes with OVS DPDK support. A new role
``ComputeOvsDpdk`` has been added to create a custom ``roles_data.yaml`` with
composable OVS DPDK role.
Execute below command to create the ``roles_data.yaml``::
openstack overcloud roles generate -o roles_data.yaml Controller ComputeOvsDpdk
Once a roles file is created, the following changes are required:
- Deploy Command
- Parameters
- Network Config
Deploy Command
----------------
The deploy command should include the generated roles data file from the above
command.
The deploy command should also include the OVS DPDK environment file to override the
default neutron-ovs-agent service with neutron-ovs-dpdk-agent service. All the
required parameters are specified in this environment file as commented. The
parameters has to be configured according to the baremetal on which OVS DPDK
is enabled.
Also, OVS-DPDK requires mandatory kernel parameters to be set before
configuring the DPDK driver, like ``intel_iommu=on`` on Intel machines. In
order to enable the configuration of kernel parameters on the host, the
host-config-pre-network environment file has to be added to the deploy command.
Adding the following arguments to the ``openstack overcloud deploy`` command
will do the trick::
openstack overcloud deploy --templates \
-r roles_data.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovs-dpdk.yaml \
...
Parameters
----------
Following is the list of parameters which need to be provided for deploying
with OVS DPDK support.
* OvsPmdCoreList: List of Logical CPUs to be allocated for Poll Mode Driver
* OvsDpdkCoreList: List of Logical CPUs to be allocated for the openvswitch
host process (lcore list)
* OvsDpdkMemoryChannels: Number of memory channels
* OvsDpdkSocketMemory: Socket memory list per NUMA node
Example::
parameter_defaults:
OvsPmdCoreList: "2,3,18,19"
OvsDpdkCoreList: "0,1,16,17"
OvsDpdkMemoryChannels: "4"
OvsDpdkSocketMemory: "1024,1024"
The parameter ``KernelArgs`` should be provided in the deployment environment
file, with the set of kernel boot parameters to be applied on the
``ComputeOvsDpdk`` role where OVS DPDK is enabled::
parameter_defaults:
ComputeOvsDpdkParameters:
KernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=64 intel_iommu=on iommu=pt"
Network Config
--------------
DPDK supported network interfaces should be specified in the network config
templates to configure OVS DPDK on the node. The following new network config
types have been added to support DPDK.
- ovs_user_bridge
- ovs_dpdk_port
- ovs_dpdk_bond
Example::
network_config:
-
type: ovs_user_bridge
name: br-link
use_dhcp: false
members:
-
type: ovs_dpdk_port
name: dpdk0
mtu: 2000
rx_queue: 2
members:
-
type: interface
name: nic3
@ -1,116 +0,0 @@
Configure node before Network Config
====================================
In specific deployments, it is required to perform additional configurations
on the overcloud node before network deployment, but after applying kernel
args. For example, OvS-DPDK deployment requires DPDK to be enabled in
OpenvSwitch before network deployment (os-net-config), but after the
hugepages are created (hugepages are created using kernel args). This
requirement is also valid for some 3rd party SDN integration. This kind of
configuration requires additional TripleO service definitions. This document
explains how to achieve such deployments on and after `train` release.
.. note::
In `queens` release, the resource `PreNetworkConfig` can be overridden to
achieve the required behavior, which has been deprecated from `train`
onwards. The implementations based on `PreNetworkConfig` should be
moved to other available alternates.
The TripleO service `OS::TripleO::BootParams` configures the parameter
`KernelArgs` and reboots the node using the `tripleo-ansible` role
`tripleo_kernel`. Some points to consider on `KernelArgs`:
* `BootParams` service is enabled by default on all the roles.
* The node will be restarted only when kernel args are applied for the first
time (fresh node configuration).
* In case of adding `KernelArgs` during update/upgrade/scale operations, when
a particular role does not have `KernelArgs`, it results in node reboot.
Such scenarios should be treated as role migration instead of adding only
`KernelArgs`.
* `KernelArgs` can be updated from `wallaby` release onwards (where the role
already has `KernelArgs` but requires modification). In such cases, the
node reboot has to be planned by the user manually, after the TripleO
deployment is completed. For example, increasing the hugepages count post
deployment.
The firstboot_ scripts provide a mechanism to apply the custom node
configuration which is independent of kernel args.
.. _firstboot: https://github.com/openstack/tripleo-heat-templates/tree/master/firstboot
Custom Service
--------------
When a configuration needs to be applied on the node after reboot and before
the network config, then a custom service template should be added that
includes the `BootParams` resource (example below) and any other required
configuration. It is important to allow the default implementation
of `BootParams` service to be included as it is, because any improvements
or fixes will be automatically included in the deployment.
Here is an example where OvS-DPDK_ has been configured after `BootParams` but before
network config::
heat_template_version: wallaby
description: >
Open vSwitch Configuration
parameters:
ServiceData:
default: {}
description: Dictionary packing service data
type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
via parameter_defaults in the resource registry. Use
parameter_merge_strategies to merge it with the defaults.
type: json
RoleName:
default: ''
description: Role name on which the service is applied
type: string
RoleParameters:
default: {}
description: Parameters specific to the role
type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
resources:
BootParams:
    type: /usr/share/openstack-tripleo-heat-templates/deployment/kernel/kernel-boot-params-baremetal-ansible.yaml
properties:
ServiceData: {get_param: ServiceData}
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Open vSwitch service.
value:
service_name: openvswitch
deploy_steps_tasks:
- get_attr: [BootParams, role_data, deploy_steps_tasks]
- - name: Run ovs-dpdk role
when: step|int == 0
include_role:
name: tripleo_ovs_dpdk
.. _OvS-DPDK: https://github.com/openstack/tripleo-heat-templates/blob/master/deployment/openvswitch/openvswitch-dpdk-baremetal-ansible.yaml
.. note::
In the above sample service definition, the condition `step|int == 0` in
the `deploy_steps_tasks` section forces the associated steps to run
before starting any other node configuration (including network deployment).
Add this service to the roles definition of the required roles so that the
configuration can be applied after reboot but before network deployment.
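As a sketch, assuming the custom template above is registered under an alias
such as ``OS::TripleO::Services::MyOpenVswitchConfig`` in the resource registry
(the alias is illustrative), the role definition would list it alongside the
default services::

   - name: ComputeOvsDpdk
     ServicesDefault:
       - OS::TripleO::Services::MyOpenVswitchConfig
       - OS::TripleO::Services:: [...]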
@ -1,139 +0,0 @@
Deploying with RHSM
===================
Summary
-------
Starting in the Queens release, it is possible to use Ansible to apply the
RHSM (Red Hat Subscription Management) configuration.
Instead of the pre_deploy rhel-registration script, the new RHSM service will
allow our operators to:
#. deploy advanced RHSM configurations, where each role can have their own
repositories for example.
#. use the config-download mechanism so operators can run the playbooks at any time
after the deployment, in case RHSM parameters have changed.
Using RHSM
----------
To enable deployment with Ansible and config-download, pass the additional
argument to the deployment command::
openstack overcloud deploy \
<other cli args> \
-e ~/rhsm.yaml
The ``rhsm.yaml`` environment enables mapping the OS::TripleO::Services::Rhsm to
the extraconfig service::
resource_registry:
OS::TripleO::Services::Rhsm: /usr/share/openstack-tripleo-heat-templates/deployment/rhsm/rhsm-baremetal-ansible.yaml
parameter_defaults:
RhsmVars:
rhsm_activation_key: "secrete_key"
rhsm_org_id: "Default_Organization"
rhsm_server_hostname: "mysatserver.com"
rhsm_baseurl: "https://mysatserver.com/pulp/repos"
rhsm_method: satellite
rhsm_insecure: yes
rhsm_release: 8.1
In some advanced use cases, you might want to configure RHSM for a specific role::
parameter_defaults:
ComputeHCIParameters:
RhsmVars:
rhsm_activation_key: "secrete_key"
rhsm_org_id: "Default_Organization"
rhsm_server_hostname: "mysatserver.com"
rhsm_baseurl: "https://mysatserver.com/pulp/repos"
rhsm_method: satellite
rhsm_insecure: yes
rhsm_release: 8.1
In that case, all nodes deployed with ComputeHCI will be configured with these RHSM parameters.
Scale-down the Overcloud
------------------------
Automatic unsubscription isn't currently supported; before scaling down the
Overcloud, the operator has to run this playbook against the host(s) that will
be removed.
Example playbook for removing 2 compute nodes::
- hosts:
- overcloud-compute47
- overcloud-compute72
vars:
rhsm_username: bob.smith@acme.com
rhsm_password: my_secret
rhsm_state: absent
roles:
- openstack.redhat-subscription
The playbook needs to be executed prior to the actual scale-down.
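A sketch of running it, assuming the playbook above is saved as
``unsubscribe.yaml`` and an inventory file resolving the overcloud hostnames is
available (both file names are illustrative)::

   ansible-playbook -i ~/inventory.yaml unsubscribe.yaml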
Transition from previous method
-------------------------------
The previous method ran a script called rhel-registration during the
pre_deploy step, which is located in the ``extraconfig/pre_deploy/rhel-registration``
folder. While the script is still working, you can perform a
migration to the new service by replacing the parameters used in
rhel-registration with RhsmVars and switching the resource_registry
from::
resource_registry:
OS::TripleO::NodeExtraConfig: rhel-registration.yaml
To::
resource_registry:
# Before Train cycle, the file is in /usr/share/openstack-tripleo-heat-templates/extraconfig/services/rhsm.yaml
OS::TripleO::Services::Rhsm: /usr/share/openstack-tripleo-heat-templates/deployment/rhsm/rhsm-baremetal-ansible.yaml
The following table shows a migration path from the old
rhel-registration parameters to the new RhsmVars:
+------------------------------+------------------------------+
| rhel-registration script | rhsm with Ansible (RhsmVars) |
+==============================+==============================+
| rhel_reg_activation_key | rhsm_activation_key |
+------------------------------+------------------------------+
| rhel_reg_auto_attach | rhsm_autosubscribe |
+------------------------------+------------------------------+
| rhel_reg_sat_url | rhsm_satellite_url |
+------------------------------+------------------------------+
| rhel_reg_org | rhsm_org_id |
+------------------------------+------------------------------+
| rhel_reg_password | rhsm_password |
+------------------------------+------------------------------+
| rhel_reg_repos | rhsm_repos |
+------------------------------+------------------------------+
| rhel_reg_pool_id | rhsm_pool_ids |
+------------------------------+------------------------------+
| rhel_reg_user | rhsm_username |
+------------------------------+------------------------------+
| rhel_reg_method | rhsm_method |
+------------------------------+------------------------------+
| rhel_reg_http_proxy_host | rhsm_rhsm_proxy_hostname |
+------------------------------+------------------------------+
| rhel_reg_http_proxy_port | rhsm_rhsm_proxy_port |
+------------------------------+------------------------------+
| rhel_reg_http_proxy_username | rhsm_rhsm_proxy_user |
+------------------------------+------------------------------+
| rhel_reg_http_proxy_password | rhsm_rhsm_proxy_password |
+------------------------------+------------------------------+
More about the Ansible role
---------------------------
TripleO is using the Ansible role_ for Red Hat Subscription.
.. _role: https://github.com/openstack/ansible-role-redhat-subscription
The role parameters aren't documented here to avoid duplication but it is
recommended to take a look at them in the repository when using this feature
in TripleO.
@ -1,108 +0,0 @@
Role-Specific Parameters
========================
A service can be associated with multiple roles; for example, the ``nova-compute``
service can be associated with **ComputeRole1** and **ComputeRole2**. The
``nova-compute`` service takes multiple parameters like ``NovaVcpuPinSet``,
``NovaReservedHostMemory``, etc. It is possible to provide separate values
specific to a role with the following changes in the user environment file::
parameter_defaults:
NovaReservedHostMemory: 512
ComputeRole1Parameters:
NovaReservedHostMemory: 2048
  ComputeRole2Parameters:
NovaReservedHostMemory: 1024
The format to provide role-specific parameters is ``<RoleName>Parameters``,
where the ``RoleName`` is the name of the role as defined in the
``roles_data.yaml`` template.
In the above example, the value "512" will be applied to all the roles
which have the ``nova-compute`` service, whereas the value "2048" will be
applied only on the **ComputeRole1** role and the value "1024" will be applied
only on the **ComputeRole2** role.
With this approach, the service implementation has to merge the role-specific
parameters with the global parameters in its definition template. The
role-specific parameter takes precedence over the global parameter.
For any custom service which needs to use role-specific parameters, the
parameter merging should be done. Here is a sample parameter merging example
which will be done by the service implementation::
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
vswitch::dpdk::driver_type: NeutronDpdkDriverType
vswitch::dpdk::host_core_list: HostCpusList
vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
- values: {get_param: [RoleParameters]}
- values:
NeutronDatapathType: {get_param: NeutronDatapathType}
NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
HostCpusList: {get_param: HostCpusList}
NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
A service can have a unique variable name that is different from the role-specific one.
The example below shows how to define the service variable ``KeystoneWSGITimeout``, override
it with the role specific variable ``WSGITimeout`` if it is found, and create a new alias variable
named ``wsgi_timeout`` to store the value. Later on, that value can be retrieved by using
``{get_attr: [RoleParametersValue, value, wsgi_timeout]}``::
parameters:
KeystoneWSGITimeout:
description: The timeout for the Apache virtual host created for the API endpoint.
type: string
default: '60'
tags:
- role_specific
resources:
RoleParametersValue:
type: OS::Heat::Value
properties:
type: json
value:
map_replace:
- map_replace:
- wsgi_timeout: WSGITimeout
- values: {get_param: [RoleParameters]}
- values:
WSGITimeout: {get_param: KeystoneWSGITimeout}
outputs:
role_data:
description: Role data for the Keystone API role.
value:
config_settings:
map_merge:
- keystone::wsgi::apache::vhost_custom_fragment:
list_join: [' ', ['Timeout', {get_attr: [RoleParametersValue, value, wsgi_timeout]}]]
Now the variable can optionally have a default set at the composable roles data level::
- name: Undercloud
RoleParametersDefault:
WSGITimeout: '600'
.. note::
   As of now, not all parameters can be set per role; it depends on the
   service or template implementation. Each service should have the
implementation to merge the global parameters and role-specific
parameters, as explained in the above examples. A warning will be shown
during the deployment, if an invalid parameter (which does not support
role-specific implementation) is provided as role-specific input.
@ -1,621 +0,0 @@
.. _routed_spine_leaf_network:
Deploying Overcloud with L3 routed networking
=============================================
Layer 3 routed spine-and-leaf architectures are gaining in popularity due to
benefits such as high performance, increased scalability and reduced failure
domains.
The below diagram is an example L3 routed
`Clos <https://en.wikipedia.org/wiki/Clos_network>`_ architecture. In this
example each server is connected to top-of-rack leaf switches. Each leaf switch
is attached to each spine switch. Within each rack, all servers share a layer 2
domain. The layer 2 network segments are local to the rack. Layer 3 routing via
the spine switches permits East-West traffic between the racks:
.. image:: ../_images/spine_and_leaf.svg
.. Note:: Typically Dynamic Routing is implemented in such an architecture.
Often also
`ECMP <https://en.wikipedia.org/wiki/Equal-cost_multi-path_routing>`_
(Equal-cost multi-path routing) and
`BFD <https://en.wikipedia.org/wiki/Bidirectional_Forwarding_Detection>`_
(Bidirectional Forwarding Detection) are used to provide non-blocking
forwarding and fast convergence times in case of failures.
Configuration of the underlying network architecture is not in the
scope of this document.
Layer 3 routed Requirements
---------------------------
For TripleO to deploy the ``overcloud`` on a network with a layer 3 routed
architecture the following requirements must be met:
* **Layer 3 routing**:
The network infrastructure must have *routing* configured to enable traffic
between the different layer 2 segments. This can be statically or dynamically
configured.
* **DHCP-Relay**:
Each layer 2 segment that is not local to the ``undercloud`` must provide
*dhcp-relay*. DHCP requests must be forwarded to the Undercloud on the
provisioning network segment where the ``undercloud`` is connected.
.. Note:: The ``undercloud`` uses two DHCP servers. One for baremetal node
introspection, and another for deploying overcloud nodes.
Make sure to read `DHCP relay configuration`_ to understand the
requirements when configuring *dhcp-relay*.
Layer 3 routed Limitations
--------------------------
* Some roles, such as the Controller role, use virtual IP addresses and
clustering. The mechanism behind this functionality requires layer-2 network
connectivity between these nodes. These nodes must all be placed within the
same leaf.
* Similar restrictions apply to networker nodes. The Network service implements
highly-available default paths in the network using Virtual Router Redundancy
Protocol (VRRP). Since VRRP uses a virtual router ip address, master and
backup nodes must be connected to the same L2 network segment.
* When using tenant or provider networks with VLAN segmentation, the particular
VLANs used must be shared between all networker and compute nodes.
.. Note:: It is possible to configure the Network service with multiple sets
of networker nodes. Each set would share routes for their networks,
and VRRP would be used within each set of networker nodes to
provide highly-available default paths. In such configuration all
networker nodes sharing networks must be on the same L2 network
segment.
Create undercloud configuration
-------------------------------
To deploy the ``overcloud`` on a L3 routed architecture the ``undercloud``
needs to be configured with multiple neutron network segments and subnets on
the ``ctlplane`` network.
#. In the ``[DEFAULT]`` section of ``undercloud.conf`` enable the routed
networks feature by setting ``enable_routed_networks`` to ``true``. For
example::
enable_routed_networks = true
#. In the ``[DEFAULT]`` section of ``undercloud.conf`` add a comma separated
list of control plane subnets. Define one subnet for each layer 2 segment in
the routed spine and leaf. For example::
subnets = leaf0,leaf1,leaf2
#. In the ``[DEFAULT]`` section of ``undercloud.conf`` specify the subnet that
is associated with the physical layer 2 segment that is *local* to the
``undercloud``. For example::
local_subnet = leaf0
#. For each of the control plane subnets specified in ``[DEFAULT]\subnets``
add an additional section in ``undercloud.conf``, for example::
[leaf0]
cidr = 192.168.10.0/24
dhcp_start = 192.168.10.10
dhcp_end = 192.168.10.90
inspection_iprange = 192.168.10.100,192.168.10.190
gateway = 192.168.10.1
masquerade = False
[leaf1]
cidr = 192.168.11.0/24
dhcp_start = 192.168.11.10
dhcp_end = 192.168.11.90
inspection_iprange = 192.168.11.100,192.168.11.190
gateway = 192.168.11.1
masquerade = False
[leaf2]
cidr = 192.168.12.0/24
dhcp_start = 192.168.12.10
dhcp_end = 192.168.12.90
inspection_iprange = 192.168.12.100,192.168.12.190
gateway = 192.168.12.1
masquerade = False
Install the undercloud
----------------------
Once the ``undercloud.conf`` is updated with the desired configuration, install
the undercloud by running the following command::
$ openstack undercloud install
Once the ``undercloud`` is installed complete the post-install tasks such as
uploading images and registering baremetal nodes. (For additional details
regarding the post-install tasks, see
:doc:`../deployment/install_overcloud`.)
DHCP relay configuration
------------------------
The TripleO Undercloud uses two DHCP servers on the provisioning network, one
for ``introspection`` and another one for ``provisioning``. When configuring
*dhcp-relay* make sure that DHCP requests are forwarded to both DHCP servers on
the Undercloud.
For devices that support it, UDP *broadcast* can be used to relay DHCP requests
to the L2 network segment where the Undercloud provisioning network is
connected. Alternatively UDP *unicast* can be used; in this case DHCP
requests are relayed to specific ip addresses.
.. Note:: Configuration of *dhcp-relay* on specific devices types is beyond the
scope of this document. As a reference
`DHCP relay configuration (Example)`_ using the implementation in
`ISC DHCP software <https://www.isc.org/downloads/dhcp/>`_ is
available below. (Please refer to manual page
`dhcrelay(8) <https://linux.die.net/man/8/dhcrelay>`_ for further
details on how to use this implementation.)
Broadcast DHCP relay
~~~~~~~~~~~~~~~~~~~~
DHCP requests are relayed onto the L2 network segment where the DHCP server(s)
reside using UDP *broadcast* traffic. All devices on the network segment will
receive the broadcast traffic. When using UDP *broadcast* both DHCP servers on
the Undercloud will receive the relayed DHCP request.
Depending on implementation this is typically configured by specifying either
*interface* or *ip network address*:
* **Interface**:
Specifying an interface connected to the L2 network segment where the DHCP
requests will be relayed.
* **IP network address**:
Specifying the network address of the IP network where the DHCP request will
be relayed.
Unicast DHCP relay
~~~~~~~~~~~~~~~~~~
DHCP requests are relayed to specific DHCP servers using UDP *unicast* traffic.
When using UDP *unicast* the device configured to provide *dhcp-relay* must be
configured to relay DHCP requests to both the IP address assigned to the
interface used for *introspection* on the Undercloud and the IP address of the
network namespace created by the Network service to host the DHCP service for
the ``ctlplane`` network.
The interface used for *introspection* is the one defined as
``inspection_interface`` in ``undercloud.conf``.
.. Note:: It is common to use the ``br-ctlplane`` interface for introspection,
the IP address defined as ``local_ip`` in ``undercloud.conf`` will be
on the ``br-ctlplane`` interface.
The IP address allocated to the neutron DHCP namespace will typically be the
first address available in the IP range configured for the ``local_subnet`` in
``undercloud.conf``. (The first address in the IP range is the one defined as
``dhcp_start`` in the configuration.) For example: ``172.20.0.10`` would be the
IP address when the following configuration is used::
[DEFAULT]
local_subnet = leaf0
subnets = leaf0,leaf1,leaf2
[leaf0]
cidr = 172.20.0.0/26
dhcp_start = 172.20.0.10
dhcp_end = 172.20.0.19
inspection_iprange = 172.20.0.20,172.20.0.29
gateway = 172.20.0.62
masquerade = False
.. Warning:: The IP address for the DHCP namespace is automatically allocated,
it will in most cases be the first address in the IP range, but
do make sure to verify that this is the case by running the
following commands on the Undercloud::
$ openstack port list --device-owner network:dhcp -c "Fixed IP Addresses"
+----------------------------------------------------------------------------+
| Fixed IP Addresses |
+----------------------------------------------------------------------------+
| ip_address='172.20.0.10', subnet_id='7526fbe3-f52a-4b39-a828-ec59f4ed12b2' |
+----------------------------------------------------------------------------+
$ openstack subnet show 7526fbe3-f52a-4b39-a828-ec59f4ed12b2 -c name
+-------+--------+
| Field | Value |
+-------+--------+
| name | leaf0 |
+-------+--------+
DHCP relay configuration (Example)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the following examples ``dhcrelay`` from
`ISC DHCP software <https://www.isc.org/downloads/dhcp/>`_ is started using
configuration parameters to relay incoming DHCP requests on interfaces:
``eth1``, ``eth2`` and ``eth3``. The undercloud DHCP servers are on the network
segment connected to the ``eth0`` interface. The DHCP server used for
``introspection`` is listening on ip address: ``172.20.0.1`` and the DHCP
server used for ``provisioning`` is listening on ip address: ``172.20.0.10``.
Example, dhcrelay version 4.2.5 (in CentOS 7)::
dhcrelay -d --no-pid 172.20.0.10 172.20.0.1 \
-i eth0 -i eth1 -i eth2 -i eth3
Example, dhcrelay version 4.3.6 (in Fedora 28)::
dhcrelay -d --no-pid 172.20.0.10 172.20.0.1 \
-iu eth0 -id eth1 -id eth2 -id eth3
Map bare metal node ports to control plane network segments
-----------------------------------------------------------
To enable deployment onto an L3 routed network the baremetal ports must have
their ``physical_network`` field configured. Each baremetal port is associated
with a baremetal node in the Bare Metal service. The physical network names are
the ones used in the ``subnets`` option in the undercloud configuration.
.. Note:: The physical network name of the subnet specified as ``local_subnet``
in ``undercloud.conf`` is special. It is **always** named
``ctlplane``.
#. Make sure the baremetal nodes are in one of the following states: *enroll*,
or *manageable*. If the baremetal node is not in one of these states the
command used to set the ``physical_network`` property on the baremetal port
will fail. (For additional details regarding node states see
:doc:`../provisioning/node_states`.)
To set all nodes to ``manageable`` state run the following command::
for node in $(baremetal node list -f value -c Name); do \
baremetal node manage $node --wait; done
#. Use ``baremetal port list --node <node-uuid>`` command to find out
which baremetal ports are associated with which baremetal node. Then set the
``physical-network`` for the ports.
In the example below three subnets were defined in the configuration,
*leaf0*, *leaf1* and *leaf2*. Notice that the ``local_subnet`` is ``leaf0``,
since the physical network for the ``local_subnet`` is always ``ctlplane``
the baremetal port connected to ``leaf0`` uses ``ctlplane``. The remaining
ports use the ``leafX`` names::
$ baremetal port set --physical-network ctlplane <port-uuid>
$ baremetal port set --physical-network leaf1 <port-uuid>
$ baremetal port set --physical-network leaf2 <port-uuid>
$ baremetal port set --physical-network leaf2 <port-uuid>
#. Make sure the nodes are in ``available`` state before deploying the
overcloud::
$ openstack overcloud node provide --all-manageable
Create network data with multi-subnet networks
----------------------------------------------
Network data (``network_data.yaml``) is used to define the networks in the
deployment. Each network has a base subnet defined by the network's
properties: ``ip_subnet``, ``allocation_pools``, ``gateway_ip``, ``vlan`` etc.
With support for routed networks (multiple subnets per network) the schema for
networks was extended with the ``subnets`` property, a map of one or more
additional subnets associated with the network. ``subnets`` property example::
subnets:
<subnet_name>:
vlan: '<vlan_id>'
ip_subnet: '<network_address>/<prefix>'
allocation_pools: [{'start': '<start_address>', 'end': '<end_address>'}]
gateway_ip: '<router_ip_address>'
.. Note:: The name of the base subnet is ``name_lower`` with the suffix
``_subnet`` appended. For example, the base subnet on the
``InternalApi`` network will be named ``internal_api_subnet``. This
name is used when setting the subnet for a role to use the base
subnet. (See
`Create roles specific to each leaf (layer 2 segment)`_)
Full networks data example::
- name: External
vip: true
name_lower: external
vlan: 100
ip_subnet: '10.0.0.0/24'
allocation_pools: [{'start': '10.0.0.4', 'end': '10.0.0.99'}]
gateway_ip: '10.0.0.254'
- name: InternalApi
name_lower: internal_api
vip: true
vlan: 10
ip_subnet: '172.17.0.0/24'
allocation_pools: [{'start': '172.17.0.10', 'end': '172.17.0.250'}]
gateway_ip: '172.17.0.254'
subnets:
internal_api_leaf1:
vlan: 11
ip_subnet: '172.17.1.0/24'
allocation_pools: [{'start': '172.17.1.10', 'end': '172.17.1.250'}]
gateway_ip: '172.17.1.254'
- name: Storage
vip: true
vlan: 20
name_lower: storage
ip_subnet: '172.18.0.0/24'
allocation_pools: [{'start': '172.18.0.10', 'end': '172.18.0.250'}]
gateway_ip: '172.18.0.254'
subnets:
storage_leaf1:
vlan: 21
ip_subnet: '172.18.1.0/24'
allocation_pools: [{'start': '172.18.1.10', 'end': '172.18.1.250'}]
gateway_ip: '172.18.1.254'
- name: StorageMgmt
name_lower: storage_mgmt
vip: true
vlan: 30
ip_subnet: '172.19.0.0/24'
allocation_pools: [{'start': '172.19.0.10', 'end': '172.19.0.250'}]
gateway_ip: '172.19.0.254'
subnets:
storage_mgmt_leaf1:
vlan: 31
ip_subnet: '172.19.1.0/24'
allocation_pools: [{'start': '172.19.1.10', 'end': '172.19.1.250'}]
gateway_ip: '172.19.1.254'
- name: Tenant
vip: false # Tenant network does not use VIPs
name_lower: tenant
vlan: 40
ip_subnet: '172.16.0.0/24'
allocation_pools: [{'start': '172.16.0.10', 'end': '172.16.0.250'}]
gateway_ip: '172.16.0.254'
subnets:
tenant_leaf1:
vlan: 41
ip_subnet: '172.16.1.0/24'
allocation_pools: [{'start': '172.16.1.10', 'end': '172.16.1.250'}]
gateway_ip: '172.16.1.254'
Create roles specific to each leaf (layer 2 segment)
----------------------------------------------------
To aid in scheduling and to allow override of leaf-specific parameters in
``tripleo-heat-templates``, create new roles for each L2 leaf. In the
``networks`` property for each role, add the networks and associated subnet.
The following is an example with one controller role, and two compute roles.
Please refer to :doc:`custom_roles` for details on configuring custom roles.
Example ``roles_data`` below. (The list of default services has been left out.)
::
#############################################################################
# Role: Controller #
#############################################################################
- name: Controller
description: |
Controller role that has all the controller services loaded and handles
Database, Messaging and Network functions.
CountDefault: 1
tags:
- primary
- controller
networks:
External:
subnet: external_subnet
InternalApi:
subnet: internal_api_subnet
Storage:
subnet: storage_subnet
StorageMgmt:
subnet: storage_mgmt_subnet
Tenant:
subnet: tenant_subnet
HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services:: [...]
#############################################################################
# Role: ComputeLeaf0 #
#############################################################################
- name: ComputeLeaf0
description: |
Basic Compute Node role
CountDefault: 1
networks:
InternalApi:
subnet: internal_api_subnet
Tenant:
subnet: tenant_subnet
Storage:
subnet: storage_subnet
HostnameFormatDefault: '%stackname%-compute-leaf0-%index%'
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services:: [...]
#############################################################################
# Role: ComputeLeaf1 #
#############################################################################
- name: ComputeLeaf1
description: |
Basic Compute Node role
CountDefault: 1
networks:
InternalApi:
subnet: internal_api_leaf1
Tenant:
subnet: tenant_leaf1
Storage:
subnet: storage_leaf1
HostnameFormatDefault: '%stackname%-compute-leaf1-%index%'
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services:: [...]
Configure node placement
------------------------
Use node placement to map the baremetal nodes to roles, with each role using a
different set of local layer 2 segments. Please refer to :doc:`../provisioning/node_placement`
for details on how to configure node placement.
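A hedged sketch of what such a mapping could look like, following the
scheduler-hints approach described in that document; the parameter names follow
the ``{{role.name}}SchedulerHints`` pattern and the capability values are
placeholder assumptions::

   parameter_defaults:
     ControllerSchedulerHints:
       'capabilities:node': 'controller-%index%'
     ComputeLeaf0SchedulerHints:
       'capabilities:node': 'compute-leaf0-%index%'
     ComputeLeaf1SchedulerHints:
       'capabilities:node': 'compute-leaf1-%index%'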
Add role specific configuration to ``parameter_defaults``
---------------------------------------------------------
In TripleO templates role-specific parameters are defined using variables. One
of the variables used is ``{{role.name}}``. The templates have parameters such
as ``{{role.name}}Count``, ``Overcloud{{role.name}}Flavor``,
``{{role.name}}ControlPlaneSubnet`` and many more. This enables per-role values
for these parameters.
Before deploying the ``overcloud``, create an environment file (the examples in
this document use ``node_data.yaml`` for this) that contains the required
overrides. In the example below there are parameter overrides to specify the
*Count*, *Flavor* and *ControlPlaneSubnet* to use for the following roles:
* Controller
* ComputeLeaf0
* ComputeLeaf1
Parameter override example::
parameter_defaults:
OvercloudComputeLeaf0Flavor: compute-leaf0
OvercloudComputeLeaf1Flavor: compute-leaf1
ControllerCount: 3
ComputeLeaf0Count: 5
ComputeLeaf1Count: 5
ControllerControlPlaneSubnet: leaf0
ComputeLeaf0ControlPlaneSubnet: leaf0
ComputeLeaf1ControlPlaneSubnet: leaf1
Network configuration templates
-------------------------------
Network configuration templates are dynamically generated, but depending on the
hardware configuration, the sample configurations might not be an option. If
this is the case, the network configuration templates can be generated manually,
providing a good starting point for further customization.
Use the ``process-templates.py`` tool to generate network config templates for
all roles. For example::
$ /usr/share/openstack-tripleo-heat-templates/tools/process-templates.py \
-p /usr/share/openstack-tripleo-heat-templates \
-r /home/stack/roles_data.yaml \
-n /home/stack/network_data_subnets_routed.yaml \
-o /home/stack/processed_templates
The generated example templates for each role can now be found under the
``/home/stack/processed_templates/network/config/`` directory::
/home/stack/processed_templates/network/config/
├── bond-with-vlans
│   ├── computeleaf0.yaml
│   ├── computeleaf1.yaml
│   ├── controller-no-external.yaml
│   ├── controller-v6.yaml
│   ├── controller.yaml
│   └── README.md
├── multiple-nics
│   ├── compute-dvr.yaml
│   ├── computeleaf0.yaml
│   ├── computeleaf1.yaml
│   ├── controller-v6.yaml
│   ├── controller.yaml
│   └── README.md
├── single-nic-linux-bridge-vlans
│   ├── computeleaf0.yaml
│   ├── computeleaf1.yaml
│   ├── controller-v6.yaml
│   ├── controller.yaml
│   └── README.md
└── single-nic-vlans
├── computeleaf0.yaml
├── computeleaf1.yaml
├── controller-no-external.yaml
├── controller-v6.yaml
├── controller.yaml
└── README.md
Inspect the generated template files to find out which sample is most similar
to the specific deployment's hardware configuration. Make copies, and edit the
network configuration templates as needed.
.. Note:: If compute nodes (or some other roles) in different leafs have the
same hardware configuration and network needs, a single network
configuration template can be used for both roles. For example the
``computeleaf0.yaml`` template could be copied as ``compute.yaml``, as
shown below, and be used for both compute roles (``computeleaf0`` and
``computeleaf1``).
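For example, a minimal sketch of copying the generated samples into place,
assuming the ``multiple-nics`` samples and the ``/home/stack/templates``
directory used in the ``resource_registry`` example below::

   $ mkdir -p /home/stack/templates
   $ cp /home/stack/processed_templates/network/config/multiple-nics/controller.yaml \
        /home/stack/templates/controller.yaml
   $ cp /home/stack/processed_templates/network/config/multiple-nics/computeleaf0.yaml \
        /home/stack/templates/compute.yaml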
Create an environment file (``network-environment-overrides.yaml``) with
``resource_registry`` overrides to specify the network configuration templates
to use. For example::
resource_registry:
# Port assignments for the Controller
OS::TripleO::Controller::Net::SoftwareConfig:
/home/stack/templates/controller.yaml
# Port assignments for the ComputeLeaf0
OS::TripleO::ComputeLeaf0::Net::SoftwareConfig:
/home/stack/templates/compute.yaml
# Port assignments for the ComputeLeaf1
OS::TripleO::ComputeLeaf1::Net::SoftwareConfig:
/home/stack/templates/compute.yaml
Virtual IP addresses (VIPs)
---------------------------
If a controller role that is hosting VIPs (Virtual IP addresses) is not
using the base subnet of one or more networks, additional overrides to the
``VipSubnetMap`` are required to ensure VIPs are created on the subnet
associated with the L2 network segment the controller nodes are connected to.
Example, specifying which subnets to use when creating VIPs for the different
networks::
parameter_defaults:
VipSubnetMap:
ctlplane: leaf1
redis: internal_api_leaf1
InternalApi: internal_api_leaf1
Storage: storage_leaf1
StorageMgmt: storage_mgmt_leaf1
In this document the ctlplane subnet for the Controller is ``leaf0``. To set
which subnet on the ctlplane network will be used for cluster VIPs
(Virtual IP addresses), the ``VipSubnetMap`` parameter must be overridden in an
environment file. For example, add the following to
``network-environment-overrides.yaml``::
parameter_defaults:
VipSubnetMap:
ctlplane: leaf0
Deploy the overcloud
--------------------
To deploy the overcloud, run the ``openstack overcloud deploy`` command,
specifying the roles data file, the network data file and environment files.
For example::
$ openstack overcloud deploy --templates \
-n /home/stack/templates/network_data_subnets_routed.yaml \
-r /home/stack/templates/roles_data.yaml \
-e /home/stack/environments/node_data.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-environment.yaml \
-e /home/stack/environments/network-environment-overrides.yaml
.. Note:: Remember to include other environment files that you might want for
configuration of the overcloud.

View File

@ -1,408 +0,0 @@
Security Hardening
==================
TripleO can deploy Overcloud nodes with various Security Hardening values
passed in as environment files to the ``openstack overcloud deploy`` command.
.. note::
It is especially important to remember that you **must** include all
environment files needed to deploy the overcloud. Make sure
you pass the full environment in addition to your customization environments
at the end of each ``openstack overcloud deploy`` command.
Horizon Password Validation
---------------------------
Horizon provides a password validation check which OpenStack cloud operators
can use to enforce password complexity.
A regular expression can be used for password validation, with help text to
display if the user's password does not adhere to the validation checks.
The following example will enforce users to create a password between 8 and 18
characters in length::
parameter_defaults:
HorizonPasswordValidator: '^.{8,18}$'
HorizonPasswordValidatorHelp: 'Password must be between 8 and 18 characters.'
If the above YAML is saved as ``horizon_password.yaml``, we can then pass it
into the overcloud deploy command as follows::
openstack overcloud deploy --templates \
-e <full environment> -e horizon_password.yaml
Default Security Values in Horizon
----------------------------------
The following config directives are set to ``True`` as a secure default;
however, if a reason exists for an operator to disable one of the following
values, they can do so using an environment file.
.. note:: The following directives should only be set to ``False`` once the
potential security impacts are fully understood.
Enforce Password Check
~~~~~~~~~~~~~~~~~~~~~~
Setting ``ENFORCE_PASSWORD_CHECK`` to ``True`` within Horizon's
``local_settings.py`` displays an Admin Password field on the
“Change Password” form, to verify that it is the logged-in admin who wants to
perform the password change.
If a need is present to disable ``ENFORCE_PASSWORD_CHECK`` then this can be
achieved using an environment file containing the following parameter::
parameter_defaults:
ControllerExtraConfig:
horizon::enforce_password_check: false
Disallow Iframe Embed
~~~~~~~~~~~~~~~~~~~~~
``DISALLOW_IFRAME_EMBED`` can be used to prevent Horizon from being embedded within
an iframe. Legacy browsers are still vulnerable to a Cross-Frame Scripting (XFS)
vulnerability, so this option allows extra security hardening where iframes are
not used in deployment.
If, however, a reason exists to allow iframe embedding, then the following
parameter can be set within an environment file::
parameter_defaults:
ControllerExtraConfig:
horizon::disallow_iframe_embed: false
Disable Password Reveal
~~~~~~~~~~~~~~~~~~~~~~~
In the same way as ``ENFORCE_PASSWORD_CHECK`` and ``DISALLOW_IFRAME_EMBED``, the
``DISABLE_PASSWORD_REVEAL`` value can be toggled as a parameter::
parameter_defaults:
ControllerExtraConfig:
horizon::disable_password_reveal: false
SSH Banner Text
---------------
SSH ``/etc/issue`` Banner text can be set using the following parameters in an
environment file::
resource_registry:
OS::TripleO::Services::Sshd: ../deployment/sshd/sshd-baremetal-ansible.yaml
parameter_defaults:
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
As with the previous Horizon Password Validation example, saving the above into
a YAML file will allow passing the aforementioned parameters into the overcloud
deploy command::
openstack overcloud deploy --templates \
-e <full environment> -e ssh_banner.yaml
Audit
-----
Having a system capable of recording all audit events is key for troubleshooting
and performing analysis of events that led to a certain outcome. The audit system
is capable of logging many events such as someone changing the system time,
changes to Mandatory / Discretionary Access Control, creating / destroying users
or groups.
Rules can be declared using an environment file and injected into
``/etc/audit/audit.rules``::
parameter_defaults:
AuditdRules:
'Record Events that Modify User/Group Information':
content: '-w /etc/group -p wa -k audit_rules_usergroup_modification'
order : 1
'Collects System Administrator Actions':
content: '-w /etc/sudoers -p wa -k actions'
order : 2
'Record Events that Modify the Systems Mandatory Access Controls':
content: '-w /etc/selinux/ -p wa -k MAC-policy'
order : 3
Firewall Management
-------------------
Iptables rules are automatically deployed on overcloud nodes to open only the
ports which are needed to get OpenStack working. Rules can be added during the
deployment when needed, for example for the Zabbix monitoring system.
.. code-block:: yaml
parameter_defaults:
ExtraFirewallRules:
'301 allow zabbix':
dport: 10050
proto: tcp
source: 10.0.0.8
jump: accept
Rules can also be used to restrict access. The number used in the definition of
a rule determines where the nftables rule will be inserted. For example, the
rabbitmq rule number is 109 by default. If you want to restrict it further, you
could do the following.
.. code-block:: yaml
parameter_defaults:
ExtraFirewallRules:
'098 allow rabbit from internalapi network':
dport:
- 4369
- 5672
- 25672
proto: tcp
source: 10.0.0.0/24
jump: accept
'099 drop other rabbit access':
dport:
- 4369
- 5672
- 25672
proto: tcp
jump: drop
In this example, 098 and 099 are arbitrary numbers that are smaller than the
default rabbitmq rule number. To find the number of a rule, inspect the active
nftables rules on an appropriate node (a controller, in the case of rabbitmq).
.. code-block:: shell
nft list chain inet filter TRIPLEO_INPUT
[...]
tcp dport { 4369, 5672, 25672-25683 } ct state new counter packets 0 bytes 0 accept comment "109 rabbitmq"
Alternatively, it is possible to get the information from the TripleO service
definition, in this case `deployment/rabbitmq/rabbitmq-container-puppet.yaml`.
.. code-block:: yaml
firewall_rules:
'109 rabbitmq':
dport:
- 4369
- 5672
- 25672
- 25673-25683
Additional information regarding the available interface options, the role, and
some of the implementation details can be reviewed `here <https://docs.openstack.org/tripleo-ansible/latest/roles/role-tripleo_firewall.html>`_.
VXLAN and nftables
~~~~~~~~~~~~~~~~~~
In order to properly get VXLAN support, you have to add a couple of rules to
the Undercloud firewall. This is especially true for a lab environment, or on
the upstream CI infrastructure. Here is an example of the custom rules for
the CI; feel free to adapt them. Note that the network is the one used on the
eth0 interface, i.e. the "public" network of the Undercloud.
.. code-block:: yaml
parameter_defaults:
ExtraFirewallRules:
'020 Allow VXLan from CI infra network':
proto: "udp"
dport: 4789
source: "PUBLIC_NETWORK_CIDR"
state: []
'021 Allow OTV for vxlan from CI infra network':
proto: "udp"
dport: 8472
source: "PUBLIC_NETWORK_CIDR"
state: []
.. note:: The ``state: []`` is mandatory in order to match more than only the
NEW connections (the default with the nftables and iptables modules).
AIDE - Intrusion Detection
--------------------------
AIDE (Advanced Intrusion Detection Environment) is a file and directory
integrity checker. It is used as a medium to reveal possible unauthorized file
tampering / changes.
AIDE creates an integrity database of file hashes, which can then be used as a
comparison point to verify the integrity of the files and directories.
The TripleO AIDE service allows an operator to populate entries into an AIDE
configuration, which is then used by the AIDE service to create an integrity
database. This can be achieved using an environment file with the following
example structure:
.. code-block:: yaml
resource_registry:
OS::TripleO::Services::Aide: /usr/share/openstack-tripleo-heat-templates/deployment/aide/aide-baremetal-ansible.yaml
parameter_defaults:
AideRules:
'TripleORules':
content: 'TripleORules = p+sha256'
order : 1
'etc':
content: '/etc/ TripleORules'
order : 2
'boot':
content: '/boot/ TripleORules'
order : 3
'sbin':
content: '/sbin/ TripleORules'
order : 4
'var':
content: '/var/ TripleORules'
order : 5
'not var/log':
content: '!/var/log.*'
order : 6
'not var/spool':
content: '!/var/spool.*'
order : 7
'not /var/adm/utmp':
content: '!/var/adm/utmp$'
order: 8
'not nova instances':
content: '!/var/lib/nova/instances.*'
order: 9
.. note::
Operators should select their own required AIDE values, as the example list
above is not actively maintained or benchmarked. It only seeks to
document the required YAML structure.
If the above environment file were saved as `aide.yaml` it could then be passed to
the `overcloud deploy` command as follows::
openstack overcloud deploy --templates -e aide.yaml
Let's walk through the different values used here.
First an 'alias' name `TripleORules` is declared to save us repeatedly typing
out the same attributes each time. To the alias we apply attributes of
`p+sha256`. In AIDE terms this reads as monitor all file permissions `p` with an
integrity checksum of `sha256`. For a complete list of attributes that can be
used in AIDE's config files, refer to the `AIDE MAN page <http://aide.sourceforge.net/stable/manual.html#config>`_.
Complex rules can be created using this format, such as the following::
MyAlias = p+i+n+u+g+s+b+m+c+sha512
The above would translate as monitor permissions, inodes, number of links, user,
group, size, block count, mtime, ctime, using sha512 for checksum generation.
Note, the alias should always have an order position of `1`, which means that
it is positioned at the top of the AIDE rules and is applied recursively to all
values below.
Following after the alias are the directories to monitor. Note that regular
expressions can be used. For example we set monitoring for the `var` directory,
but override with a not clause using `!` with `'!/var/log.*'` and
`'!/var/spool.*'`.
Further AIDE values
~~~~~~~~~~~~~~~~~~~
The following AIDE values can also be set.
`AideConfPath`: The full POSIX path to the AIDE configuration file; this
defaults to `/etc/aide.conf`. If no requirement is in place to change the file
location, it is recommended to stick with the default path.
`AideDBPath`: The full POSIX path to the AIDE integrity database. This value is
configurable to allow operators to declare their own full path, as often AIDE
database files are stored off node perhaps on a read only file mount.
`AideDBTempPath`: The full POSIX path to the AIDE integrity temporary database.
This temporary file is created when AIDE initializes a new database.
`AideHour`: This value sets the hour attribute as part of the AIDE cron
configuration.
`AideMinute`: This value sets the minute attribute as part of the AIDE cron
configuration.
`AideCronUser`: This value sets the Linux user as part of the AIDE cron
configuration.
`AideEmail`: This value sets the email address that receives AIDE reports each
time a cron run is made.
`AideMuaPath`: This value sets the path to the Mail User Agent that is used to
send AIDE reports to the email address set within `AideEmail`.
Cron configuration
~~~~~~~~~~~~~~~~~~
The AIDE TripleO service allows configuration of a cron job. By default it will
send reports to `/var/log/audit/`, unless `AideEmail` is set, in which case it
will instead email the reports to the declared email address.
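A minimal sketch of the cron related values, using the parameters listed above;
the schedule, user and addresses shown are placeholder assumptions::

   parameter_defaults:
     AideHour: 3
     AideMinute: 0
     AideCronUser: 'root'
     AideEmail: 'security@example.com'
     AideMuaPath: '/usr/sbin/sendmail'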
AIDE and Upgrades
~~~~~~~~~~~~~~~~~
When an upgrade is performed, the AIDE service will automatically regenerate
a new integrity database to ensure all upgraded files are correctly recomputed
to possess an updated checksum.
If `openstack overcloud deploy` is called as a subsequent run to an initial
deployment *and* the AIDE configuration rules are changed, the TripleO AIDE
service will rebuild the database to ensure the new config attributes are
encapsulated in the integrity database.
SecureTTY
---------
SecureTTY allows disabling root access via any console device (tty) by means of
entries to the `/etc/securetty` file.
An environment file can be used to set `/etc/securetty` entries as follows::
resource_registry:
OS::TripleO::Services::Securetty: ../deployment/securetty/securetty-baremetal-puppet.yaml
parameter_defaults:
TtyValues:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
Keystone CADF auditing
----------------------
Keystone CADF auditing can be enabled by setting `KeystoneNotificationFormat`::
parameter_defaults:
KeystoneNotificationFormat: cadf
login.defs values
-----------------
Entries can be made to `/etc/login.defs` to enforce password characteristics
for new users added to the system, for example::
resource_registry:
OS::TripleO::Services::LoginDefs: ../deployment/login-defs/login-defs-baremetal-puppet.yaml
parameter_defaults:
PasswordMaxDays: 60
PasswordMinDays: 1
PasswordMinLen: 5
PasswordWarnAge: 7
FailDelay: 4

View File

@ -1,87 +0,0 @@
Disabling updates to certain nodes
==================================
Server blacklist
----------------
Servers can be excluded from getting any updated Heat deployments by adding
them to a blacklist parameter called ``DeploymentServerBlacklist``.
Setting the blacklist
_____________________
The ``DeploymentServerBlacklist`` parameter is a list of Heat server names.
Write a new environment file, or add the parameter value to an existing
custom environment file and pass the file to the deployment command::
parameter_defaults:
DeploymentServerBlacklist:
- overcloud-compute-0
- overcloud-compute-1
- overcloud-compute-2
.. note::
The server names in the parameter value are the names according to Heat, not
the actual server hostnames.
Any servers in the list will be blacklisted by Heat from getting any updated
deployments triggered by Heat. After the stack operation completes, any
blacklisted servers will be unchanged. The blacklisted servers could also have
been powered off, or had their ``os-collect-config`` agents stopped during the
stack operation.
The blacklist can be used during scale out operations or for isolating changes
to certain servers only.
.. warning::
Blacklisting servers disables **all** updates to the blacklisted nodes, even
for those deployments that could be considered critical.
.. warning::
Blacklisting servers should be done with caution, and only when the operator
understands that the requested change can be applied with a blacklist in
effect.
It would be possible to blacklist servers in ways that create a hung stack in
Heat, or a misconfigured overcloud. For example, cluster configuration
changes that would need to be applied to all members of a pacemaker cluster
would not support blacklisting certain cluster members since it
could result in a misconfigured cluster.
.. warning::
The blacklist should not be used during the update or upgrade procedures.
Those procedures have their own methods for isolating changes to particular
servers. See the documentation for updates/upgrades for more information.
.. warning::
In cases where servers are added to the blacklist, further changes to those
nodes are not supported until the server is removed from the blacklist. This
includes updates/upgrades/scale up/scale down/node replacement.
Clearing the blacklist
______________________
When clearing the blacklist for subsequent stack operations, an empty parameter
value must be sent with the deploy command. It is not sufficient to simply omit
the parameter since Heat will use the previously saved value.
Send an empty list value to force Heat to clear the blacklist::
parameter_defaults:
DeploymentServerBlacklist: []
Skip deploy identifier
----------------------
The default behavior during a stack update operation is to force puppet to
reapply all manifests. This can be a time consuming operation and is not always
required if no configuration data is changing, such as in the case of only
scaling out certain roles.
The behavior can be overridden by passing ``--skip-deploy-identifier`` to the
``openstack overcloud deploy`` command.
Similar to the server blacklist feature, this feature should be used only when
the operator is sure that puppet can be safely skipped on the stack update.
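For example, a scale-out only stack update could be run as follows (a sketch;
``<full environment>`` stands for whatever environment files were used for the
initial deployment)::

   openstack overcloud deploy --templates \
     --skip-deploy-identifier \
     -e <full environment>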
.. note::
In some cases, puppet will still run even when ``--skip-deploy-identifier``
is specified. These cases include changes to puppet manifests or hieradata.

View File

@ -1,129 +0,0 @@
Splitting the Overcloud stack into multiple independent Heat stacks
===================================================================
.. note:: Since Victoria, TripleO provisions baremetal using a separate
workflow (:doc:`../provisioning/baremetal_provision`) that does not
involve a Heat stack, making this feature irrelevant.
split-stack is a feature in TripleO that splits the overcloud stack into
multiple independent stacks in Heat.
The ``overcloud`` stack is split into an ``overcloud-baremetal`` and
``overcloud-services`` stack. This allows for independent and isolated
management of the baremetal and services part of the Overcloud deployment. It
is a more modular design than deploying a single ``overcloud`` stack in that it
allows either the baremetal or services stack to be replaced by tooling that is
external to TripleO if desired.
The ``overcloud-services`` stack makes extensive use of the deployed-server
feature, documented at :doc:`deployed_server` in order to orchestrate the
deployment and configuration of the services separate from the baremetal
deployment.
split-stack allows for mixing baremetal systems deployed by TripleO and those
deployed by external tooling when creating the services stack. Since the
baremetal resources are completely abstracted behind the deployed-server
interface when deploying the services stack, it does not matter whether the
servers were actually created with TripleO or not.
split-stack Requirements
------------------------
A default split-stack deployment (detailed in the later steps) can be deployed
without any special requirements.
More advanced deployments where baremetal servers provisioned by TripleO will
be mixed with those not provisioned by TripleO will want to pay attention to
the requirements around using already deployed servers from
:doc:`deployed_server`. The requirements for using deployed servers will apply
when not using servers provisioned by TripleO.
Default split-stack deployment
------------------------------
split-stack will be deployed by running 2 separate ``openstack overcloud
deploy`` commands to deploy the separate stacks.
If applicable, prepare the custom roles files and any custom environments
initially. The custom roles file and an environment setting the role counts
should be passed to both deployment commands so that enough baremetal nodes are
deployed to match what the ``overcloud-services`` stack expects.
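A minimal sketch of such a role-count environment, assuming the default
Controller and Compute roles; the counts are placeholders::

   parameter_defaults:
     ControllerCount: 3
     ComputeCount: 2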
Baremetal Deployment Command
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Run the deployment command to deploy the ``overcloud-baremetal`` stack.
An additional environment file, ``overcloud-baremetal.yaml``, is passed to the
deployment to enable deploying just the baremetal stack.
Enough baremetal nodes should be deployed to match how many nodes per role will
be needed when the services stack is deployed later. Be sure that the
environment file being used to set the role counts is passed to the baremetal
deployment command::
openstack overcloud deploy \
<other cli arguments> \
--stack overcloud-baremetal \
-r roles-data.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/overcloud-baremetal.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/split-stack-consistent-hostname-format.yaml
The ``--stack`` argument sets the name of the Heat stack to
``overcloud-baremetal``. This will also be the name of the Swift container that
stores the stack's plan (templates) and of the Mistral environment.
The ``roles-data.yaml`` roles file illustrates passing a custom roles file to
the deployment command. It is not necessary to use custom roles when using
split stack, however if custom roles are used, the same roles file should be
used for both stacks.
The ``overcloud-baremetal.yaml`` environment will set the parameters for the
deployment such that no services will be deployed.
The ``split-stack-consistent-hostname-format.yaml`` environment will set the
respective ``<role-name>HostnameFormat`` parameters for each role defined in
the role files used. The server hostnames for the 2 stacks must be the same,
otherwise the servers will not be able to pull their deployment metadata from
Heat.
.. warning::
Do not pass any network isolation templates or NIC config templates to the
``overcloud-baremetal`` stack deployment command. These will only be passed
to the ``overcloud-services`` stack deployment command.
Services Deployment Command
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The services stack, ``overcloud-services`` will now be deployed with a separate
deployment command::
openstack overcloud deploy \
<other cli arguments> \
--stack overcloud-services \
--disable-validations \
-r roles-data.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-environment.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-deployed-neutron-ports.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/deployed-server-bootstrap-environment-centos.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/split-stack-consistent-hostname-format.yaml
The ``overcloud-services`` stack makes use of the "deployed-server" feature.
The additional environments needed are shown in the above command. See
:doc:`deployed_server` for more information on how to fully configure the
feature.
The roles file, ``roles-data.yaml`` is again passed to the services stack as
the same roles file should be used for both stacks.
The ``split-stack-consistent-hostname-format.yaml`` environment is again
passed, so that the hostnames used for the server resources created by Heat are
the same as were created in the previous baremetal stack.
During this deployment, any network isolation environments and/or NIC config
templates should be passed for the desired network configuration.
The stack should complete and the generated ``overcloudrc`` can be used to
interact with the Overcloud.

View File

@ -1,88 +0,0 @@
Deploying with SR-IOV Support
===============================
TripleO can deploy Overcloud nodes with SR-IOV support. A new role ``ComputeSriov``
has been added to create a custom ``roles_data.yaml`` with a composable SR-IOV role.
Execute the command below to create the ``roles_data.yaml``::
openstack overcloud roles generate -o roles_data.yaml Controller ComputeSriov
Once a roles file is created, the following changes are required:
- Deploy Command
- Parameters
- Network Config
Deploy Command
----------------
The deploy command should include the generated roles data file from the above
command.
The deploy command should also include the SR-IOV environment file to include
the neutron-sriov-agent service. All the required parameters are also specified
in this environment file. The parameters have to be configured according to the
baremetal on which SR-IOV needs to be enabled.
Also, SR-IOV requires mandatory kernel parameters to be set, like
``intel_iommu=on iommu=pt`` on Intel machines. In order to enable the
configuration of kernel parameters on the host, the host-config-and-reboot
environment file has to be added to the deploy command.
Adding the following arguments to the ``openstack overcloud deploy`` command
will do the trick::
openstack overcloud deploy --templates \
-r roles_data.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-sriov.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/host-config-and-reboot.yaml \
...
Parameters
----------
The following is the list of parameters which need to be provided for deploying
with SR-IOV support.
* ``NovaPCIPassthrough``: Provide the list of SR-IOV device names, the logical network,
PCI addresses etc. The PF/VF devices matching the criteria would be available for
guests.
* ``NeutronPhysicalDevMappings``: The map of logical network name and the physical interface.
Example::
parameter_defaults:
NovaPCIPassthrough:
- devname: "p7p1"
physical_network: "sriov1_net"
- devname: "p7p2"
physical_network: "sriov2_net"
NeutronPhysicalDevMappings: "sriov1_net:p7p1,sriov2_net:p7p2"
The parameter ``KernelArgs`` should be provided in the deployment environment
file, with the set of kernel boot parameters to be applied on the
``ComputeSriov`` role where SR-IOV is enabled::
parameter_defaults:
ComputeSriovParameters:
KernelArgs: "intel_iommu=on iommu=pt"
Network Config
--------------
SR-IOV supported network interfaces should be specified in the network config
templates as the ``sriov_pf`` type. This mechanism of configuring ``numvfs`` for
the SR-IOV device is recommended, and ``NeutronSriovNumVFs`` should be avoided.
Example::
network_config:
- type: sriov_pf
name: p7p2
mtu: 9000
numvfs: 10
use_dhcp: false
defroute: false
nm_controlled: true
promisc: false

View File

@ -1,399 +0,0 @@
.. _ssl:
Deploying with SSL
==================
TripleO supports deploying with SSL on the public OpenStack endpoints as well
as deploying SSL in the internal network for most services.
This document will focus on deployments using network isolation. For more
details on deploying that way, see
:doc:`network_isolation`
Undercloud SSL
--------------
To enable SSL with an automatically generated certificate, you must set
the ``generate_service_certificate`` option in ``undercloud.conf`` to
``True``. This will generate a certificate in ``/etc/pki/tls/certs`` with
a file name that follows this pattern::
undercloud-[undercloud_public_vip].pem
This will be a PEM file in a format that HAProxy can understand (see the
HAProxy documentation for more information on this).
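A minimal sketch of the relevant ``undercloud.conf`` settings for this case; the
``certificate_generation_ca`` value shown is the default local CA discussed
below::

   [DEFAULT]
   generate_service_certificate = true
   certificate_generation_ca = local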
.. admonition:: Stable Branch
:class: stable
As of the Rocky release, the default is to have TLS enabled through
this option.
This option for auto-generating certificates uses Certmonger to request
and keep track of the certificate. So you will see a certificate with the
ID of ``undercloud-haproxy-public-cert`` in certmonger (you can check this
by using the ``sudo getcert list`` command). Note that this also implies
that certmonger will manage the certificate's lifecycle, so when it needs
renewing, certmonger will do that for you.
The default is to use Certmonger's ``local`` CA. So using this option has
the side-effect of extracting Certmonger's local CA to a PEM file that is
located in the following path::
/etc/pki/ca-trust/source/anchors/cm-local-ca.pem
This certificate will then be added to the trusted CA chain, since this is
needed to be able to use the undercloud's endpoints with that certificate.
.. admonition:: Stable Branch
:class: stable
As of the Rocky release, the default is for TripleO to pass this CA
certificate to overcloud nodes so it'll be trusted.
.. note:: If you need to access the undercloud from outside the node, the
aforementioned file is the one you need to add to your trust store.
So for RHEL-based systems you need to copy ``cm-local-ca.pem`` into
``/etc/pki/ca-trust/source/anchors/`` and subsequently run the
command ``update-ca-trust extract``. This will add that CA to your
trust store.
However, it is possible to not use certmonger's ``local`` CA. For
instance, one can use FreeIPA as the CA by setting the option
``certificate_generation_ca`` in ``undercloud.conf`` to have 'IPA' as the
value. This requires the undercloud host to be enrolled as a FreeIPA
client, and to define a ``haproxy/<undercloud FQDN>@<KERBEROS DOMAIN>``
service in FreeIPA. We also need to set the option ``service_principal``
to the relevant value in ``undercloud.conf``. Finally, we need to set the
public endpoints to use FQDNs instead of IP addresses, which will also
then use an FQDN for the certificate.
To enable an FQDN for the certificate we set the ``undercloud_public_vip``
to the desired hostname in ``undercloud.conf``. This will in turn also set
the keystone endpoints to relevant values.
Note that the ``generate_service_certificate`` option doesn't take into
account the ``undercloud_service_certificate`` option and will have
precedence over it.
To enable SSL on the undercloud with a pre-created certificate, you must
set the ``undercloud_service_certificate`` option in ``undercloud.conf``
to an appropriate certificate file. Importantly, the certificate file's
Common Name *must* be set to the value of ``undercloud_public_vip`` in
``undercloud.conf``.
If you do not have a trusted CA signed certificate file, you can alternatively
generate a self-signed certificate file using the following command::
openssl genrsa -out privkey.pem 2048
The next command will prompt for some identification details. Most of these don't
matter, but make sure the ``Common Name`` entered matches the value of
``undercloud_public_vip`` in undercloud.conf::
openssl req -new -x509 -key privkey.pem -out cacert.pem -days 365
Combine the two files into one for HAProxy to use. The order of the
files in this command matters, so do not change it::
cat cacert.pem privkey.pem > undercloud.pem
Move the file to a more appropriate location and set the SELinux context::
sudo mkdir /etc/pki/instack-certs
sudo cp undercloud.pem /etc/pki/instack-certs
sudo semanage fcontext -a -t etc_t "/etc/pki/instack-certs(/.*)?"
sudo restorecon -R /etc/pki/instack-certs
``undercloud_service_certificate`` should then be set to
``/etc/pki/instack-certs/undercloud.pem``.
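The resulting ``undercloud.conf`` entry would then look like this::

   [DEFAULT]
   undercloud_service_certificate = /etc/pki/instack-certs/undercloud.pem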
Add the self-signed CA certificate to the undercloud system's trusted
certificate store::
sudo cp cacert.pem /etc/pki/ca-trust/source/anchors/
sudo update-ca-trust extract
.. note:: If you're using a self-signed or autogenerated certificate for the
undercloud, the overcloud nodes will need to trust it. So the
contents of the certificate need to be set in the CAMap as described
in ":ref:`ca-trust`" section.
Overcloud SSL
-------------
Certificate and Public VIP Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The public VIP of the deployed overcloud needs to be predictable in order for
the SSL certificate to be configured properly. There are two options for
configuring the certificate:
#. The certificate's Common Name can be set to the IP of the public
VIP. In this case, the Common Name must match *exactly*. If the public
VIP is ``10.0.0.1``, the certificate's Common Name must also be ``10.0.0.1``.
Wild cards will not work.
#. The overcloud endpoints can be configured to point at
a DNS name. In this case, the certificate's Common Name must be valid
for the FQDN of the overcloud endpoints. Wild cards should work fine.
Note that this option also requires pre-configuration of the specified
DNS server with the appropriate FQDN and public VIP.
In either case, the public VIP must be explicitly specified as part of the
deployment configuration. This can be done by passing an environment file
like the following::
parameter_defaults:
PublicVirtualFixedIPs: [{'ip_address':'10.0.0.1'}]
.. note:: If network isolation is not in use, the ControlFixedIPs parameter
should be set instead.
The selected IP should fall in the specified allocation range for the public
network.
Certificate Details
~~~~~~~~~~~~~~~~~~~
.. This admonition is intentionally left class-less because it is only used
on the SSL page.
.. admonition:: Self-Signed SSL
It is not recommended that the self-signed certificate is trusted; so for
this purpose, having a self-signed CA certificate is a better choice. In
this case we will trust the self-signed CA certificate, and not the leaf
certificate that will be used for the public VIP; this leaf certificate,
however, will be signed by the self-signed CA.
For the self-signed case, just the predictable public VIP method will
be documented, as DNS configuration is outside the scope of this document.
Generate a private key::
openssl genrsa -out overcloud-ca-privkey.pem 2048
Generate a self-signed CA certificate. This command will prompt for some
identifying information. Most of the fields don't matter, and the CN should
not be the same as the one we'll give the leaf certificate. You can choose a
CN for this such as "TripleO CA"::
openssl req -new -x509 -key overcloud-ca-privkey.pem \
-out overcloud-cacert.pem -days 365
Add the self-signed CA certificate to the undercloud's trusted certificate
store. Adding this file to the overcloud nodes will be discussed later::
sudo cp overcloud-cacert.pem /etc/pki/ca-trust/source/anchors/
sudo update-ca-trust extract
This certificate location needs to be added to the ``enable-tls.yaml`` file
with the parameter ``PublicTLSCAFile`` like so::
parameter_defaults:
PublicTLSCAFile: '/etc/pki/ca-trust/source/anchors/overcloud-cacert.pem'
``PublicTLSCAFile`` ensures the CA Certificate will be added to the ``clouds.yaml``
file for the ``cacert`` parameter.
Generate the leaf certificate request and key that will be used for the
public VIP. To do this, we will create two files for the certificate
request. First, we create the server.csr.cnf::
[req]
default_bits = 2048
prompt = no
default_md = sha256
distinguished_name = dn
[dn]
C=AU
ST=Queensland
L=Brisbane
O=your-org
OU=admin
emailAddress=me@example.com
CN=openstack.example.com
Create v3.ext::
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1=openstack.example.com
Create the Key::
openssl req -new -sha256 -nodes -out server.csr \
-newkey rsa:2048 -keyout server-key.pem \
-config <( cat server.csr.cnf )
Create the certificate::
openssl x509 -req -in server.csr \
-CA overcloud-cacert.pem \
-CAkey overcloud-ca-privkey.pem \
-CAcreateserial -out server-cert.pem \
-days 500 -sha256 -extfile v3.ext
The following is a list of which files generated in the previous steps
map to which parameters in the SSL environment files::
overcloud-cacert.pem: SSLRootCertificate
server-key.pem: SSLKey
server-cert.pem: SSLCertificate
The contents of the private key and certificate files must be provided
to Heat as part of the deployment command. To do this, there is a sample
environment file in tripleo-heat-templates with fields for the file contents.
It is generally recommended that the original copy of tripleo-heat-templates
in ``/usr/share/openstack-tripleo-heat-templates`` not be altered, since it
could be overwritten by a package update at any time. Instead, make a copy
of the templates::
cp -r /usr/share/openstack-tripleo-heat-templates ~/ssl-heat-templates
Then edit the enable-tls.yaml environment file. If using the location from the
previous command, the correct file would be in
``~/ssl-heat-templates/environments/ssl/enable-tls.yaml``. Insert the contents of
the private key and certificate files in their respective locations.
.. admonition:: Stable Branch
:class: stable
In the Pike release the SSL environment files in the top-level environments
directory were deprecated and moved to the ``ssl`` subdirectory as
shown in the example paths. For Ocata and older the paths will still need
to refer to the top-level environments. The filenames are all the same, but
the ``ssl`` directory must be removed from the path.
.. note:: The certificate and key will be multi-line values, and all of the lines
must be indented to the same level.
An abbreviated version of how the file should look::
parameter_defaults:
SSLCertificate: |
-----BEGIN CERTIFICATE-----
MIIDgzCCAmugAwIBAgIJAKk46qw6ncJaMA0GCSqGSIb3DQEBCwUAMFgxCzAJBgNV
[snip]
sFW3S2roS4X0Af/kSSD8mlBBTFTCMBAj6rtLBKLaQbIxEpIzrgvp
-----END CERTIFICATE-----
[rest of file snipped]
``SSLKey`` should look similar, except with the value of the private key.
``SSLIntermediateCertificate`` can be set in the same way if the certificate
signer uses an intermediate certificate. Note that the ``|`` character must
be added as in the other values to indicate that this is a multi-line value.
When using a self-signed certificate or a signer whose certificate is
not in the default trust store on the overcloud image it will be necessary
to inject the certificate as part of the deploy process. This can be done
with the environment file ``~/ssl-heat-templates/environments/ssl/inject-trust-anchor.yaml``.
Insert the contents of the signer's root CA certificate in the appropriate
location, in a similar fashion to what was done for the certificate and key
above.
.. admonition:: Self-Signed SSL
:class: selfsigned
Injecting the root CA certificate is required for self-signed SSL. The
correct value to use is the contents of the ``overcloud-cacert.pem`` file.
DNS Endpoint Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~
When deploying with DNS endpoint addresses, two additional parameters must be
passed in a Heat environment file. These are ``CloudName`` and ``DnsServers``.
To do so, create a new file named something like ``cloudname.yaml``::
parameter_defaults:
CloudName: my-overcloud.my-domain.com
DnsServers: 10.0.0.100
Replace the values with ones appropriate for the target environment. Note that
the configured DNS server(s) must have an entry for the configured ``CloudName``
that matches the public VIP.
In addition, when a DNS endpoint is being used, make sure to pass the
``tls-endpoints-public-dns.yaml`` environment to your deploy command. See the examples
below.
Deploying an SSL Environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``enable-tls.yaml`` file must always be passed to use SSL on the public
endpoints. Depending on the specific configuration, additional files will
also be needed. Examples of the necessary parameters for different scenarios
follow.
IP-based certificate::
-e ~/ssl-heat-templates/environments/ssl/enable-tls.yaml -e ~/ssl-heat-templates/environments/ssl/tls-endpoints-public-ip.yaml
Self-signed IP-based certificate::
-e ~/ssl-heat-templates/environments/ssl/enable-tls.yaml -e ~/ssl-heat-templates/environments/ssl/tls-endpoints-public-ip.yaml -e ~/ssl-heat-templates/environments/ssl/inject-trust-anchor.yaml
DNS-based certificate::
-e ~/ssl-heat-templates/environments/ssl/enable-tls.yaml -e ~/ssl-heat-templates/environments/ssl/tls-endpoints-public-dns.yaml -e ~/cloudname.yaml
Self-signed DNS-based certificate::
-e ~/ssl-heat-templates/environments/ssl/enable-tls.yaml -e ~/ssl-heat-templates/environments/ssl/tls-endpoints-public-dns.yaml -e ~/cloudname.yaml -e ~/ssl-heat-templates/environments/ssl/inject-trust-anchor.yaml
It is also possible to get all your certificates from a CA. For this you need
to include the **environments/services/haproxy-public-tls-certmonger.yaml**
environment file.
.. _ca-trust:
Getting the overcloud to trust CAs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As mentioned above, it is possible to get the overcloud to trust a CA by using
the ``~/ssl-heat-templates/environments/ssl/inject-trust-anchor.yaml`` environment
and adding the necessary details there. However, that environment has the
restriction that it will only allow you to inject one CA. The
file ``~/ssl-heat-templates/environments/ssl/inject-trust-anchor-hiera.yaml`` is an
alternative that supports as many CA certificates as you need.
.. note:: This is only available since Newton. Older versions of TripleO don't
support this.
This file is a template of how you should fill the ``CAMap`` parameter which is
passed via parameter defaults. It looks like this::
CAMap:
first-ca-name:
content: |
The content of the CA cert goes here
second-ca-name:
content: |
The content of the CA cert goes here
where ``first-ca-name`` and ``second-ca-name`` will generate the files
``first-ca-name.pem`` and ``second-ca-name.pem`` respectively. These files will
be stored in the ``/etc/pki/ca-trust/source/anchors/`` directory in each node
of the overcloud and will be added to the trusted certificate chain of each of
the nodes. You must be careful that the content is a block string in YAML and
is in PEM format.
.. admonition:: Stable Branch
:class: stable
As of Rocky, the undercloud now defaults to using TLS through the
autogenerated certificate. If you're upgrading your undercloud and
had the ``generate_service_certificate`` option enabled, it also automatically
passes the CA certificate via the ``CAMap`` parameter.
.. note:: In some cases, such as when using Ceph, the overcloud needs to trust
the undercloud's CA certificate. If you're using the default CA in
the undercloud, and autogenerated your certificates, you'll need to
copy the contents of
``/etc/pki/ca-trust/source/anchors/cm-local-ca.pem`` into the
aforementioned ``CAMap`` parameter.
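For instance, a sketch of such an override, where ``undercloud-ca`` is an
arbitrary name and the content is the PEM text of ``cm-local-ca.pem``::

   parameter_defaults:
     CAMap:
       undercloud-ca:
         content: |
           <PEM contents of /etc/pki/ca-trust/source/anchors/cm-local-ca.pem>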

View File

@ -1,85 +0,0 @@
Use an external Swift Proxy with the Overcloud
===============================================
|project| supports use of an external Swift (or Ceph RadosGW) proxy, already
available to the operator.
Use of an external Swift proxy can be configured using a particular environment file
when deploying the overcloud, specifically `environments/swift-external.yaml`.
In the environment file above the user must adjust the parameters to fit
their setup by creating a custom environment file (e.g.
*~/my-swift-settings.yaml*)::
parameter_defaults:
ExternalSwiftPublicUrl: 'http://<Public Swift endpoint or loadbalancer>:9024/v1/AUTH_%(tenant_id)s'
ExternalSwiftInternalUrl: 'http://<Internal Swift endpoint>:9024/v1/AUTH_%(tenant_id)s'
ExternalSwiftAdminUrl: 'http://<Admin Swift endpoint>:9024'
ExternalSwiftUserTenant: 'service'
SwiftPassword: 'choose_a_random_password'
.. note::
When the external Swift is implemented by Ceph RadosGW, the endpoint will be
different; the /v1/ part needs to be replaced with /swift/v1, for example:
`http://<Public Swift endpoint or loadbalancer>:9024/v1/AUTH_%(tenant_id)s`
becomes
`http://<Public Swift endpoint or loadbalancer>:9024/swift/v1/AUTH_%(tenant_id)s`
The user can create an environment file with the required settings
and add the files above to the deploy command line::
openstack overcloud deploy --templates -e /usr/share/openstack-tripleo-heat-templates/environments/swift-external.yaml -e ~/my-swift-settings.yaml
Once the deploy has succeeded, the user has to complete the
configuration on the external Swift proxy, configuring it to use the
Keystone authentication provider. This environment file also creates
a service user called *swift* that can be used for this purpose. The
password for this user is defined by using the *SwiftPassword*
parameter, as shown above.
The external Swift proxy must use Keystone from the overcloud, otherwise
authentication will fail. The public Keystone endpoint must therefore be
accessible from the proxy.
The following snippet from `/etc/swift/proxy-server.conf` is an example
of how to configure the Swift proxy to use Keystone from the overcloud::
[pipeline:main]
pipeline = [... other middlewares ...] authtoken keystone [... other middlewares ...]
[filter:keystone]
use = egg:swift#keystoneauth
operator_roles = admin, SwiftOperator
cache = swift.cache
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
signing_dir = /tmp/keystone-signing-swift
www_authenticate_uri = http://<public Keystone endpoint>:5000/
auth_url = http://<admin Keystone endpoint>:5000/
password = <Password as defined in the environment parameters>
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = swift
cache = swift.cache
include_service_catalog = False
delay_auth_decision = True
For Ceph RadosGW instead, the following settings can be used::
rgw_keystone_api_version: 3
rgw_keystone_url: http://<public Keystone endpoint>:5000/
rgw_keystone_accepted_roles: 'member, Member, admin'
rgw_keystone_accepted_admin_roles: ResellerAdmin, swiftoperator
rgw_keystone_admin_domain: default
rgw_keystone_admin_project: service
rgw_keystone_admin_user: swift
rgw_keystone_admin_password: <Password as defined in the environment parameters>
rgw_keystone_implicit_tenants: 'true'
rgw_keystone_revocation_interval: '0'
rgw_s3_auth_use_keystone: 'true'
rgw_swift_versioning_enabled: 'true'
rgw_swift_account_in_url: 'true'

View File

@ -1,405 +0,0 @@
Deploying TLS-everywhere
========================
Setting up *TLS-everywhere* primarily consists of a few additional steps you
need to take on the undercloud and FreeIPA server. These steps consist of
installing additional packages and enrolling the undercloud host as a FreeIPA
client.
The OpenStack release you are deploying affects which tools you can use to
deploy *TLS-everywhere*. For deployments using Queens through Stein you must
use Novajoin. For deployments using Train or Ussuri, you can use either
Novajoin or tripleo-ipa. For deployments using Victoria or newer releases you
must use tripleo-ipa. Deployments using :ref:`deployed_server` must also use
tripleo-ipa. We recommend using tripleo-ipa whenever possible. Let's walk
through each step using both tripleo-ipa and Novajoin.
You can find a primer on the various TLS deployment strategies and components
in the :doc:`tls-introduction` documentation.
TLS-everywhere with tripleo-ipa
-------------------------------
.. note::
This deployment strategy is only supported on Train and newer releases. If
you're deploying a version older than Train, you'll need to use Novajoin to
accomplish *TLS-everywhere*, which is documented below.
Do the following steps before deploying your undercloud.
Configure DNS
~~~~~~~~~~~~~
*TLS-everywhere* deployments use FreeIPA as the DNS server. You need to set the
proper search domain and nameserver on the undercloud. To do this, you need to
know the deployment domain, the domain of the FreeIPA server, and the FreeIPA
server's IP address. For example, if the deployment domain is `example.com` and
the FreeIPA server domain is `bigcorp.com`, you should set the following in
`/etc/resolv.conf`::
search example.com bigcorp.com
nameserver $FREEIPA_IP_ADDRESS
This step ensures the undercloud can resolve newly added hosts and services
after TripleO enrolls them as FreeIPA clients. You only need to add both search
domains if they're different. If the FreeIPA server is using the same domain as
the deployment you only need to specify the deployment domain.
Configure FreeIPA
~~~~~~~~~~~~~~~~~
.. note::
This section assumes you have permissions to make writeable changes to your
FreeIPA server. If you don't have those permissions or direct access to the
FreeIPA server, you'll need to contact your FreeIPA administrator and have
them perform the following steps either using ansible scripts or manually.
Before you configure the undercloud, you need to ensure FreeIPA is configured
with the correct principal and privileges. This allows the undercloud to add
new hosts, services, and DNS records in FreeIPA during the overcloud
installation.
The undercloud will enroll itself as a FreeIPA client and download a keytab to
use for authentication during the installation process. To do this, it needs a
one-time password (OTP) from FreeIPA that you configure in ``undercloud.conf``.
You can generate the OTP manually if you have the correct permissions to add
hosts, modify permissions, update roles, and create principals in FreeIPA. You
need to perform these actions from an existing FreeIPA client. Note, the
FreeIPA server itself is enrolled as a client.
You can find a set of `playbooks
<https://opendev.org/x/tripleo-ipa/src/branch/master/tripleo_ipa/playbooks#user-content-tls-e-ipa-server-configuration-roles>`_
in tripleo-ipa that automate creating permissions, hosts, and principals for
the undercloud. These playbooks expect the ``IPA_PRINCIPAL``, which is a user
in FreeIPA, to have the necessary permissions to perform the tasks in each
playbook (e.g., ``ipa privilege-add-permission``, ``ipa host-add``, etc). They
also expect you to generate a kerberos token before executing each playbook.
Create a FreeIPA role
^^^^^^^^^^^^^^^^^^^^^
First, you need to create a new FreeIPA role with the appropriate permissions
for managing hosts, principals, services, and DNS entries::
$ kinit
$ export IPA_PASSWORD=$IPA_PASSWORD
$ export IPA_PRINCIPAL=$IPA_USER
$ export UNDERCLOUD_FQDN=undercloud.example.com
$ ansible-playbook /usr/share/ansible/tripleo-playbooks/ipa-server-create-role.yaml
Register the undercloud
^^^^^^^^^^^^^^^^^^^^^^^
Next, you need to register the undercloud as a FreeIPA client and generate an
OTP that the undercloud will use for enrollment, which is necessary before it
can manage entities in FreeIPA::
$ export IPA_PASSWORD=$IPA_PASSWORD
$ export IPA_PRINCIPAL=$IPA_USER
$ export UNDERCLOUD_FQDN=undercloud.example.com
$ ansible-playbook /usr/share/ansible/tripleo-playbooks/ipa-server-register-undercloud.yaml
If successful, the ansible output will contain an OTP. Save this OTP because
you will need it when you configure the undercloud.
Create a principal
^^^^^^^^^^^^^^^^^^
Finally, create a FreeIPA principal and grant it the necessary permissions to
manage hosts, services, and DNS entries in FreeIPA::
$ export IPA_PASSWORD=$IPA_PASSWORD
$ export IPA_PRINCIPAL=$IPA_USER
$ export UNDERCLOUD_FQDN=undercloud.example.com
$ ansible-playbook /usr/share/ansible/tripleo-playbooks/ipa-server-create-principal.yaml
Configure the Undercloud
~~~~~~~~~~~~~~~~~~~~~~~~
.. warning::
This section only provides guidance for configuring *TLS-everywhere*. You
need to make sure your undercloud configuration is complete before starting
the undercloud installation process.
Set the following variables in `undercloud.conf`::
ipa_otp = $OTP
overcloud_domain_name = example.com
undercloud_nameservers = $FREEIPA_IP_ADDRESS
Your undercloud configuration is ready to be deployed and has the necessary
changes to allow you to deploy *TLS-everywhere* for the overcloud.
Undercloud Install
~~~~~~~~~~~~~~~~~~
After you've had an opportunity to verify all undercloud configuration options,
including the options listed above, start the undercloud installation process::
$ openstack undercloud install
Undercloud Verification
~~~~~~~~~~~~~~~~~~~~~~~
You should verify that the undercloud was enrolled properly by listing the
hosts in FreeIPA::
$ sudo kinit
$ sudo ipa host-find
You should also confirm that ``/etc/novajoin/krb5.keytab`` exists on the
undercloud. The ``novajoin`` directory name is kept purely for legacy naming
reasons. The keytab is placed in this directory even when novajoin is not used
to enroll the undercloud as a FreeIPA client.
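If you want to inspect the keytab itself, ``klist`` can list the principals it
contains (an optional sanity check)::

    $ sudo klist -kt /etc/novajoin/krb5.keytab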
You can proceed with the :ref:`Overcloud TLS-everywhere` if the undercloud
installation was successful.
TLS-everywhere with Novajoin
----------------------------
.. warning:: This deployment strategy is only supported up to the Train release. We
recommend using tripleo-ipa to accomplish *TLS-everywhere* in newer
releases. Steps for using tripleo-ipa are documented above. This deployment
strategy has been removed in Victoria.
Do the following steps before deploying your undercloud.
Configure DNS
~~~~~~~~~~~~~
*TLS-everywhere* deployments use FreeIPA as the DNS server. You need to set the
proper search domain and nameserver on the undercloud. To do this, you need to
know the deployment domain, the domain of the FreeIPA server, and the FreeIPA
server's IP address. For example, if the deployment domain is `example.com` and
the FreeIPA server domain is `bigcorp.com`, you should set the following in
`/etc/resolv.conf`::
search example.com bigcorp.com
nameserver $FREEIPA_IP_ADDRESS
This step ensures the undercloud can resolve newly added hosts and services
after TripleO enrolls them as FreeIPA clients. You only need to add both search
domains if they're different. If the FreeIPA server is using the same domain as
the deployment you only need to specify the deployment domain.
Add Undercloud as a FreeIPA host
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Next, you need to add the undercloud as a host in FreeIPA. This will generate a
one-time password that TripleO uses to enroll the undercloud as a FreeIPA
client, giving the undercloud the permissions it needs to add new hosts,
services, and DNS records. You can use the following command-line utility to
add the undercloud as a FreeIPA host::
novajoin-ipa-setup \
--principal $IPA_USER \
--password $IPA_PASSWORD \
--server ipa.bigcorp.com \
--realm BIGCORP.COM \
--domain example.com \
--hostname undercloud.example.com \
--precreate
If successful, the command will return a one-time password. Save this password
because you will need it later to configure the undercloud.
Configure the Undercloud
~~~~~~~~~~~~~~~~~~~~~~~~
.. warning::
This section only provides guidance for configuring *TLS-everywhere*. You
need to make sure your undercloud configuration is complete before starting
the undercloud installation process.
Set the following variables in `undercloud.conf`::
enable_novajoin = True
ipa_otp = $IPA_OTP
overcloud_domain_name = example.com
Your undercloud configuration is ready to be deployed and has the necessary
changes to allow you to deploy *TLS-everywhere* for the overcloud.
Undercloud Install
~~~~~~~~~~~~~~~~~~
After you've had an opportunity to verify all undercloud configuration options,
including the options listed above, start the undercloud installation process::
$ openstack undercloud install
Undercloud Verification
~~~~~~~~~~~~~~~~~~~~~~~
You should verify that the undercloud was enrolled properly by listing the
hosts in FreeIPA::
$ sudo kinit
$ sudo ipa host-find
You should also confirm that ``/etc/novajoin/krb5.keytab`` exists on the
undercloud and that the ``novajoin`` and ``novajoin-notifier`` services are
running.
You can proceed with the :ref:`Overcloud TLS-everywhere` if the undercloud
installation was successful.
.. _Overcloud TLS-everywhere:
Configuring the Overcloud
-------------------------
*TLS-everywhere* requires you to set extra parameters and templates before you
deploy, or update, your overcloud. These changes consist of setting domain
information and including additional heat templates in your deploy command.
Let's walk through each step individually.
Set Parameters
~~~~~~~~~~~~~~
Next, you need to set parameters so that TripleO knows where to find your
FreeIPA server and how to configure DNS. You need to set these variables so that
TripleO adds DNS records that map to the correct hosts. Let's continue assuming
we have a file called ``tls-parameters.yaml`` and it contains the following
parameter_defaults section::
parameter_defaults:
DnsSearchDomains: ["example.com"]
DnsServers: ["192.168.1.13"]
CloudDomain: example.com
CloudName: overcloud.example.com
CloudNameInternal: overcloud.internalapi.example.com
CloudNameStorage: overcloud.storage.example.com
CloudNameStorageManagement: overcloud.storagemgmt.example.com
CloudNameCtlplane: overcloud.ctlplane.example.com
.. note::
If you are using deployed servers, you must also specify the following
parameter::
IdMInstallClientPackages: True
This option is required to install packages needed to enroll overcloud
hosts as FreeIPA clients. Deployments using Novajoin do not require this
option since the necessary packages are built into the overcloud images. If
you do not specify this argument, you need to ensure dependencies for
ansible-freeipa are present on the overcloud servers before deploying the
overcloud.
The ``DnsServers`` value above assumes we have FreeIPA available at
192.168.1.13.
It's important to note that you will need to update the `DnsSearchDomains` to
include the domain of the IPA server if it's different from the `CloudDomain`.
For example, if your `CloudDomain` is `example.com` and your IPA server is
located at `ipa.bigcorp.com`, then you need to include `bigcorp.com` as an
additional search domain::
DnsSearchDomains: ["example.com", "bigcorp.com"]
Composable Services
~~~~~~~~~~~~~~~~~~~
In addition to the parameters above, you might need to update the
``resource_registry`` in ``tls-parameters.yaml`` to include a composable
service. There are two composable services, one for Novajoin and the other is
for tripleo-ipa. TripleO uses the Novajoin composable service for deploying
*TLS-everywhere* by default. If you need or want to use tripleo-ipa, you'll
need to update the registry to use a different composable service. Both options
are described below.
Novajoin Composable Service
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This was the default option until Ussuri. As of Victoria, this option has
been removed, and deployers upgrading to Victoria will be migrated to tripleo-ipa.
For reference, the Novajoin-based composable service is located at
/usr/share/openstack-tripleo-heat-templates/deployment/ipa/ipaclient-baremetal-ansible.yaml
tripleo-ipa Composable Service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you're deploying *TLS-everywhere* with tripleo-ipa prior to Victoria, you need to
override the default Novajoin composable service. Add the following composable service to
the ``resource_registry`` in ``tls-parameters.yaml``::
resource_registry:
OS::TripleO::Services::IpaClient: /usr/share/openstack-tripleo-heat-templates/deployment/ipa/ipaservices-baremetal-ansible.yaml
As of Victoria, this is the only method for deploying *TLS-everywhere*.
Specify Templates
~~~~~~~~~~~~~~~~~
At this point, you should have all the settings configured for a successful
*TLS-everywhere* deployment. The only remaining step is to include the
following templates in your overcloud deploy command::
$ openstack overcloud deploy \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/tls-everywhere-endpoints-dns.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/services/haproxy-public-tls-certmonger.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-internal-tls.yaml \
-e tls-parameters.yaml
Remember, ``tls-parameters.yaml`` is the file containing the parameters above.
Overcloud Verification
----------------------
After the overcloud is deployed, you can confirm each endpoint is using HTTPS
by querying keystone's endpoints::
$ openstack --os-cloud overcloud endpoint list
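To inspect a specific endpoint's certificate directly, you can use ``openssl
s_client`` against the public endpoint. This is only a sketch; substitute the
host and port reported by the endpoint list (``overcloud.example.com:13000`` is
used here purely as an illustration)::

    $ echo | openssl s_client -connect overcloud.example.com:13000 2>/dev/null | openssl x509 -noout -issuer -dates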
Deleting Overclouds
-------------------
.. note::
This functionality is only invoked when you use the ``openstack overcloud
delete`` command using Train or newer releases. The overcloud is
technically a heat stack, but using ``openstack stack delete`` will not
clean up FreeIPA.
.. note::
This section is only applicable to deployments using tripleo-ipa. Novajoin
cleans up FreeIPA after consuming notifications about instance deletion.
The python-tripleoclient CLI cleans up hosts, services, and DNS records in
FreeIPA when you delete an overcloud::
$ openstack overcloud delete overcloud
You can verify that the hosts, services, and DNS records were removed by querying
FreeIPA::
$ kinit
$ ipa host-find
$ ipa service-find
$ ipa dnsrecord-find example.com.
The undercloud host, service, and DNS records are untouched when deleting
overclouds. Overcloud hosts, services, and DNS records are re-added to FreeIPA
during subsequent deployments.
If you don't want to clean up FreeIPA when you delete your overcloud, you can
use the ``openstack overcloud delete --skip-ipa-cleanup`` parameter. This
option leaves all overcloud hosts, services, and DNS records in FreeIPA. You
might find this useful if your FreeIPA server is unreachable or if you plan to
clean up FreeIPA later.
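For example, assuming the stack is named ``overcloud`` as above::

    $ openstack overcloud delete overcloud --skip-ipa-cleanup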
To clean up FreeIPA manually, you need the Ansible inventory file that
describes your deployment. If you don't have it handy, you can generate one
from the undercloud using::
$ source stackrc
$ tripleo-ansible-inventory --static-yaml-inventory generated-inventory.yaml
The utility will generate an inventory file and store it as
``generated-inventory.yaml``. You can invoke the playbook that cleans up
FreeIPA using::
$ ansible-playbook -i generated-inventory.yaml /usr/share/ansible/tripleo-playbooks/cli-cleanup-ipa.yml


@ -1,159 +0,0 @@
.. _tls-introduction:
TLS Introduction
================
Depending on your deployment's security requirements, you might be required to
encrypt network traffic. TripleO helps you accomplish this by supporting
various TLS deployment options. Let's start by understanding the different ways
we can deploy TLS.
The first option is to only encrypt traffic between clients and public
endpoints. This approach results in fewer certificates to manage, and we refer
to it as *public TLS*. Public endpoints, in this sense, are endpoints only
exposed to end-users. Traffic between internal endpoints is not encrypted.
The second option leverages TLS for all endpoints in the entire deployment,
including the overcloud, undercloud, and any systems that natively support TLS.
We typically refer to this approach as *TLS-everywhere* because we use TLS
everywhere we can, encrypting as much network traffic as possible. Certificate
management automation is critical with this approach because the number of
certificates scales linearly with the number of services in your deployment.
TripleO uses several components to help ease the burden of managing
certificates. This option is desirable for deployments susceptible to industry
regulation or those who have a higher security risk. Healthcare,
telecommunications, and the public sector are but a few industries that make
extensive use of *TLS-everywhere*. You can think of *public TLS* as a subset of
what *TLS-everywhere* offers.
TripleO uses the following components to implement *public TLS* and
*TLS-everywhere*.
Certmonger
----------
`Certmonger`_ is a daemon that helps simplify certificate management between
endpoints and certificate authorities (CAs). You can use it to generate key
pairs and certificate signing requests (CSRs). It can self-sign CSRs or send
them to external CAs for signing. Certmonger also tracks the expiration of each
certificate it manages. When a certificate is about to expire, Certmonger
requests a new certificate, updates it accordingly, and may restart a service.
This automation keeps the node enrolled as a client of the certificate
authority so that you dont have to update hundreds, or thousands, of
certificates manually. Certmonger runs on each node that provides an endpoint
in your deployment.
.. _Certmonger: https://pagure.io/certmonger
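To see what certmonger tracking looks like on a node, you can list the
certificate requests it manages (an optional check; request names and counts
vary by deployment)::

    $ sudo getcert list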
FreeIPA
-------
`FreeIPA`_ is a multi-purpose system that includes a certificate authority
(DogTag Certificate System), LDAP (389 Directory Server), MIT Kerberos, NTP
server, and DNS. TripleO uses all of these subsystems to implement TLS across
OpenStack. For example, if you use FreeIPA in your deployment, you can sign
CSRs with DogTag, as opposed to self-signing CSRs with certmonger locally.
FreeIPA runs on a supplemental node in your deployment, and it is kept separate
from other infrastructure.
.. _FreeIPA: https://www.freeipa.org/page/Main_Page
Installing FreeIPA
~~~~~~~~~~~~~~~~~~
Similar to setting up the undercloud node, you need to set the hostname
properly for the FreeIPA server. For this example, let's assume we're using
``example.com`` as the domain name for the deployment::
sudo hostnamectl set-hostname ipa.example.com
sudo hostnamectl set-hostname --transient ipa.example.com
Collect and install the FreeIPA packages::
sudo yum install -y ipa-server ipa-server-dns
Configure FreeIPA::
sudo ipa-server-install --realm EXAMPLE.COM \
--ds-password $DIRECTORY_MANAGER_PASSWORD \
--admin-password $ADMIN_PASSWORD \
--hostname ipa.example.com \
--setup-dns \
--auto-forwarders \
--auto-reverse \
--unattended
By default, FreeIPA does not publish its Certificate Revocation List (CRL)
on startup. Because the CRL is retrieved when the overcloud nodes request
certificates from FreeIPA, we should configure it to publish on startup and
restart FreeIPA::
sed -i -e \
's/ca.crl.MasterCRL.publishOnStart=.*/ca.crl.MasterCRL.publishOnStart=true/' \
/etc/pki/pki-tomcat/ca/CS.cfg
systemctl restart ipa
If your IPA server is not at version 4.8.5 or higher, you will need to add an
ACL to allow for the proper generation of certificates with an IP SAN::
cat << EOF | ldapmodify -x -D "cn=Directory Manager" -w $DIRECTORY_MANAGER_PASSWORD
dn: cn=dns,dc=example,dc=com
changetype: modify
add: aci
aci: (targetattr = "aaaarecord || arecord || cnamerecord || idnsname || objectclass || ptrrecord")(targetfilter = "(&(objectclass=idnsrecord)(|(aaaarecord=*)(arecord=*)(cnamerecord=*)(ptrrecord=*)(idnsZoneActive=TRUE)))")(version 3.0; acl "Allow hosts to read DNS A/AAA/CNAME/PTR records"; allow (read,search,compare) userdn = "ldap:///fqdn=*,cn=computers,cn=accounts,dc=example,dc=com";)
EOF
If you are upgrading to Victoria and you have been using novajoin, an additional permission
must be added to the Nova Host Manager role to allow the creation of DNS zone entries.
As an admin user::
ipa privilege-add-permission 'Nova Host Management' --permission \
'System: Modify Realm Domains'
Please refer to ``ipa-server-install --help`` for specifics on each argument or
reference the `FreeIPA documentation`_. The directions above are only a guide.
You may need to adjust certain values and configuration options to use FreeIPA,
depending on your requirements.
.. _FreeIPA documentation: https://www.freeipa.org/page/Documentation
Novajoin
--------
`Novajoin`_ is a vendor data service that extends nova's config drive
functionality and you use it when you want to deploy *TLS-everywhere*. When the
undercloud creates new nodes for the overcloud, novajoin creates a host entry
in FreeIPA to enable the overcloud node to enroll as a FreeIPA client.
If you want to use novajoin, you must have nova deployed in your undercloud.
Novajoin isn't supported for deployments using :doc:`deployed_server`.
Novajoin was introduced in the Queens release and is supported through Train.
The `tripleo-ipa`_ project, described below, effectively replaced novajoin in
the Train release.
As of Victoria, novajoin is no longer supported. If you are updating
from Ussuri, TripleO will automatically migrate your deployment from novajoin
to tripleo-ipa. TripleO will stop and remove the novajoin containers from
the undercloud. If in-flight validations are enabled, TripleO will run a
pre-upgrade validation to verify that the needed ACI and permissions have been
added to the FreeIPA server. See the previous section on "Installing FreeIPA"
for more details.
.. _Novajoin: https://opendev.org/x/novajoin
tripleo-ipa
-----------
`tripleo-ipa`_ is a collection of Ansible roles used to integrate FreeIPA into
TripleO deployments and you use it when you want to deploy *TLS-everywhere*.
These playbooks support deployments using nova and ironic in the undercloud as
well as :doc:`deployed_server`. This project was introduced in Train and
effectively replaces the novajoin metadata service.
We recommend using tripleo-ipa for all *TLS-everywhere* deployments as of the
Train release. As of Victoria, tripleo-ipa is the only supported method to
configure and deploy *TLS-everywhere*.
.. _tripleo-ipa: https://opendev.org/x/tripleo-ipa


@ -1,46 +0,0 @@
Tolerate deployment failures
============================
In large-scale deployments, infrastructure problems such as network outages,
wrong configurations applied on hardware, or hard drive issues happen very
often.
It is unpleasant to deploy hundreds of nodes and have the deployment fail
because only a few of them failed. In most large-scale use cases, deployers do
not care about these nodes, as long as the cloud can already be used with the
successfully deployed servers.
For that purpose, it is possible in |project| to specify a percentage value,
per role, that tells how many failures we tolerate.
Example: we deploy 50 compute nodes with the role "Compute". With the
following environment, the deployment will run to the end even if up to 5
nodes fail to deploy::
parameter_defaults:
ComputeMaxFailPercentage: 10
At the end of the deployment, a report will be printed and if nodes failed to
deploy, it'll be shown like this::
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~ State Information ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~ Number of nodes which did not deploy successfully: 3 ~~~~~~~~~~~~~~
This or these node(s) failed to deploy: compute3, compute24, compute29
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If one or more nodes failed to deploy, the tripleoclient return code
won't be 0 and an error will be printed with a Python trace. Very often the
cause can be found in the Ansible logs by searching for the nodes which
didn't deploy successfully.
If you want to target all the compute nodes in your deployment and you have more
than one role deploying computes, then you'll probably want to allocate one
value per role and distribute it based on your expectations and needs.
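For instance, a sketch with two roles (the parameter name follows the
``<RoleName>MaxFailPercentage`` pattern shown above for ``Compute``; the role
names and values are only examples)::

    parameter_defaults:
      ComputeMaxFailPercentage: 10
      ComputeHCIMaxFailPercentage: 5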
.. Warning::
For now, this only works for the execution of the deployment steps
from config-download playbooks. Minor updates, major upgrades, fast forward
upgrades and baremetal provisioning operations aren't supported yet, but
will certainly be in the future.


@ -1,61 +0,0 @@
Deploying custom tuned profiles
===============================
TripleO can be used to deploy Overcloud nodes with different tuned
profiles in addition to custom tuned profiles.
Deploying with existing tuned profiles
--------------------------------------
Create an environment file, e.g. `~/tuned.yaml`, with the following
content:
.. code-block:: yaml
parameter_defaults:
TunedProfileName: throughput-performance
Deploy the Overcloud as usual using the :doc:`CLI
<../deployment/install_overcloud>` and pass the environment
file using the `-e` option:
.. code-block:: bash
openstack overcloud deploy --templates -e ~/tuned.yaml
In the above example, the `throughput-performance` tuned profile will
be applied to the overcloud nodes. The TunedProfileName parameter may
be set to any tuned profile already on the node.
Deploying with custom tuned profiles
------------------------------------
If the tuned profile you wish to apply is not already on the overcloud
node being deployed, then TripleO can create the tuned profile for
you and will set the name of the new profile to whatever
TunedProfileName parameter you supply.
The following example creates a custom tuned profile called
`my_profile` which inherits from the existing throughput-performance
tuned profile and then adds a few extra tunings:
.. code-block:: yaml
parameter_defaults:
TunedCustomProfile: |
[main]
summary=my profile
include=throughput-performance
[sysctl]
vm.dirty_ratio = 10
vm.dirty_background_ratio = 3
[sysfs]
/sys/kernel/mm/ksm/run=0
TunedProfileName: my_profile
The above will create the file `/etc/tuned/my_profile/tuned.conf`
on the overcloud nodes and tuned.conf will contain the tuned
directives defined by the TunedCustomProfile parameter. The
TunedCustomProfile parameter should be set to a multiline string using
YAML's literal block scalar (i.e. the pipe '|') and that string should
contain valid tuned directives in INI format.
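To confirm the profile was applied, you can check the active tuned profile on
an overcloud node after deployment; the output should name the profile you
configured, for example::

    $ sudo tuned-adm active
    Current active profile: my_profile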


@ -1,152 +0,0 @@
(DEPRECATED) Installing an Undercloud Minion
============================================
.. note::
The minion functionality is deprecated starting in Wallaby.
.. note::
This is optional functionality that is helpful for large scale related
deployments.
.. note::
The minion functionality is only available starting from the Train cycle.
The undercloud can be scaled horizontally by installing and configuring undercloud
minions. The minions expand the number of heat-engine and ironic-conductor
services available to the overall undercloud installation. The undercloud minions can be
added and removed as necessary to scale processing during a deployment.
Installation Steps
------------------
.. note::
The minion requires that an undercloud has already been installed. The undercloud
installation process produces two output files that we will need to install the
minion.
#. Log in to your machine (baremetal or VM) where you want to install the
minion as a non-root user (such as the stack user)::
ssh <non-root-user>@<minion-machine>
.. note::
If you don't have a non-root user created yet, log in as root and create
one with following commands::
sudo useradd stack
sudo passwd stack # specify a password
echo "stack ALL=(root) NOPASSWD:ALL" | sudo tee -a /etc/sudoers.d/stack
sudo chmod 0440 /etc/sudoers.d/stack
su - stack
.. note::
The minion is intended to work correctly with SELinux enforcing.
Installations with SELinux in permissive or disabled mode are not recommended.
The ``minion_enable_selinux`` config option controls that setting.
.. note::
VLAN tagged interfaces must follow the if_name.vlan_id convention, for
example: eth0.vlan100 or bond0.vlan120.
#. Enable needed repositories:
.. admonition:: RHEL
:class: rhel
Enable optional repo::
sudo yum install -y yum-utils
sudo yum-config-manager --enable rhelosp-rhel-7-server-opt
.. include:: ../repositories.rst
.. We need to manually continue our list numbering here since the above
"include" directive breaks the numbering.
3. Install the TripleO CLI, which will pull in all other necessary packages as dependencies::
sudo yum install -y python-tripleoclient
#. Copy the `tripleo-undercloud-outputs.yaml` and `tripleo-undercloud-passwords.yaml`
from the undercloud to the node being provisioned as a minion::
scp tripleo-undercloud-outputs.yaml tripleo-undercloud-passwords.yaml <non-root-user>@<minion-machine>:
#. (Optional) Copy Undercloud CA certificate if SSL is enabled.
On the undercloud::
scp /etc/pki/ca-trust/source/anchors/cm-local-ca.pem <non-root-user>@<minion-machine>:
On the minion::
sudo update-ca-trust enable
sudo cp cm-local-ca.pem /etc/pki/ca-trust/source/anchors/undercloud-ca.pem
sudo update-ca-trust extract
#. Prepare the configuration file::
cp /usr/share/python-tripleoclient/minion.conf.sample ~/minion.conf
Update the settings in this file to match the desired configuration. The
options in minion.conf are configured similarly to those in undercloud.conf
on the undercloud node. It is important to configure the `minion_local_ip`
and the `minion_local_interface` to match the available interfaces on the
minion system; see the sketch after the note below.
.. note::
The minion configured interface and ip must be on the control plane network.
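A minimal sketch of the relevant settings (the section name, IP address and
interface name below are only examples; use values that match your
environment)::

    [DEFAULT]
    minion_local_ip = 192.168.24.50/24
    minion_local_interface = eth1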
#. Run the command to install the minion:
To deploy a minion::
openstack undercloud minion install
#. Verify services
- Heat Engine
By default only the heat-engine service is configured. To verify it has
been configured correctly, run the following on the undercloud::
source ~/stackrc
openstack orchestration service list
Example output::
(undercloud) [stack@undercloud ~]$ openstack orchestration service list
+------------------------+-------------+--------------------------------------+------------------------+--------+----------------------------+--------+
| Hostname | Binary | Engine ID | Host | Topic | Updated At | Status |
+------------------------+-------------+--------------------------------------+------------------------+--------+----------------------------+--------+
| undercloud.localdomain | heat-engine | b1af4e18-6859-4b73-b1cf-87674bd0ce1f | undercloud.localdomain | engine | 2019-07-25T23:19:34.000000 | up |
| minion.localdomain | heat-engine | 3a0d7080-06a9-4049-bb00-dbdcafbce0fc | minion.localdomain | engine | 2019-07-25T23:19:24.000000 | up |
| undercloud.localdomain | heat-engine | f6ccea46-2b30-4869-b06f-935c342a9ed6 | undercloud.localdomain | engine | 2019-07-25T23:19:34.000000 | up |
| minion.localdomain | heat-engine | eef759de-f7d3-472a-afbc-878eb6a3b9c0 | minion.localdomain | engine | 2019-07-25T23:19:24.000000 | up |
| minion.localdomain | heat-engine | 7f076afe-5116-45ad-9f08-aab7fbfda40b | minion.localdomain | engine | 2019-07-25T23:19:24.000000 | up |
| undercloud.localdomain | heat-engine | 038ead61-91f1-4739-8537-df63a9e2c917 | undercloud.localdomain | engine | 2019-07-25T23:19:34.000000 | up |
| undercloud.localdomain | heat-engine | f16a4f55-b053-4650-9202-781aef55698e | undercloud.localdomain | engine | 2019-07-25T23:19:36.000000 | up |
| minion.localdomain | heat-engine | e853d9c9-9f75-4958-ad9b-49e4b63b79b2 | minion.localdomain | engine | 2019-07-25T23:19:24.000000 | up |
+------------------------+-------------+--------------------------------------+------------------------+--------+----------------------------+--------+
- Ironic Conductor
If the ironic conductor service has been enabled, run the following on the
undercloud::
source ~/stackrc
baremetal conductor list
Example output::
(undercloud) [stack@undercloud ~]$ baremetal conductor list
+------------------------+-----------------+-------+
| Hostname | Conductor Group | Alive |
+------------------------+-----------------+-------+
| undercloud.localdomain | | True |
| minion.localdomain | | True |
+------------------------+-----------------+-------+


@ -1,440 +0,0 @@
Deploying with vDPA Support
===============================
TripleO can deploy Overcloud nodes with vDPA support. A new role ``ComputeVdpa``
has been added to create a custom ``roles_data.yaml`` with a composable vDPA role.
vDPA is very similar to SR-IOV and leverages the same OpenStack components. It's
important to note that vDPA can't function without OVS Hardware Offload.
Mellanox is the only NIC vendor currently supported with vDPA.
CentOS9/RHEL9 with a kernel of 5.14 or higher is required.
Execute the command below to create the ``roles_data.yaml``::
openstack overcloud roles generate -o roles_data.yaml Controller ComputeVdpa
Once a roles file is created, the following changes are required:
- Deploy Command
- Parameters
- Network Config
- Network and Port creation
Deploy Command
----------------
The deploy command should include the generated roles data file from the above
command.
It should also include the SR-IOV environment file to enable the
``neutron-sriov-agent`` service. All the required parameters are also specified
in this environment file. The parameters have to be configured according to the
baremetal on which vDPA needs to be enabled.
vDPA also requires mandatory kernel parameters to be set, such as
``intel_iommu=on iommu=pt`` on Intel machines. To apply kernel parameters
to the host, the ``KernelArgs`` role parameter has to be defined accordingly.
Adding the following arguments to the ``openstack overcloud deploy`` command
will do the trick::
openstack overcloud deploy --templates \
-r roles_data.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-sriov.yaml \
...
Parameters
----------
Unlike SR-IOV, vDPA devices shouldn't be added to ``NeutronPhysicalDevMappings`` but to the
``NovaPCIPassthrough``. The vDPA bridge should also be added to the ``NeutronBridgeMappings``
and the ``physical_network`` to the ``NeutronNetworkVLANRanges``.
The parameter ``KernelArgs`` should be provided in the deployment environment
file, with the set of kernel boot parameters to be applied on the
``ComputeVdpa`` role where vDPA is enabled.
The ``PciPassthroughFilter`` is required for vDPA. The ``NUMATopologyFilter`` will become
optional once ``libvirt`` supports locking of the guest memory. For now, it
is mandatory::
parameter_defaults:
NeutronTunnelTypes: ''
NeutronNetworkType: 'vlan'
NeutronNetworkVLANRanges:
- tenant:1300:1399
NovaSchedulerDefaultFilters:
- PciPassthroughFilter
- NUMATopologyFilter
- ...
ComputeVdpaParameters:
NovaPCIPassthrough:
- vendor_id: "15b3"
product_id: "101e"
address: "06:00.0"
physical_network: "tenant"
- vendor_id: "15b3"
product_id: "101e"
address: "06:00.1"
physical_network: "tenant"
KernelArgs: "[...] iommu=pt intel_iommu=on"
NeutronBridgeMappings:
- tenant:br-tenant
.. note::
It's important to use the ``product_id`` of a VF device and not that of a PF::
06:00.1 Ethernet controller [0200]: Mellanox Technologies MT2892 Family [ConnectX-6 Dx] [15b3:101d]
06:00.2 Ethernet controller [0200]: Mellanox Technologies ConnectX Family mlx5Gen Virtual Function [15b3:101e]
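The listing above can be obtained on the compute host with, for example::

    [root@computevdpa-0 ~]# lspci -nn | grep -i mellanox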
Network Config
--------------
vDPA-supported network interfaces should be specified in the network config
templates as the ``sriov_pf`` type. They should also be under an OVS bridge with
``link_mode`` set to ``switchdev``.
Example::
- type: ovs_bridge
name: br-tenant
members:
- type: sriov_pf
name: enp6s0f0
numvfs: 8
use_dhcp: false
vdpa: true
link_mode: switchdev
- type: sriov_pf
name: enp6s0f1
numvfs: 8
use_dhcp: false
vdpa: true
link_mode: switchdev
Network and Port Creation
-------------------------
When creating the network, it has to be mapped to the physical network::
$ openstack network create \
--provider-physical-network tenant \
--provider-network-type vlan \
--provider-segment 1337 \
vdpa_net1
$ openstack subnet create \
--network vdpa_net1 \
--subnet-range 192.0.2.0/24 \
--dhcp \
vdpa_subnet1
To allocate a port from a vdpa-enabled NIC, create a neutron port and set the
``--vnic-type`` to ``vdpa``::
$ openstack port create --network vdpa_net1 \
--vnic-type=vdpa \
vdpa_direct_port1
Scheduling instances
--------------------
Normally, the ``PciPassthroughFilter`` is sufficient to ensure that a vDPA instance will
land on a vDPA host. If we want to prevent other instances from using a vDPA host, we need
to set up the `isolate-aggregate feature
<https://docs.openstack.org/nova/latest/reference/isolate-aggregates.html>`_.
Example::
$ openstack --os-placement-api-version 1.6 trait create CUSTOM_VDPA
$ openstack aggregate create \
--zone vdpa-az1 \
vdpa_ag1
$ openstack hypervisor list -c ID -c "Hypervisor Hostname" -f value | grep vdpa | \
while read l
do UUID=$(echo $l | cut -f 1 -d " ")
H_NAME=$(echo $l | cut -f 2 -d " ")
echo $H_NAME $UUID
openstack aggregate add host vdpa_ag1 $H_NAME
traits=$(openstack --os-placement-api-version 1.6 resource provider trait list \
-f value $UUID | sed 's/^/--trait /')
openstack --os-placement-api-version 1.6 resource provider trait set \
$traits --trait CUSTOM_VDPA $UUID
done
$ openstack --os-compute-api-version 2.53 aggregate set \
--property trait:CUSTOM_VDPA=required \
vdpa_ag1
The flavor will map to that new aggregate with the ``trait:CUSTOM_VDPA`` property::
$ openstack --os-compute-api-version 2.86 flavor create \
--ram 4096 \
--disk 10 \
--vcpus 2 \
--property hw:cpu_policy=dedicated \
--property hw:cpu_realtime=True \
--property hw:cpu_realtime_mask=^0 \
--property trait:CUSTOM_VDPA=required \
vdpa_pinned
.. note::
It's also important to have the ``hw:cpu_realtime*`` properties here since
``libvirt`` doesn't currently support the locking of guest memory.
This should launch an instance on one of the vDPA hosts::
$ openstack server create \
--image cirros \
--flavor vdpa_pinned \
--nic port-id=vdpa_direct_port1 \
vdpa_test_1
Validations
-----------
Confirm that a PCI device is in switchdev mode::
[root@computevdpa-0 ~]# devlink dev eswitch show pci/0000:06:00.0
pci/0000:06:00.0: mode switchdev inline-mode none encap-mode basic
[root@computevdpa-0 ~]# devlink dev eswitch show pci/0000:06:00.1
pci/0000:06:00.1: mode switchdev inline-mode none encap-mode basic
Verify that offload is enabled in OVS::
[root@computevdpa-0 ~]# ovs-vsctl get Open_vSwitch . other_config:hw-offload
"true"
Validate the interfaces are added to the tenant bridge::
[root@computevdpa-0 ~]# ovs-vsctl show
be82eb5b-94c3-449d-98c8-0961b6b6b4c4
Manager "ptcp:6640:127.0.0.1"
is_connected: true
[...]
Bridge br-tenant
Controller "tcp:127.0.0.1:6633"
is_connected: true
fail_mode: secure
datapath_type: system
Port br-tenant
Interface br-tenant
type: internal
Port enp6s0f0
Interface enp6s0f0
Port phy-br-tenant
Interface phy-br-tenant
type: patch
options: {peer=int-br-tenant}
Port enp6s0f1
Interface enp6s0f1
[...]
Verify that the NICs have ``hw-tc-offload`` enabled::
[root@computevdpa-0 ~]# for i in {0..1};do ethtool -k enp6s0f$i | grep tc-offload;done
hw-tc-offload: on
hw-tc-offload: on
Verify that the udev rules have been created::
[root@computevdpa-0 ~]# cat /etc/udev/rules.d/80-persistent-os-net-config.rules
# This file is autogenerated by os-net-config
SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}!="", ATTR{phys_port_name}=="pf*vf*", ENV{NM_UNMANAGED}="1"
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", KERNELS=="0000:06:00.0", NAME="enp6s0f0"
SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}=="80ecee0003723f04", ATTR{phys_port_name}=="pf0vf*", IMPORT{program}="/etc/udev/rep-link-name.sh $attr{phys_port_name}", NAME="enp6s0f0_$env{NUMBER}"
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", KERNELS=="0000:06:00.1", NAME="enp6s0f1"
SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}=="80ecee0003723f04", ATTR{phys_port_name}=="pf1vf*", IMPORT{program}="/etc/udev/rep-link-name.sh $attr{phys_port_name}", NAME="enp6s0f1_$env{NUMBER}"
Validate that the ``numvfs`` are correctly defined::
[root@computevdpa-0 ~]# cat /sys/class/net/enp6s0f0/device/sriov_numvfs
8
[root@computevdpa-0 ~]# cat /sys/class/net/enp6s0f1/device/sriov_numvfs
8
Validate that the ``pci/passthrough_whitelist`` contains all the PFs::
[root@computevdpa-0 ~]# grep ^passthrough_whitelist /var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf
passthrough_whitelist={"address":"06:00.0","physical_network":"tenant","product_id":"101d","vendor_id":"15b3"}
passthrough_whitelist={"address":"06:00.1","physical_network":"tenant","product_id":"101d","vendor_id":"15b3"}
Verify the ``nodedev-list`` from ``libvirt``::
[root@computevdpa-0 ~]# podman exec -u0 nova_virtqemud virsh -c qemu:///system nodedev-list | grep -P "pci_0000_06|enp6|vdpa"
net_enp6s0f0np0_04_3f_72_ee_ec_84
net_enp6s0f0np0_0_1a_c1_a5_25_94_ef
net_enp6s0f0np0_1_3a_dc_1d_36_85_af
net_enp6s0f0np0_2_6a_95_0c_e9_8f_1a
net_enp6s0f0np0_3_ba_c8_5b_f5_70_cc
net_enp6s0f0np0_4_9e_03_86_23_cd_65
net_enp6s0f0np0_5_0a_5c_8b_c4_00_7a
net_enp6s0f0np0_6_2e_f6_bc_e6_6f_cd
net_enp6s0f0np0_7_ce_1e_b2_20_5e_15
net_enp6s0f1np1_04_3f_72_ee_ec_85
net_enp6s0f1np1_0_a6_04_9e_5a_cd_3b
net_enp6s0f1np1_1_56_5d_59_b0_df_17
net_enp6s0f1np1_2_de_ac_7c_3f_19_b1
net_enp6s0f1np1_3_16_0c_8c_47_40_5c
net_enp6s0f1np1_4_0e_a6_15_f5_68_77
net_enp6s0f1np1_5_e2_73_dc_f9_c2_46
net_enp6s0f1np1_6_e6_13_57_c9_cf_0f
net_enp6s0f1np1_7_62_10_4f_2b_1b_ae
net_vdpa06p00vf2_42_11_c8_97_aa_43
net_vdpa06p00vf3_2a_59_5e_32_3e_b7
net_vdpa06p00vf4_9a_5c_3f_c9_cc_42
net_vdpa06p00vf5_26_73_2a_e3_db_f9
net_vdpa06p00vf6_9a_bf_a9_e9_6b_06
net_vdpa06p00vf7_d2_1f_cc_00_a9_95
net_vdpa06p01vf0_ba_81_cb_7e_01_1d
net_vdpa06p01vf1_56_95_fa_5e_4a_51
net_vdpa06p01vf2_72_53_64_8d_12_98
net_vdpa06p01vf3_9e_ff_1d_6d_c1_4e
net_vdpa06p01vf4_96_20_f3_b1_69_ef
net_vdpa06p01vf5_ea_0c_8b_0b_3f_ff
net_vdpa06p01vf6_0a_53_4e_94_e0_8b
net_vdpa06p01vf7_16_84_48_e6_74_59
net_vdpa06p02vf0_b2_cc_fa_16_f0_52
net_vdpa06p02vf1_0a_12_1b_a2_1a_d3
pci_0000_06_00_0
pci_0000_06_00_1
pci_0000_06_00_2
pci_0000_06_00_3
pci_0000_06_00_4
pci_0000_06_00_5
pci_0000_06_00_6
pci_0000_06_00_7
pci_0000_06_01_0
pci_0000_06_01_1
pci_0000_06_01_2
pci_0000_06_01_3
pci_0000_06_01_4
pci_0000_06_01_5
pci_0000_06_01_6
pci_0000_06_01_7
pci_0000_06_02_0
pci_0000_06_02_1
vdpa_0000_06_00_2
vdpa_0000_06_00_3
vdpa_0000_06_00_4
vdpa_0000_06_00_5
vdpa_0000_06_00_6
vdpa_0000_06_00_7
vdpa_0000_06_01_0
vdpa_0000_06_01_1
vdpa_0000_06_01_2
vdpa_0000_06_01_3
vdpa_0000_06_01_4
vdpa_0000_06_01_5
vdpa_0000_06_01_6
vdpa_0000_06_01_7
vdpa_0000_06_02_0
vdpa_0000_06_02_1
Validate that the vDPA devices have been created; this should match the vdpa
devices from ``virsh nodedev-list``::
[root@computevdpa-0 ~]# ls -tlra /dev/vhost-vdpa-*
crw-------. 1 root root 241, 0 Jun 30 12:52 /dev/vhost-vdpa-0
crw-------. 1 root root 241, 1 Jun 30 12:52 /dev/vhost-vdpa-1
crw-------. 1 root root 241, 2 Jun 30 12:52 /dev/vhost-vdpa-2
crw-------. 1 root root 241, 3 Jun 30 12:52 /dev/vhost-vdpa-3
crw-------. 1 root root 241, 4 Jun 30 12:52 /dev/vhost-vdpa-4
crw-------. 1 root root 241, 5 Jun 30 12:53 /dev/vhost-vdpa-5
crw-------. 1 root root 241, 6 Jun 30 12:53 /dev/vhost-vdpa-6
crw-------. 1 root root 241, 7 Jun 30 12:53 /dev/vhost-vdpa-7
crw-------. 1 root root 241, 8 Jun 30 12:53 /dev/vhost-vdpa-8
crw-------. 1 root root 241, 9 Jun 30 12:53 /dev/vhost-vdpa-9
crw-------. 1 root root 241, 10 Jun 30 12:53 /dev/vhost-vdpa-10
crw-------. 1 root root 241, 11 Jun 30 12:53 /dev/vhost-vdpa-11
crw-------. 1 root root 241, 12 Jun 30 12:53 /dev/vhost-vdpa-12
crw-------. 1 root root 241, 13 Jun 30 12:53 /dev/vhost-vdpa-13
crw-------. 1 root root 241, 14 Jun 30 12:53 /dev/vhost-vdpa-14
crw-------. 1 root root 241, 15 Jun 30 12:53 /dev/vhost-vdpa-15
Validate the ``pci_devices`` table in the database from one of the controllers::
[root@controller-2 neutron]# podman exec -u0 $(podman ps -q -f name=galera) mysql -t -D nova -e "select address,product_id,vendor_id,dev_type,dev_id from pci_devices where address like '0000:06:%' and deleted=0;"
+--------------+------------+-----------+----------+------------------+
| address | product_id | vendor_id | dev_type | dev_id |
+--------------+------------+-----------+----------+------------------+
| 0000:06:01.1 | 101e | 15b3 | vdpa | pci_0000_06_01_1 |
| 0000:06:00.2 | 101e | 15b3 | vdpa | pci_0000_06_00_2 |
| 0000:06:00.3 | 101e | 15b3 | vdpa | pci_0000_06_00_3 |
| 0000:06:00.4 | 101e | 15b3 | vdpa | pci_0000_06_00_4 |
| 0000:06:00.5 | 101e | 15b3 | vdpa | pci_0000_06_00_5 |
| 0000:06:00.6 | 101e | 15b3 | vdpa | pci_0000_06_00_6 |
| 0000:06:00.7 | 101e | 15b3 | vdpa | pci_0000_06_00_7 |
| 0000:06:01.0 | 101e | 15b3 | vdpa | pci_0000_06_01_0 |
| 0000:06:01.2 | 101e | 15b3 | vdpa | pci_0000_06_01_2 |
| 0000:06:01.3 | 101e | 15b3 | vdpa | pci_0000_06_01_3 |
| 0000:06:01.4 | 101e | 15b3 | vdpa | pci_0000_06_01_4 |
| 0000:06:01.5 | 101e | 15b3 | vdpa | pci_0000_06_01_5 |
| 0000:06:01.6 | 101e | 15b3 | vdpa | pci_0000_06_01_6 |
| 0000:06:01.7 | 101e | 15b3 | vdpa | pci_0000_06_01_7 |
| 0000:06:02.0 | 101e | 15b3 | vdpa | pci_0000_06_02_0 |
| 0000:06:02.1 | 101e | 15b3 | vdpa | pci_0000_06_02_1 |
| 0000:06:00.2 | 101e | 15b3 | vdpa | pci_0000_06_00_2 |
| 0000:06:00.3 | 101e | 15b3 | vdpa | pci_0000_06_00_3 |
| 0000:06:00.4 | 101e | 15b3 | vdpa | pci_0000_06_00_4 |
| 0000:06:00.5 | 101e | 15b3 | vdpa | pci_0000_06_00_5 |
| 0000:06:00.6 | 101e | 15b3 | vdpa | pci_0000_06_00_6 |
| 0000:06:00.7 | 101e | 15b3 | vdpa | pci_0000_06_00_7 |
| 0000:06:01.0 | 101e | 15b3 | vdpa | pci_0000_06_01_0 |
| 0000:06:01.1 | 101e | 15b3 | vdpa | pci_0000_06_01_1 |
| 0000:06:01.2 | 101e | 15b3 | vdpa | pci_0000_06_01_2 |
| 0000:06:01.3 | 101e | 15b3 | vdpa | pci_0000_06_01_3 |
| 0000:06:01.4 | 101e | 15b3 | vdpa | pci_0000_06_01_4 |
| 0000:06:01.5 | 101e | 15b3 | vdpa | pci_0000_06_01_5 |
| 0000:06:01.6 | 101e | 15b3 | vdpa | pci_0000_06_01_6 |
| 0000:06:01.7 | 101e | 15b3 | vdpa | pci_0000_06_01_7 |
| 0000:06:02.0 | 101e | 15b3 | vdpa | pci_0000_06_02_0 |
| 0000:06:02.1 | 101e | 15b3 | vdpa | pci_0000_06_02_1 |
+--------------+------------+-----------+----------+------------------+
The ``vdpa`` command::
[root@computevdpa-0 ~]# vdpa dev
0000:06:01.0: type network mgmtdev pci/0000:06:01.0 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:00.6: type network mgmtdev pci/0000:06:00.6 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:00.4: type network mgmtdev pci/0000:06:00.4 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:00.2: type network mgmtdev pci/0000:06:00.2 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:01.1: type network mgmtdev pci/0000:06:01.1 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:00.7: type network mgmtdev pci/0000:06:00.7 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:00.5: type network mgmtdev pci/0000:06:00.5 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:00.3: type network mgmtdev pci/0000:06:00.3 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:02.0: type network mgmtdev pci/0000:06:02.0 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:01.6: type network mgmtdev pci/0000:06:01.6 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:01.4: type network mgmtdev pci/0000:06:01.4 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:01.2: type network mgmtdev pci/0000:06:01.2 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:02.1: type network mgmtdev pci/0000:06:02.1 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:01.7: type network mgmtdev pci/0000:06:01.7 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:01.5: type network mgmtdev pci/0000:06:01.5 vendor_id 5555 max_vqs 16 max_vq_size 256
0000:06:01.3: type network mgmtdev pci/0000:06:01.3 vendor_id 5555 max_vqs 16 max_vq_size 256
Validate the OVN agents::
(overcloud) [stack@undercloud-0 ~]$ openstack network agent list --host computevdpa-0.home.arpa
+--------------------------------------+----------------------+-------------------------+-------------------+-------+-------+----------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+----------------------+-------------------------+-------------------+-------+-------+----------------------------+
| ef2e6ced-e723-449c-bbf8-7513709f33ea | OVN Controller agent | computevdpa-0.home.arpa | | :-) | UP | ovn-controller |
| 7be39049-db5b-54fc-add1-4a0687160542 | OVN Metadata agent | computevdpa-0.home.arpa | | :-) | UP | neutron-ovn-metadata-agent |
+--------------------------------------+----------------------+-------------------------+-------------------+-------+-------+----------------------------+
Other useful commands for troubleshooting::
[root@computevdpa-0 ~]# ovs-appctl dpctl/dump-flows -m type=offloaded
[root@computevdpa-0 ~]# ovs-appctl dpctl/dump-flows -m
[root@computevdpa-0 ~]# tc filter show dev enp6s0f1_1 ingress
[root@computevdpa-0 ~]# tc -s filter show dev enp6s0f1_1 ingress
[root@computevdpa-0 ~]# tc monitor


@ -1,19 +0,0 @@
========================
TripleO Deployment Guide
========================
TripleO is a project aimed at installing, upgrading and operating OpenStack
clouds using OpenStack's own cloud facilities as the foundation - building on
Nova, Ironic, Neutron and Heat to automate cloud management at datacenter
scale.
.. toctree::
:maxdepth: 2
:includehidden:
environments/index
provisioning/index
features/index
deployment/index
post_deployment/index
troubleshooting/index
