Base Code for Tugboat Plugin and Addition of config files, templates
parent acd81d2b3f
commit 4a8e2720e1
3
doc/requirements.txt
Normal file
@ -0,0 +1,3 @@
|
||||
# Documentation
|
||||
sphinx>=1.6.2
|
||||
sphinx_rtd_theme==0.2.4
|
129
doc/source/conf.py
Normal file
@ -0,0 +1,129 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# tugboat documentation build configuration file, created by
|
||||
# sphinx-quickstart on Sat Sep 16 03:40:50 2017.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.path.abspath('../../'))
|
||||
import sphinx_rtd_theme
|
||||
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.todo',
|
||||
'sphinx.ext.viewcode',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
# templates_path = []
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
# source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'tugboat'
|
||||
copyright = u'2018 AT&T Intellectual Property.'
|
||||
author = u'Tugboat Authors'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = u'0.1.0'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = u'0.1.0'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = []
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
html_theme = "sphinx_rtd_theme"
|
||||
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = []
|
||||
|
||||
|
||||
# -- Options for HTMLHelp output ------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'ucpintdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
}
|
132
doc/source/getting_started.rst
Normal file
@ -0,0 +1,132 @@
|
||||
..
|
||||
Copyright 2018 AT&T Intellectual Property.
|
||||
All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
===============
|
||||
Getting Started
|
||||
===============
|
||||
|
||||
What is Spyglass?
|
||||
-----------------
|
||||
|
||||
Spyglass is a data extraction tool which can interface with
|
||||
different input data sources to generate site manifest YAML files.
|
||||
The data sources will provide all the configuration data needed
|
||||
for a site deployment. These site manifest YAML files generated
|
||||
by spyglass will be saved in a Git repository, from where Pegleg
|
||||
can access and aggregate them. This aggregated file can then be
|
||||
fed to Shipyard for site deployment / updates.
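
The exact schema of the intermediary YAML is not defined in this
documentation; the fragment below is only a hypothetical sketch of its
general shape, inferred from the keys consumed by the example J2
templates (``site_info``, ``network``, ``baremetal``):

.. code-block:: yaml

    # Hypothetical example values, for illustration only
    site_info:
      domain: example.com
    baremetal:
      rack01:
        host01:
          type: genesis
          host_profile: cp
          ip:
            oam: 10.0.0.10
            pxe: 10.0.1.10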
|
||||
|
||||
Architecture
|
||||
------------
|
||||
|
||||
::
|
||||
|
||||
+-----------+ +-------------+
|
||||
| | | +-------+ |
|
||||
| | +------>| |Generic| |
|
||||
+-----------+ | | | |Object | |
|
||||
|Tugboat(Xl)| I | | | +-------+ |
|
||||
|Plugin | N | | | | |
|
||||
+-----------+ T | | | | |
|
||||
| E | | | +------+ |
|
||||
+------------+ R | | | |Parser| +------> Intermediary YAML
|
||||
|Remote Data | F |---+ | +------+ |
|
||||
|SourcePlugin| A | | | |
|
||||
+------------+ C | | |(Intermediary YAML)
|
||||
| E | | | |
|
||||
| | | | |
|
||||
| H | | v |
|
||||
| A | | +---------+|(templates) +------------+
|
||||
| N | | |Site |+<--------------|Repository |
|
||||
| D | | |Processor||-------------->|Adapter |
|
||||
| L | | +---------+|(Generated +------------+
|
||||
| E | | ^ | Site Manifests)
|
||||
| R | | +---|-----+|
|
||||
| | | | J2 ||
|
||||
| | | |Templates||
|
||||
| | | +---------+|
|
||||
+-----------+ +-------------+
|
||||
|
||||
|
||||
|
||||
Basic Usage
|
||||
-----------
|
||||
|
||||
Before using Spyglass you must:
|
||||
|
||||
|
||||
1. Clone the Spyglass repository:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
git clone https://github.com/att-comdev/tugboat/tree/spyglass
|
||||
|
||||
2. Install the required packages in spyglass:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
pip3 install -r tugboat/requirements.txt
|
||||
|
||||
|
||||
CLI Options
|
||||
-----------
|
||||
|
||||
Usage: spyglass [OPTIONS]
|
||||
|
||||
Options:
|
||||
-s, --site TEXT Specify the site for which manifests to be
|
||||
generated
|
||||
-t, --type TEXT Specify the plugin type formation or tugboat
|
||||
-f, --formation_url TEXT Specify the formation url
|
||||
-u, --formation_user TEXT Specify the formation user id
|
||||
-p, --formation_password TEXT Specify the formation user password
|
||||
-i, --intermediary PATH Intermediary file path generate manifests,
|
||||
use -m also with this option
|
||||
-d, --additional_config PATH Site specific configuration details
|
||||
-g, --generate_intermediary Dump intermediary file from passed excel and
|
||||
excel spec
|
||||
-idir, --intermediary_dir PATH The path where intermediary file needs to be
|
||||
generated
|
||||
-e, --edit_intermediary / -nedit, --no_edit_intermediary
|
||||
Flag to let user edit intermediary
|
||||
-m, --generate_manifests Generate manifests from the generated
|
||||
intermediary file
|
||||
-mdir, --manifest_dir PATH The path where manifest files needs to be
|
||||
generated
|
||||
-x, --excel PATH Path to engineering excel file, to be passed
|
||||
with generate_intermediary
|
||||
-e, --excel_spec PATH Path to excel spec, to be passed with
|
||||
generate_intermediary
|
||||
-l, --loglevel INTEGER Loglevel NOTSET:0 ,DEBUG:10, INFO:20,
|
||||
WARNING:30, ERROR:40, CRITICAL:50 [default:
|
||||
20]
|
||||
--help Show this message and exit.
|
||||
|
||||
|
||||
1. Running Spyglass with Remote Data Source Plugin
|
||||
|
||||
spyglass -mg --type formation -f <URL> -u <user_id> -p <password> -d <site_config> -s <sitetype> --template_dir=<j2 template dir>
|
||||
|
||||
2. Running Spyglass with Excel Plugin
|
||||
|
||||
spyglass -mg --type tugboat -x <Excel File> -e <Excel Spec> -d <Site Config> -s <Region> --template_dir=<j2 template dir>
|
||||
|
||||
For example::

    spyglass -mg -t tugboat -x SiteDesignSpec_v0.1.xlsx -e excel_spec_upstream.yaml -d site_config.yaml -s airship-seaworthy --template_dir=<j2 template dir>

Sample 'excel_spec_upstream.yaml', 'SiteDesignSpec_v0.1.xlsx',
'site_config.yaml' and J2 templates can be found under the
'spyglass/examples' folder.
|
||||
|
34
doc/source/index.rst
Normal file
@ -0,0 +1,34 @@
|
||||
..
|
||||
Copyright 2018 AT&T Intellectual Property.
|
||||
All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License. You may obtain
|
||||
a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
License for the specific language governing permissions and limitations
|
||||
under the License.
|
||||
|
||||
======================
|
||||
Spyglass Documentation
|
||||
======================
|
||||
|
||||
Overview
|
||||
--------
|
||||
Spyglass is a data extraction tool which can interface with
|
||||
different input data sources to generate site manifest YAML files.
|
||||
The data sources will provide all the configuration data needed
|
||||
for a site deployment. These site manifest YAML files generated
|
||||
by spyglass will be saved in a Git repository, from where Pegleg
|
||||
can access and aggregate them. This aggregated file can then be
|
||||
fed to Shipyard for site deployment / updates.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
getting_started
|
0
spyglass/__init__.py
Normal file
0
spyglass/config/__init__.py
Normal file
38
spyglass/config/rules.yaml
Normal file
@ -0,0 +1,38 @@
|
||||
###########################
|
||||
# Global Rules #
|
||||
###########################
|
||||
#Rule1: ip_alloc_offset
|
||||
# Specifies the number of ip addresses to offset from
|
||||
# the start of subnet allocation pool while allocating it to host.
|
||||
# -for vlan it is set to 12 as default.
|
||||
# -for oob it is 10
|
||||
# -for all gateway ip addresses it is set to 1.
|
||||
# -for ingress vip it is 1
|
||||
# -for static end (non pxe) it is -1( means one but last ip of the pool)
|
||||
# -for dhcp end (pxe only) it is -2( 3rd from the last ip of the pool)
|
||||
#Rule2: host_profile_interfaces.
|
||||
# Specifies the network interface types and
|
||||
# their names for a particular hw profile
|
||||
#Rule3: hardware_profile
|
||||
# This specifies the profile details based on sitetype.
|
||||
# It specifies the profile name and host type for compute,
|
||||
# controller along with hw type
|
||||
---
|
||||
rule_ip_alloc_offset:
|
||||
name: ip_alloc_offset
|
||||
ip_alloc_offset:
|
||||
default: 12
|
||||
oob: 10
|
||||
gateway: 1
|
||||
ingress_vip: 1
|
||||
static_ip_end: -2
|
||||
dhcp_ip_end: -2
|
||||
rule_hardware_profile:
|
||||
name: hardware_profile
|
||||
hardware_profile:
|
||||
foundry:
|
||||
profile_name:
|
||||
compute: dp-r720
|
||||
ctrl: cp-r720
|
||||
hw_type: dell_r720
|
||||
...
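
The offsets above are consumed by the plugins when carving host and
reserved ranges out of a subnet allocation pool. A minimal sketch of that
idea (not part of this commit; the function name and the exact indexing
convention are assumptions made only for illustration):

    import ipaddress
    import yaml

    def illustrate_offsets(subnet_cidr, rules_file='spyglass/config/rules.yaml'):
        # Hypothetical helper, for illustration only
        with open(rules_file) as f:
            rules = yaml.safe_load(f)
        offsets = rules['rule_ip_alloc_offset']['ip_alloc_offset']
        pool = list(ipaddress.ip_network(subnet_cidr).hosts())
        return {
            'gateway': str(pool[offsets['gateway'] - 1]),       # offset 1 -> first usable ip
            'static_start': str(pool[offsets['default'] - 1]),  # offset 12 into the pool
            'oob_start': str(pool[offsets['oob'] - 1]),         # offset 10 for oob
            'dhcp_end': str(pool[offsets['dhcp_ip_end']]),      # negative offset counts from the pool end
        }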
|
@ -277,7 +277,6 @@ class BaseDataSourcePlugin(object):
|
||||
"""
|
||||
LOG.info("Extract baremetal information from plugin")
|
||||
baremetal = {}
|
||||
is_genesis = False
|
||||
hosts = self.get_hosts(self.region)
|
||||
|
||||
# For each host list fill host profile and network IPs
|
||||
@ -301,30 +300,19 @@ class BaseDataSourcePlugin(object):
|
||||
|
||||
# Fill network IP for this host
|
||||
temp_host['ip'] = {}
|
||||
temp_host['ip']['oob'] = temp_host_ips[host_name].get('oob', "")
|
||||
temp_host['ip']['oob'] = temp_host_ips[host_name].get(
|
||||
'oob', "#CHANGE_ME")
|
||||
temp_host['ip']['calico'] = temp_host_ips[host_name].get(
|
||||
'calico', "")
|
||||
temp_host['ip']['oam'] = temp_host_ips[host_name].get('oam', "")
|
||||
'calico', "#CHANGE_ME")
|
||||
temp_host['ip']['oam'] = temp_host_ips[host_name].get(
|
||||
'oam', "#CHANGE_ME")
|
||||
temp_host['ip']['storage'] = temp_host_ips[host_name].get(
|
||||
'storage', "")
|
||||
'storage', "#CHANGE_ME")
|
||||
temp_host['ip']['overlay'] = temp_host_ips[host_name].get(
|
||||
'overlay', "")
|
||||
'overlay', "#CHANGE_ME")
|
||||
temp_host['ip']['pxe'] = temp_host_ips[host_name].get(
|
||||
'pxe', "#CHANGE_ME")
|
||||
|
||||
# Filling rack_type( compute/controller/genesis)
|
||||
# "cp" host profile is controller
|
||||
# "ns" host profile is compute
|
||||
if (temp_host['host_profile'] == 'cp'):
|
||||
# The first controller node is designated as genesis
|
||||
if is_genesis is False:
|
||||
is_genesis = True
|
||||
temp_host['type'] = 'genesis'
|
||||
else:
|
||||
temp_host['type'] = 'controller'
|
||||
else:
|
||||
temp_host['type'] = 'compute'
|
||||
|
||||
baremetal[rack_name][host_name] = temp_host
|
||||
LOG.debug("Baremetal information:\n{}".format(
|
||||
pprint.pformat(baremetal)))
|
||||
@ -412,8 +400,9 @@ class BaseDataSourcePlugin(object):
|
||||
for net in networks:
|
||||
tmp_net = {}
|
||||
if net['name'] in networks_to_scan:
|
||||
tmp_net['subnet'] = net['subnet']
|
||||
tmp_net['vlan'] = net['vlan']
|
||||
tmp_net['subnet'] = net.get('subnet', '#CHANGE_ME')
|
||||
if ((net['name'] != 'ingress') and (net['name'] != 'oob')):
|
||||
tmp_net['vlan'] = net.get('vlan', '#CHANGE_ME')
|
||||
|
||||
network_data['vlan_network_data'][net['name']] = tmp_net
|
||||
|
||||
|
@ -433,8 +433,8 @@ class FormationPlugin(BaseDataSourcePlugin):
|
||||
name_pattern = "(?i)({})".format(name)
|
||||
if re.search(name_pattern, vlan_name):
|
||||
return network_names[name]
|
||||
|
||||
return ("")
|
||||
# Return empty string if vlan_name is not matched with network_names
|
||||
return ""
|
||||
|
||||
def get_dns_servers(self, region):
|
||||
try:
|
||||
|
35
spyglass/data_extractor/plugins/tugboat/check_exceptions.py
Normal file
@ -0,0 +1,35 @@
|
||||
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class BaseError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class NotEnoughIp(BaseError):
|
||||
def __init__(self, cidr, total_nodes):
|
||||
self.cidr = cidr
|
||||
self.total_nodes = total_nodes
|
||||
|
||||
def display_error(self):
|
||||
print('{} can not handle {} nodes'.format(self.cidr, self.total_nodes))
|
||||
|
||||
|
||||
class NoSpecMatched(BaseError):
|
||||
def __init__(self, excel_specs):
|
||||
self.specs = excel_specs
|
||||
|
||||
def display_error(self):
|
||||
print('No spec matched. Following are the available specs:\n{}'.format(
|
||||
self.specs))
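
A brief usage sketch for these exceptions (hypothetical; whether
ExcelParser raises NoSpecMatched is an assumption made only for
illustration):

    from spyglass.data_extractor.plugins.tugboat.check_exceptions import NoSpecMatched
    from spyglass.data_extractor.plugins.tugboat.excel_parser import ExcelParser

    try:
        # Hypothetical caller; file names are placeholders
        parsed = ExcelParser('SiteDesignSpec_v0.1.xlsx',
                             'excel_spec_upstream.yaml').get_data()
    except NoSpecMatched as err:
        err.display_error()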
|
350
spyglass/data_extractor/plugins/tugboat/tugboat.py
Normal file
@ -0,0 +1,350 @@
|
||||
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the 'License');
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an 'AS IS' BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
import pprint
|
||||
import re
|
||||
from spyglass.data_extractor.base import BaseDataSourcePlugin
|
||||
from spyglass.data_extractor.plugins.tugboat.excel_parser import ExcelParser
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TugboatPlugin(BaseDataSourcePlugin):
|
||||
def __init__(self, region):
|
||||
LOG.info("Tugboat Initializing")
|
||||
self.source_type = 'excel'
|
||||
self.source_name = 'tugboat'
|
||||
|
||||
# Configuration parameters
|
||||
self.excel_path = None
|
||||
self.excel_spec = None
|
||||
|
||||
# Site related data
|
||||
self.region = region
|
||||
|
||||
# Raw data from excel
|
||||
self.parsed_xl_data = None
|
||||
|
||||
LOG.info("Initiated data extractor plugin:{}".format(self.source_name))
|
||||
|
||||
def set_config_opts(self, conf):
|
||||
"""
|
||||
Placeholder to set configuration options
|
||||
specific to each plugin.
|
||||
|
||||
:param dict conf: Configuration options as dict
|
||||
|
||||
Example: conf = { 'excel_spec': 'spec1.yaml',
|
||||
'excel_path': 'excel.xls' }
|
||||
|
||||
Each plugin will have their own config opts.
|
||||
"""
|
||||
self.excel_path = conf['excel_path']
|
||||
self.excel_spec = conf['excel_spec']
|
||||
|
||||
# Extract raw data from excel sheets
|
||||
self._get_excel_obj()
|
||||
self._extract_raw_data_from_excel()
|
||||
return
|
||||
|
||||
def get_plugin_conf(self, kwargs):
|
||||
""" Validates the plugin param from CLI and return if correct
|
||||
|
||||
|
||||
Ideally the Click module should report an error if the excel file
|
||||
and excel specs are not specified. The below code has been
|
||||
written as an additional safeguard.
|
||||
"""
|
||||
try:
|
||||
assert (len(
|
||||
kwargs['excel'])), "Engineering Spec file not specified"
|
||||
excel_file_info = kwargs['excel']
|
||||
assert (kwargs['excel_spec']
|
||||
) is not None, "Excel Spec file not specified"
|
||||
excel_spec_info = kwargs['excel_spec']
|
||||
except AssertionError as e:
|
||||
LOG.error("{}:Spyglass exited!".format(e))
|
||||
exit()
|
||||
plugin_conf = {
|
||||
'excel_path': excel_file_info,
|
||||
'excel_spec': excel_spec_info
|
||||
}
|
||||
return plugin_conf
|
||||
|
||||
def get_hosts(self, region, rack=None):
|
||||
"""Return list of hosts in the region
|
||||
:param string region: Region name
|
||||
:param string rack: Rack name
|
||||
:returns: list of hosts information
|
||||
:rtype: list of dict
|
||||
Example: [
|
||||
{
|
||||
'name': 'host01',
|
||||
'type': 'controller',
|
||||
'host_profile': 'hp_01'
|
||||
},
|
||||
{
|
||||
'name': 'host02',
|
||||
'type': 'compute',
|
||||
'host_profile': 'hp_02'}
|
||||
]
|
||||
"""
|
||||
LOG.info("Get Host Information")
|
||||
ipmi_data = self.parsed_xl_data['ipmi_data'][0]
|
||||
rackwise_hosts = self._get_rackwise_hosts()
|
||||
host_list = []
|
||||
for rack in rackwise_hosts.keys():
|
||||
for host in rackwise_hosts[rack]:
|
||||
host_list.append({
|
||||
'rack_name':
|
||||
rack,
|
||||
'name':
|
||||
host,
|
||||
'host_profile':
|
||||
ipmi_data[host]['host_profile']
|
||||
})
|
||||
return host_list
|
||||
|
||||
def get_networks(self, region):
|
||||
""" Extracts vlan network info from raw network data from excel"""
|
||||
vlan_list = []
|
||||
# Network data extracted from xl is formatted to have a predictable
|
||||
# data type. For example, 'VLAN 45' extracted from xl is formatted as 45
|
||||
vlan_pattern = r'\d+'
|
||||
private_net = self.parsed_xl_data['network_data']['private']
|
||||
public_net = self.parsed_xl_data['network_data']['public']
|
||||
# Extract network information from private and public network data
|
||||
for net_type, net_val in itertools.chain(private_net.items(),
|
||||
public_net.items()):
|
||||
tmp_vlan = {}
|
||||
# Ingress is a special network that has no vlan, only a subnet string
|
||||
# So treatment for ingress is different
|
||||
if net_type != 'ingress':
|
||||
# standardize the network name as net_type may be different.
|
||||
# For example, instead of pxe it may be PXE, or instead of calico
|
||||
# it may be ksn. Valid network names are pxe, calico, oob, oam,
|
||||
# overlay, storage, ingress
|
||||
tmp_vlan['name'] = self._get_network_name_from_vlan_name(
|
||||
net_type)
|
||||
|
||||
# extract vlan tag. It was extracted from xl file as 'VLAN 45'
|
||||
# The code below extracts the numeric data from net_val['vlan']
|
||||
if net_val.get('vlan', "") is not "":
|
||||
value = re.findall(vlan_pattern, net_val['vlan'])
|
||||
tmp_vlan['vlan'] = value[0]
|
||||
else:
|
||||
tmp_vlan['vlan'] = "#CHANGE_ME"
|
||||
|
||||
tmp_vlan['subnet'] = net_val.get('subnet', "#CHANGE_ME")
|
||||
tmp_vlan['gateway'] = net_val.get('gateway', "#CHANGE_ME")
|
||||
else:
|
||||
tmp_vlan['name'] = 'ingress'
|
||||
tmp_vlan['subnet'] = net_val
|
||||
vlan_list.append(tmp_vlan)
|
||||
LOG.debug("vlan list extracted from tugboat:\n{}".format(
|
||||
pprint.pformat(vlan_list)))
|
||||
return vlan_list
|
||||
|
||||
def get_ips(self, region, host=None):
|
||||
"""Return list of IPs on the host
|
||||
:param string region: Region name
|
||||
:param string host: Host name
|
||||
:returns: Dict of IPs per network on the host
|
||||
:rtype: dict
|
||||
Example: {'oob': {'ipv4': '192.168.1.10'},
|
||||
'pxe': {'ipv4': '192.168.2.10'}}
|
||||
The network name from get_networks is expected to be the keys of this
|
||||
dict. In case some networks are missed, they are expected to be either
|
||||
DHCP or internally generated in the next steps by the design rules.
|
||||
"""
|
||||
|
||||
ip_ = {}
|
||||
ipmi_data = self.parsed_xl_data['ipmi_data'][0]
|
||||
ip_[host] = {
|
||||
'oob': ipmi_data[host].get('ipmi_address', '#CHANGE_ME'),
|
||||
'oam': ipmi_data[host].get('oam', '#CHANGE_ME'),
|
||||
'calico': ipmi_data[host].get('calico', '#CHANGE_ME'),
|
||||
'overlay': ipmi_data[host].get('overlay', '#CHANGE_ME'),
|
||||
'pxe': ipmi_data[host].get('pxe', '#CHANGE_ME'),
|
||||
'storage': ipmi_data[host].get('storage', '#CHANGE_ME')
|
||||
}
|
||||
return ip_
|
||||
|
||||
def get_ldap_information(self, region):
|
||||
""" Extract ldap information from excel"""
|
||||
|
||||
ldap_raw_data = self.parsed_xl_data['site_info']['ldap']
|
||||
ldap_info = {}
|
||||
# raw url is 'url: ldap://example.com' so we are converting to
|
||||
# 'ldap://example.com'
|
||||
url = ldap_raw_data.get('url', '#CHANGE_ME')
|
||||
try:
|
||||
ldap_info['url'] = url.split(' ')[1]
|
||||
ldap_info['domain'] = url.split('.')[1]
|
||||
except IndexError as e:
|
||||
LOG.error("url.split:{}".format(e))
|
||||
ldap_info['common_name'] = ldap_raw_data.get('common_name',
|
||||
'#CHANGE_ME')
|
||||
ldap_info['subdomain'] = ldap_raw_data.get('subdomain', '#CHANGE_ME')
|
||||
|
||||
return ldap_info
|
||||
|
||||
def get_ntp_servers(self, region):
|
||||
""" Returns a comma separated list of ntp ip addresses"""
|
||||
|
||||
ntp_server_list = self._get_formatted_server_list(
|
||||
self.parsed_xl_data['site_info']['ntp'])
|
||||
return ntp_server_list
|
||||
|
||||
def get_dns_servers(self, region):
|
||||
""" Returns a comma separated list of dns ip addresses"""
|
||||
dns_server_list = self._get_formatted_server_list(
|
||||
self.parsed_xl_data['site_info']['dns'])
|
||||
return dns_server_list
|
||||
|
||||
def get_domain_name(self, region):
|
||||
""" Returns domain name extracted from excel file"""
|
||||
|
||||
return self.parsed_xl_data['site_info']['domain']
|
||||
|
||||
def get_location_information(self, region):
|
||||
"""
|
||||
Prepare location data from information extracted
|
||||
by ExcelParser (i.e. raw data)
|
||||
"""
|
||||
location_data = self.parsed_xl_data['site_info']['location']
|
||||
|
||||
corridor_pattern = r'\d+'
|
||||
corridor_number = re.findall(corridor_pattern,
|
||||
location_data['corridor'])[0]
|
||||
name = location_data.get('name', '#CHANGE_ME')
|
||||
state = location_data.get('state', '#CHANGE_ME')
|
||||
country = location_data.get('country', '#CHANGE_ME')
|
||||
physical_location_id = location_data.get('physical_location', '')
|
||||
|
||||
return {
|
||||
'name': name,
|
||||
'physical_location_id': physical_location_id,
|
||||
'state': state,
|
||||
'country': country,
|
||||
'corridor': 'c{}'.format(corridor_number),
|
||||
}
|
||||
|
||||
def get_racks(self, region):
|
||||
# This function is not required since the excel plugin
|
||||
# already provide rack information.
|
||||
pass
|
||||
|
||||
def _get_excel_obj(self):
|
||||
""" Creation of an ExcelParser object to store site information.
|
||||
|
||||
The information is obtained based on an excel spec yaml file.
|
||||
This spec contains row, column and sheet information of
|
||||
the excel file from where site specific data can be extracted.
|
||||
"""
|
||||
self.excel_obj = ExcelParser(self.excel_path, self.excel_spec)
|
||||
|
||||
def _extract_raw_data_from_excel(self):
|
||||
""" Extracts raw information from excel file based on excel spec"""
|
||||
self.parsed_xl_data = self.excel_obj.get_data()
|
||||
|
||||
def _get_network_name_from_vlan_name(self, vlan_name):
|
||||
""" network names are ksn, oam, oob, overlay, storage, pxe
|
||||
|
||||
|
||||
This is a utility function to determine the acceptable network
|
||||
name from the vlan name extracted from the excel file
|
||||
|
||||
The following mapping rules apply:
|
||||
vlan_name contains "ksn or calico" the network name is "calico"
|
||||
vlan_name contains "storage" the network name is "storage"
|
||||
vlan_name contains "server" the network name is "oam"
|
||||
vlan_name contains "ovs" the network name is "overlay"
|
||||
vlan_name contains "oob" the network name is "oob"
|
||||
vlan_name contains "pxe" the network name is "pxe"
|
||||
"""
|
||||
network_names = [
|
||||
'ksn|calico', 'storage', 'oam|server', 'ovs|overlay', 'oob', 'pxe'
|
||||
]
|
||||
for name in network_names:
|
||||
# Make a pattern that would ignore case.
|
||||
# if name is 'ksn' pattern name is '(?i)(ksn)'
|
||||
name_pattern = "(?i)({})".format(name)
|
||||
if re.search(name_pattern, vlan_name):
|
||||
if name == 'ksn|calico':
|
||||
return 'calico'
|
||||
if name == 'storage':
|
||||
return 'storage'
|
||||
if name == 'oam|server':
|
||||
return 'oam'
|
||||
if name == 'ovs|overlay':
|
||||
return 'overlay'
|
||||
if name == 'oob':
|
||||
return 'oob'
|
||||
if name == 'pxe':
|
||||
return 'pxe'
|
||||
# if nothing matches
|
||||
LOG.error(
|
||||
"Unable to recognize VLAN name extracted from Plugin data source")
|
||||
return ("")
|
||||
|
||||
def _get_formatted_server_list(self, server_list):
|
||||
""" Format dns and ntp server list as comma separated string """
|
||||
|
||||
# dns/ntp server info from excel is of the format
|
||||
# 'xxx.xxx.xxx.xxx, (aaa.bbb.ccc.com)'
|
||||
# The function returns a comma separated string of ip addresses
|
||||
servers = []
|
||||
for data in server_list:
|
||||
if '(' not in data:
|
||||
servers.append(data)
|
||||
formatted_server_list = ','.join(servers)
|
||||
return formatted_server_list
|
||||
|
||||
def _get_rack(self, host):
|
||||
"""
|
||||
Get rack id from the rack string extracted
|
||||
from xl
|
||||
"""
|
||||
rack_pattern = r'\w.*(r\d+)\w.*'
|
||||
rack = re.findall(rack_pattern, host)[0]
|
||||
if not self.region:
|
||||
self.region = host.split(rack)[0]
|
||||
return rack
|
||||
|
||||
def _get_rackwise_hosts(self):
|
||||
""" Mapping hosts with rack ids """
|
||||
rackwise_hosts = {}
|
||||
hostnames = self.parsed_xl_data['ipmi_data'][1]
|
||||
racks = self._get_rack_data()
|
||||
for rack in racks:
|
||||
if racks[rack] not in rackwise_hosts:
|
||||
rackwise_hosts[racks[rack]] = []
|
||||
for host in hostnames:
|
||||
if rack in host:
|
||||
rackwise_hosts[racks[rack]].append(host)
|
||||
LOG.debug("rackwise hosts:\n%s", pprint.pformat(rackwise_hosts))
|
||||
return rackwise_hosts
|
||||
|
||||
def _get_rack_data(self):
|
||||
""" Format rack name """
|
||||
LOG.info("Getting rack data")
|
||||
racks = {}
|
||||
hostnames = self.parsed_xl_data['ipmi_data'][1]
|
||||
for host in hostnames:
|
||||
rack = self._get_rack(host)
|
||||
racks[rack] = rack.replace('r', 'rack')
|
||||
return racks
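
A minimal driver sketch showing how the plugin above is expected to be
wired together (hypothetical; the CLI normally performs these steps, and
the file names are placeholders):

    # Hypothetical usage, for illustration only
    plugin = TugboatPlugin('airship-seaworthy')
    conf = plugin.get_plugin_conf({'excel': 'SiteDesignSpec_v0.1.xlsx',
                                   'excel_spec': 'excel_spec_upstream.yaml'})
    plugin.set_config_opts(conf)        # parses the excel via ExcelParser
    hosts = plugin.get_hosts(plugin.region)
    networks = plugin.get_networks(plugin.region)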
|
BIN
spyglass/examples/SiteDesignSpec_v0.1.xlsx
Normal file
Binary file not shown.
63
spyglass/examples/excel_spec.yaml
Normal file
@ -0,0 +1,63 @@
|
||||
# Copyright 2018 The Openstack-Helm Authors.
|
||||
# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Important: Please modify the dictionary with appropriate
|
||||
# design spec file.
|
||||
---
|
||||
specs:
|
||||
# Design Spec file name: SiteDesignSpec_v0.1.xlsx
|
||||
xl_spec:
|
||||
ipmi_sheet_name: 'Site-Information'
|
||||
start_row: 4
|
||||
end_row: 15
|
||||
hostname_col: 2
|
||||
ipmi_address_col: 3
|
||||
host_profile_col: 5
|
||||
ipmi_gateway_col: 4
|
||||
private_ip_sheet: 'Site-Information'
|
||||
net_type_col: 1
|
||||
vlan_col: 2
|
||||
vlan_start_row: 19
|
||||
vlan_end_row: 30
|
||||
net_start_row: 33
|
||||
net_end_row: 40
|
||||
net_col: 2
|
||||
net_vlan_col: 1
|
||||
public_ip_sheet: 'Site-Information'
|
||||
oam_vlan_col: 1
|
||||
oam_ip_row: 43
|
||||
oam_ip_col: 2
|
||||
oob_net_row: 48
|
||||
oob_net_start_col: 2
|
||||
oob_net_end_col: 5
|
||||
ingress_ip_row: 45
|
||||
dns_ntp_ldap_sheet: 'Site-Information'
|
||||
login_domain_row: 52
|
||||
ldap_col: 2
|
||||
global_group: 53
|
||||
ldap_search_url_row: 54
|
||||
ntp_row: 55
|
||||
ntp_col: 2
|
||||
dns_row: 56
|
||||
dns_col: 2
|
||||
domain_row: 51
|
||||
domain_col: 2
|
||||
location_sheet: 'Site-Information'
|
||||
column: 2
|
||||
corridor_row: 59
|
||||
site_name_row: 58
|
||||
state_name_row: 60
|
||||
country_name_row: 61
|
||||
clli_name_row: 62
|
33
spyglass/examples/site_config.yaml
Normal file
@ -0,0 +1,33 @@
|
||||
##################################
|
||||
# Site Specific Tugboat Settings #
|
||||
##################################
|
||||
---
|
||||
site_info:
|
||||
ldap:
|
||||
common_name: test
|
||||
url: ldap://ldap.example.com
|
||||
subdomain: test
|
||||
ntp:
|
||||
servers: 10.10.10.10,20.20.20.20,30.30.30.30
|
||||
sitetype: foundry
|
||||
domain: atlantafoundry.com
|
||||
dns:
|
||||
servers: 8.8.8.8,8.8.4.4,208.67.222.222
|
||||
network:
|
||||
vlan_network_data:
|
||||
ingress:
|
||||
subnet:
|
||||
- 132.68.226.72/29
|
||||
bgp:
|
||||
peers:
|
||||
- '172.29.0.2'
|
||||
- '172.29.0.3'
|
||||
asnumber: 64671
|
||||
peer_asnumber: 64688
|
||||
storage:
|
||||
ceph:
|
||||
controller:
|
||||
osd_count: 6
|
||||
...
|
||||
|
||||
|
@ -0,0 +1,26 @@
|
||||
---
|
||||
schema: 'drydock/BootAction/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: promjoin
|
||||
storagePolicy: 'cleartext'
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
labels:
|
||||
application: 'drydock'
|
||||
data:
|
||||
signaling: false
|
||||
assets:
|
||||
- path: /opt/promjoin.sh
|
||||
type: file
|
||||
permissions: '555'
|
||||
{% raw %}
|
||||
location: promenade+http://promenade-api.ucp.svc.cluster.local/api/v1.0/join-scripts?design_ref={{ action.design_ref | urlencode }}&hostname={{ node.hostname }}&ip={{ node.network.calico.ip }}{% endif %}{% for k, v in node.labels.items() %}&labels.dynamic={{ k }}={{ v }}{% endfor %}
|
||||
|
||||
{% endraw %}
|
||||
location_pipeline:
|
||||
- template
|
||||
data_pipeline:
|
||||
- utf8_decode
|
||||
...
|
51
spyglass/examples/templates/baremetal/nodes.yaml.j2
Normal file
@ -0,0 +1,51 @@
|
||||
{% set control_count = [1] %}
|
||||
{% for rack in data['baremetal'].keys() %}
|
||||
{% for host in data['baremetal'][rack].keys()%}
|
||||
{% if data['baremetal'][rack][host]['type'] != 'genesis' %}
|
||||
---
|
||||
schema: 'drydock/BaremetalNode/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: {{ host }}
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
oob:
|
||||
account: 'root'
|
||||
{% if data['baremetal'][rack][host]['host_profile'] == 'cp' %}
|
||||
{% if control_count.append(control_count.pop()+1) %} {% endif %}
|
||||
{% if control_count[0] < 4 %}
|
||||
host_profile: nc-{{data['baremetal'][rack][host]['host_profile']}}-primary
|
||||
{% else %}
|
||||
host_profile: nc-{{data['baremetal'][rack][host]['host_profile']}}-secondary
|
||||
{% endif %}
|
||||
{% else %}
|
||||
host_profile: nc-{{data['baremetal'][rack][host]['host_profile']}}
|
||||
{% endif %}
|
||||
addressing:
|
||||
- network: oob
|
||||
address: {{ data['baremetal'][rack][host]['ip']['oob'] }}
|
||||
- network: oam
|
||||
address: {{ data['baremetal'][rack][host]['ip']['oam'] }}
|
||||
- network: pxe
|
||||
address: {{ data['baremetal'][rack][host]['ip']['pxe'] }}
|
||||
- network: storage
|
||||
address: {{ data['baremetal'][rack][host]['ip']['storage'] }}
|
||||
- network: calico
|
||||
address: {{ data['baremetal'][rack][host]['ip']['calico'] }}
|
||||
- network: overlay
|
||||
address: {{ data['baremetal'][rack][host]['ip']['overlay'] }}
|
||||
metadata:
|
||||
rack: RACK{{rack[-2:] }}
|
||||
tags:
|
||||
{% if data['baremetal'][rack][host]['type'] == 'compute' %}
|
||||
- 'workers'
|
||||
{% else %}
|
||||
- 'masters'
|
||||
{% endif %}
|
||||
...
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
@ -0,0 +1,90 @@
|
||||
---
|
||||
# The purpose of this file is to provide Shipyard a strategy to aid in the site's
|
||||
# deployment. This WILL require modification for each particular site. A successful
|
||||
# strategy for large labs that has been used in the past has been to split the Control
|
||||
# Plane hosts up from the computes, as well as the computes by rack. The below strategy
|
||||
# differs slightly, as the size of the lab is smaller. As such, the Control Plane hosts
|
||||
# deploy first, followed by half of the computes, followed by the second half of the
|
||||
# computes. Shipyard deployment strategies can be very useful in getting around certain
|
||||
# failures, like misbehaving nodes that may hold up the deployment. See more at:
|
||||
# https://github.com/openstack/airship-shipyard/blob/master/doc/source/site-definition-documents.rst#deployment-strategy
|
||||
schema: shipyard/DeploymentStrategy/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
replacement: true
|
||||
name: deployment-strategy
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: deployment-strategy-global
|
||||
actions:
|
||||
- method: replace
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
replacement: true
|
||||
data:
|
||||
groups:
|
||||
- name: masters
|
||||
critical: true
|
||||
depends_on: []
|
||||
selectors:
|
||||
- node_names: []
|
||||
node_labels: []
|
||||
node_tags:
|
||||
- masters
|
||||
rack_names: []
|
||||
success_criteria:
|
||||
percent_successful_nodes: 100
|
||||
# NEWSITE-CHANGEME: The number of "worker groups" should equal the number of site racks
|
||||
- name: worker_group_0
|
||||
critical: false
|
||||
depends_on:
|
||||
- masters
|
||||
selectors:
|
||||
# NEWSITE-CHANGEME: The following should be a list of the computes in the site's first rack
|
||||
- node_names:
|
||||
{% for rack in data['baremetal'].keys() %}
|
||||
{% for host in data['baremetal'][rack].keys()%}
|
||||
{% if rack == 'rack03' or rack == 'rack04' %}
|
||||
- {{ host }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
node_labels: []
|
||||
node_tags: []
|
||||
rack_names: []
|
||||
- name: worker_group_1
|
||||
critical: false
|
||||
depends_on:
|
||||
- masters
|
||||
selectors:
|
||||
# NEWSITE-CHANGEME: The following should be a list of the computes in the site's second rack
|
||||
- node_names:
|
||||
{% for rack in data['baremetal'].keys() %}
|
||||
{% for host in data['baremetal'][rack].keys()%}
|
||||
{% if rack == 'rack05' or rack == 'rack06' %}
|
||||
- {{ host }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
node_labels: []
|
||||
node_tags: []
|
||||
rack_names: []
|
||||
- name: workers
|
||||
critical: true
|
||||
# NEWSITE-CHANGEME: Populate with each worker group (should equal the number of site racks).
|
||||
# This group ensures a percent of success is achieved with the compute deployments.
|
||||
depends_on:
|
||||
- worker_group_0
|
||||
- worker_group_1
|
||||
selectors:
|
||||
- node_names: []
|
||||
node_labels: []
|
||||
node_tags:
|
||||
- workers
|
||||
rack_names: []
|
||||
success_criteria:
|
||||
percent_successful_nodes: 60
|
||||
...
|
||||
|
107
spyglass/examples/templates/networks/common_addresses.yaml.j2
Normal file
@ -0,0 +1,107 @@
|
||||
---
|
||||
schema: pegleg/CommonAddresses/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: common-addresses
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
calico:
|
||||
ip_autodetection_method: interface=bond1.{{ data['network']['vlan_network_data']['calico']['vlan']}}
|
||||
etcd:
|
||||
service_ip: 10.96.232.136
|
||||
ip_rule:
|
||||
gateway: {{ data['network']['vlan_network_data']['calico']['gateway']}}
|
||||
overlap_cidr: 10.96.0.0/15
|
||||
bgp:
|
||||
ipv4:
|
||||
public_service_cidr: {{ data['network']['vlan_network_data']['ingress']['subnet'][0] }}
|
||||
ingress_vip: {{ data['network']['bgp']['ingress_vip'] }}
|
||||
peers:
|
||||
{% for peer in data['network']['bgp']['peers'] %}
|
||||
- {{ peer }}
|
||||
{% endfor %}
|
||||
dns:
|
||||
cluster_domain: cluster.local
|
||||
service_ip: 10.96.0.10
|
||||
upstream_servers:
|
||||
{% for server in (data['site_info']['dns']['servers']).split(',') %}
|
||||
- {{ server }}
|
||||
{% endfor %}
|
||||
upstream_servers_joined: {{ data['site_info']['dns']['servers']}}
|
||||
ingress_domain: {{ data['site_info']['domain']|lower }}
|
||||
|
||||
genesis:
|
||||
hostname: {{ (data|get_role_wise_nodes)['genesis']['name'] }}
|
||||
{% for rack in data['baremetal'] %}
|
||||
{% for host in data['baremetal'][rack] %}
|
||||
{% if data['baremetal'][rack][host]['type'] == 'genesis' %}
|
||||
ip: {{ data['baremetal'][rack][host]['ip']['calico'] }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
bootstrap:
|
||||
ip: {{ (data|get_role_wise_nodes)['genesis']['pxe'] }}
|
||||
|
||||
kubernetes:
|
||||
api_service_ip: 10.96.0.1
|
||||
etcd_service_ip: 10.96.0.2
|
||||
pod_cidr: 10.97.0.0/16
|
||||
service_cidr: 10.96.0.0/16
|
||||
# misc k8s port settings
|
||||
apiserver_port: 6443
|
||||
haproxy_port: 6553
|
||||
service_node_port_range: 30000-32767
|
||||
|
||||
# etcd port settings
|
||||
etcd:
|
||||
container_port: 2379
|
||||
haproxy_port: 2378
|
||||
|
||||
masters:
|
||||
{% for host in (data|get_role_wise_nodes)['masters'] %}
|
||||
- hostname: {{ host }}
|
||||
{% endfor %}
|
||||
# NEWSITE-CHANGEME: Environment proxy information.
|
||||
# NOTE: Reference Airship sites do not deploy behind a proxy, so this proxy section
|
||||
# should be commented out.
|
||||
# However if you are in a lab that requires proxy, ensure that these proxy
|
||||
# settings are correct and reachable in your environment; otherwise update
|
||||
# them with the correct values for your environment.
|
||||
proxy:
|
||||
http: ""
|
||||
https: ""
|
||||
no_proxy: []
|
||||
|
||||
node_ports:
|
||||
drydock_api: 30000
|
||||
maas_api: 30001
|
||||
maas_proxy: 31800 # hardcoded in MAAS
|
||||
shipyard_api: 30003
|
||||
airflow_web: 30004
|
||||
ntp:
|
||||
servers_joined: {{ data['site_info']['ntp']['servers'] }}
|
||||
|
||||
ldap:
|
||||
base_url: {{ (data['site_info']['ldap']['url']|string).split('//')[1] }}
|
||||
url: {{ data['site_info']['ldap']['url'] }}
|
||||
auth_path: DC=test,DC=test,DC=com?sAMAccountName?sub?memberof=CN={{ data['site_info']['ldap']['common_name'] }},OU=Application,OU=Groups,DC=test,DC=test,DC=com
|
||||
common_name: {{ data['site_info']['ldap']['common_name'] }}
|
||||
subdomain: {{ data['site_info']['ldap']['subdomain'] }}
|
||||
domain: {{ (data['site_info']['ldap']['url']|string).split('.')[1] }}
|
||||
|
||||
storage:
|
||||
ceph:
|
||||
public_cidr: {{ data['network']['vlan_network_data']['storage']['subnet'] }}
|
||||
cluster_cidr: {{ data['network']['vlan_network_data']['storage']['subnet'] }}
|
||||
|
||||
neutron:
|
||||
tunnel_device: 'bond1.{{ data['network']['vlan_network_data']['overlay']['vlan'] }}'
|
||||
external_iface: 'bond1'
|
||||
|
||||
openvswitch:
|
||||
external_iface: 'bond1'
|
||||
...
|
||||
|
251
spyglass/examples/templates/networks/physical/networks.yaml.j2
Normal file
@ -0,0 +1,251 @@
|
||||
---
|
||||
schema: 'drydock/NetworkLink/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: oob
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
# MaaS doesnt own this network like it does the others, so the noconfig label
|
||||
# is specified.
|
||||
labels:
|
||||
noconfig: enabled
|
||||
bonding:
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: auto
|
||||
trunking:
|
||||
mode: disabled
|
||||
default_network: oob
|
||||
allowed_networks:
|
||||
- oob
|
||||
...
|
||||
---
|
||||
schema: 'drydock/Network/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: oob
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
cidr: {{ data['network']['vlan_network_data']['oob']['subnet'] }}
|
||||
routes:
|
||||
- subnet: '0.0.0.0/0'
|
||||
gateway: {{ data['network']['vlan_network_data']['oob']['gateway'] }}
|
||||
metric: 100
|
||||
ranges:
|
||||
- type: static
|
||||
start: {{ data['network']['vlan_network_data']['oob']['static_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['oob']['static_end'] }}
|
||||
...
|
||||
|
||||
---
|
||||
schema: 'drydock/NetworkLink/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: pxe
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
bonding:
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: auto
|
||||
trunking:
|
||||
mode: disabled
|
||||
default_network: pxe
|
||||
allowed_networks:
|
||||
- pxe
|
||||
...
|
||||
|
||||
---
|
||||
schema: 'drydock/Network/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: pxe
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
network_role: pxe
|
||||
topology: cruiser
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
cidr: {{ data['network']['vlan_network_data']['pxe']['subnet'] }}
|
||||
routes:
|
||||
{% for other_subnet in data['network']['vlan_network_data']['pxe']['routes'] %}
|
||||
- subnet: {{ other_subnet }}
|
||||
gateway: {{ data['network']['vlan_network_data']['pxe']['gateway'] }}
|
||||
metric: 100
|
||||
{% endfor %}
|
||||
ranges:
|
||||
- type: reserved
|
||||
start: {{ data['network']['vlan_network_data']['pxe']['reserved_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['pxe']['reserved_end'] }}
|
||||
- type: static
|
||||
start: {{ data['network']['vlan_network_data']['pxe']['static_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['pxe']['static_end'] }}
|
||||
- type: dhcp
|
||||
start: {{ data['network']['vlan_network_data']['pxe']['dhcp_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['pxe']['dhcp_end'] }}
|
||||
...
|
||||
|
||||
---
|
||||
schema: 'drydock/NetworkLink/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: data
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
bonding:
|
||||
mode: 802.3ad
|
||||
hash: layer3+4
|
||||
peer_rate: fast
|
||||
mon_rate: 100
|
||||
up_delay: 1000
|
||||
down_delay: 3000
|
||||
# NEWSITE-CHANGEME: Ensure the network switches in the environment are
|
||||
# configured for this MTU or greater. Even if switches are configured for or
|
||||
# can support a slightly higher MTU, there is no need (and negliable benefit)
|
||||
# to squeeze every last byte into the MTU (e.g., 9216 vs 9100). Leave MTU at
|
||||
# 9100 for maximum compatibility.
|
||||
mtu: 9100
|
||||
linkspeed: auto
|
||||
trunking:
|
||||
mode: 802.1q
|
||||
allowed_networks:
|
||||
- oam
|
||||
- storage
|
||||
- overlay
|
||||
- calico
|
||||
...
|
||||
|
||||
---
|
||||
schema: 'drydock/Network/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: oam
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: 'site'
|
||||
parentSelector:
|
||||
network_role: oam
|
||||
topology: cruiser
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
cidr: {{ data['network']['vlan_network_data']['oam']['subnet'] }}
|
||||
{% set flag = [0] %}
|
||||
{% for route in data['network']['vlan_network_data']['oam']['routes'] %}
|
||||
{% if flag[0] == 0 %}
|
||||
routes:
|
||||
{% endif %}
|
||||
{% if flag.append(flag.pop() + 1) %} {% endif %}
|
||||
- subnet: {{ route }}
|
||||
gateway: {{ data['network']['vlan_network_data']['oam']['gateway'] }}
|
||||
metric: 100
|
||||
{% endfor %}
|
||||
{% if flag[0] == 0 %}
|
||||
routes: []
|
||||
{% endif %}
|
||||
ranges:
|
||||
- type: reserved
|
||||
start: {{ data['network']['vlan_network_data']['oam']['reserved_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['oam']['reserved_end'] }}
|
||||
- type: static
|
||||
start: {{ data['network']['vlan_network_data']['oam']['static_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['oam']['static_end'] }}
|
||||
...
|
||||
|
||||
---
|
||||
schema: 'drydock/Network/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: storage
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
network_role: storage
|
||||
topology: cruiser
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
cidr: {{ data['network']['vlan_network_data']['storage']['subnet'] }}
|
||||
ranges:
|
||||
- type: reserved
|
||||
start: {{ data['network']['vlan_network_data']['storage']['reserved_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['storage']['reserved_end'] }}
|
||||
- type: static
|
||||
start: {{ data['network']['vlan_network_data']['storage']['static_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['storage']['static_end'] }}
|
||||
...
|
||||
|
||||
---
|
||||
schema: 'drydock/Network/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: overlay
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
network_role: os-overlay
|
||||
topology: cruiser
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
cidr: {{ data['network']['vlan_network_data']['overlay']['subnet'] }}
|
||||
ranges:
|
||||
- type: reserved
|
||||
start: {{ data['network']['vlan_network_data']['overlay']['reserved_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['overlay']['reserved_end'] }}
|
||||
- type: static
|
||||
start: {{ data['network']['vlan_network_data']['overlay']['static_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['overlay']['static_end'] }}
|
||||
...
|
||||
|
||||
---
|
||||
schema: 'drydock/Network/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: calico
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
network_role: calico
|
||||
topology: cruiser
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
cidr: {{ data['network']['vlan_network_data']['calico']['subnet'] }}
|
||||
ranges:
|
||||
- type: reserved
|
||||
start: {{ data['network']['vlan_network_data']['calico']['reserved_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['calico']['reserved_end'] }}
|
||||
- type: static
|
||||
start: {{ data['network']['vlan_network_data']['calico']['static_start'] }}
|
||||
end: {{ data['network']['vlan_network_data']['calico']['static_end'] }}
|
||||
...
|
||||
|
187
spyglass/examples/templates/pki/pki-catalogue.yaml.j2
Normal file
@ -0,0 +1,187 @@
|
||||
---
|
||||
schema: promenade/PKICatalog/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: cluster-certificates
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
certificate_authorities:
|
||||
kubernetes:
|
||||
description: CA for Kubernetes components
|
||||
certificates:
|
||||
- document_name: apiserver
|
||||
description: Service certificate for Kubernetes apiserver
|
||||
common_name: apiserver
|
||||
hosts:
|
||||
- localhost
|
||||
- 127.0.0.1
|
||||
- 10.96.0.1
|
||||
kubernetes_service_names:
|
||||
- kubernetes.default.svc.cluster.local
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
{% if data['baremetal'][racks][host]['type'] == 'genesis' %}
|
||||
|
||||
- document_name: kubelet-genesis
|
||||
common_name: system:node:{{ host }}
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
groups:
|
||||
- system:nodes
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
- document_name: kubelet-{{ host }}
|
||||
common_name: system:node:{{ host }}
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
groups:
|
||||
- system:nodes
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
- document_name: scheduler
|
||||
description: Service certificate for Kubernetes scheduler
|
||||
common_name: system:kube-scheduler
|
||||
- document_name: controller-manager
|
||||
description: certificate for controller-manager
|
||||
common_name: system:kube-controller-manager
|
||||
- document_name: admin
|
||||
common_name: admin
|
||||
groups:
|
||||
- system:masters
|
||||
- document_name: armada
|
||||
common_name: armada
|
||||
groups:
|
||||
- system:masters
|
||||
kubernetes-etcd:
|
||||
description: Certificates for Kubernetes's etcd servers
|
||||
certificates:
|
||||
- document_name: apiserver-etcd
|
||||
description: etcd client certificate for use by Kubernetes apiserver
|
||||
common_name: apiserver
|
||||
# NOTE(mark-burnett): hosts not required for client certificates
|
||||
- document_name: kubernetes-etcd-anchor
|
||||
description: anchor
|
||||
common_name: anchor
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
{% if data['baremetal'][racks][host]['type'] == 'genesis' %}
|
||||
- document_name: kubernetes-etcd-genesis
|
||||
common_name: kubernetes-etcd-genesis
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
- 127.0.0.1
|
||||
- localhost
|
||||
- kubernetes-etcd.kube-system.svc.cluster.local
|
||||
- 10.96.0.2
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
{% if data['baremetal'][racks][host]['type'] == 'controller' or data['baremetal'][racks][host]['type'] == 'genesis'%}
|
||||
- document_name: kubernetes-etcd-{{ host }}
|
||||
common_name: kubernetes-etcd-{{ host }}
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
- 127.0.0.1
|
||||
- localhost
|
||||
- kubernetes-etcd.kube-system.svc.cluster.local
|
||||
- 10.96.0.2
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
{% if data['baremetal'][racks][host]['type'] == 'genesis' %}
|
||||
kubernetes-etcd-peer:
|
||||
certificates:
|
||||
- document_name: kubernetes-etcd-genesis-peer
|
||||
common_name: kubernetes-etcd-genesis-peer
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
- 127.0.0.1
|
||||
- localhost
|
||||
- kubernetes-etcd.kube-system.svc.cluster.local
|
||||
- 10.96.0.2
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
{% if data['baremetal'][racks][host]['type'] == 'controller' or data['baremetal'][racks][host]['type'] == 'genesis' %}
|
||||
- document_name: kubernetes-etcd-{{ host }}-peer
|
||||
common_name: kubernetes-etcd-{{ host }}-peer
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
- 127.0.0.1
|
||||
- localhost
|
||||
- kubernetes-etcd.kube-system.svc.cluster.local
|
||||
- 10.96.0.2
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
ksn-etcd:
|
||||
description: Certificates for Calico etcd client traffic
|
||||
certificates:
|
||||
- document_name: ksn-etcd-anchor
|
||||
description: anchor
|
||||
common_name: anchor
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
{% if data['baremetal'][racks][host]['type'] == 'controller' or data['baremetal'][racks][host]['type'] == 'genesis' %}
|
||||
- document_name: ksn-etcd-{{ host }}
|
||||
common_name: ksn-etcd-{{ host }}
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
- 127.0.0.1
|
||||
- localhost
|
||||
- 10.96.232.136
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
- document_name: ksn-node
|
||||
common_name: calico-node
|
||||
ksn-etcd-peer:
|
||||
description: Certificates for Calico etcd peer traffic
|
||||
certificates:
|
||||
{% for racks in data['baremetal'].keys()%}
|
||||
{% for host in data['baremetal'][racks].keys()%}
|
||||
{% if data['baremetal'][racks][host]['type'] == 'controller' or data['baremetal'][racks][host]['type'] == 'genesis' %}
|
||||
- document_name: ksn-etcd-{{ host }}-peer
|
||||
common_name: ksn-etcd-{{ host }}-peer
|
||||
hosts:
|
||||
- {{ host }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['oam'] }}
|
||||
- {{ data['baremetal'][racks][host]['ip']['ksn']}}
|
||||
- 127.0.0.1
|
||||
- localhost
|
||||
- 10.96.232.136
|
||||
{% endif %}
|
||||
{%endfor%}
|
||||
{%endfor%}
|
||||
- document_name: ksn-node-peer
|
||||
common_name: calico-node-peer
|
||||
keypairs:
|
||||
- name: service-account
|
||||
description: Service account signing key for use by Kubernetes controller-manager.
|
||||
...
|
40
spyglass/examples/templates/profile/genesis.yaml.j2
Normal file
@ -0,0 +1,40 @@
|
||||
---
|
||||
schema: promenade/Genesis/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: genesis-site
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: genesis-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
labels:
|
||||
dynamic:
|
||||
- beta.kubernetes.io/fluentd-ds-ready=true
|
||||
- calico-etcd=enabled
|
||||
- ceph-mds=enabled
|
||||
- ceph-mon=enabled
|
||||
- ceph-osd=enabled
|
||||
- ceph-rgw=enabled
|
||||
- ceph-mgr=enabled
|
||||
- ceph-bootstrap=enabled
|
||||
- kube-dns=enabled
|
||||
- kube-ingress=enabled
|
||||
- kubernetes-apiserver=enabled
|
||||
- kubernetes-controller-manager=enabled
|
||||
- kubernetes-etcd=enabled
|
||||
- kubernetes-scheduler=enabled
|
||||
- promenade-genesis=enabled
|
||||
- ucp-control-plane=enabled
|
||||
- maas-control-plane=enabled
|
||||
- ceph-osd-bootstrap=enabled
|
||||
- openstack-control-plane=enabled
|
||||
- openvswitch=enabled
|
||||
- openstack-l3-agent=enabled
|
||||
- node-exporter=enabled
|
||||
...
|
@ -0,0 +1,76 @@
|
||||
---
|
||||
schema: 'drydock/HardwareProfile/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: dell_r720
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
# Vendor of the server chassis
|
||||
vendor: DELL
|
||||
# Generation of the chassis model
|
||||
generation: '8'
|
||||
# Version of the chassis model within its generation - not version of the hardware definition
|
||||
hw_version: '3'
|
||||
# The certified version of the chassis BIOS
|
||||
bios_version: '2.2.3'
|
||||
# Mode of the default boot of hardware - bios, uefi
|
||||
boot_mode: bios
|
||||
# Protocol of boot of the hardware - pxe, usb, hdd
|
||||
bootstrap_protocol: pxe
|
||||
# Which interface to use for network booting within the OOB manager, not OS device
|
||||
pxe_interface: 0
|
||||
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
|
||||
# in a site to result in a consistent configuration
|
||||
device_aliases:
|
||||
## network
|
||||
# eno1
|
||||
pxe_nic01:
|
||||
address: '0000:01:00.0'
|
||||
# type could identify expected hardware - used for hardware manifest validation
|
||||
dev_type: 'I350 Gigabit Network Connection'
|
||||
bus_type: 'pci'
|
||||
# enp67s0f0
|
||||
data_nic01:
|
||||
address: '0000:43:00.0'
|
||||
dev_type: 'Ethernet 10G 2P X520 Adapter'
|
||||
bus_type: 'pci'
|
||||
# enp67s0f1
|
||||
data_nic02:
|
||||
address: '0000:43:00.1'
|
||||
dev_type: 'Ethernet 10G 2P X520 Adapter'
|
||||
bus_type: 'pci'
|
||||
# enp68s0f0
|
||||
data_nic03:
|
||||
address: '0000:44:00.0'
|
||||
dev_type: 'Ethernet 10G 2P X520 Adapter'
|
||||
bus_type: 'pci'
|
||||
# enp68s0f1
|
||||
data_nic04:
|
||||
address: '0000:44:00.1'
|
||||
dev_type: 'Ethernet 10G 2P X520 Adapter'
|
||||
bus_type: 'pci'
|
||||
## storage
|
||||
# /dev/sda
|
||||
bootdisk:
|
||||
address: '0:2.0.0'
|
||||
dev_type: 'PERC H710P'
|
||||
bus_type: 'scsi'
|
||||
# /dev/sdb
|
||||
cephjournal1:
|
||||
address: '0:2.1.0'
|
||||
dev_type: 'PERC H710P'
|
||||
bus_type: 'scsi'
|
||||
# /dev/sdc
|
||||
cephjournal2:
|
||||
address: '0:2.2.0'
|
||||
dev_type: 'PERC H710P'
|
||||
bus_type: 'scsi'
|
||||
# /dev/sdd
|
||||
ephemeral:
|
||||
address: '0:2.3.0'
|
||||
dev_type: 'PERC H710P'
|
||||
bus_type: 'scsi'
|
||||
...
|
270
spyglass/examples/templates/profile/host/cp_r720.yaml.j2
Normal file
@ -0,0 +1,270 @@
|
||||
---
|
||||
# This is the primary control plane host profile for Airship on DELL R720s; it
# should not need to be altered if you are using matching HW. The active
# participants in the Ceph cluster run on this profile. Other control plane
# services are not affected by the primary vs. secondary designation.
|
||||
schema: drydock/HostProfile/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: cp_r720-primary
|
||||
storagePolicy: cleartext
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
hosttype: cp-global
|
||||
actions:
|
||||
- method: replace
|
||||
path: .interfaces
|
||||
- method: replace
|
||||
path: .storage
|
||||
- method: merge
|
||||
path: .
|
||||
data:
|
||||
hardware_profile: dell_r720
|
||||
|
||||
primary_network: oam
|
||||
interfaces:
|
||||
pxe:
|
||||
device_link: pxe
|
||||
slaves:
|
||||
- pxe_nic01
|
||||
networks:
|
||||
- pxe
|
||||
bond0:
|
||||
device_link: data
|
||||
slaves:
|
||||
- data_nic01
|
||||
- data_nic02
|
||||
- data_nic03
|
||||
- data_nic04
|
||||
networks:
|
||||
- oam
|
||||
- storage
|
||||
- overlay
|
||||
- calico
|
||||
|
||||
storage:
|
||||
physical_devices:
|
||||
bootdisk:
|
||||
labels:
|
||||
bootdrive: 'true'
|
||||
partitions:
|
||||
- name: 'root'
|
||||
size: '30g'
|
||||
bootable: true
|
||||
filesystem:
|
||||
mountpoint: '/'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'boot'
|
||||
size: '1g'
|
||||
filesystem:
|
||||
mountpoint: '/boot'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'var_log'
|
||||
size: '100g'
|
||||
filesystem:
|
||||
mountpoint: '/var/log'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'var'
|
||||
size: '>100g'
|
||||
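# NOTE: per Drydock's partition size notation, the '>' prefix requests at least
# this much space; the partition may expand into the remaining free capacity.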
filesystem:
|
||||
mountpoint: '/var'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
|
||||
cephjournal1:
|
||||
partitions:
|
||||
- name: 'ceph-j1'
|
||||
size: '10g'
|
||||
- name: 'ceph-j2'
|
||||
size: '10g'
|
||||
- name: 'ceph-j3'
|
||||
size: '10g'
|
||||
- name: 'ceph-j4'
|
||||
size: '10g'
|
||||
cephjournal2:
|
||||
partitions:
|
||||
- name: 'ceph-j5'
|
||||
size: '10g'
|
||||
- name: 'ceph-j6'
|
||||
size: '10g'
|
||||
- name: 'ceph-j7'
|
||||
size: '10g'
|
||||
- name: 'ceph-j8'
|
||||
size: '10g'
|
||||
|
||||
platform:
|
||||
kernel: 'hwe-16.04'
|
||||
kernel_params:
|
||||
console: 'ttyS1,115200n8'
|
||||
|
||||
metadata:
|
||||
owner_data:
|
||||
openstack-l3-agent: enabled
|
||||
...
|
||||
---
|
||||
schema: drydock/HostProfile/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: cp_r740-secondary
|
||||
storagePolicy: cleartext
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
hosttype: cp-global
|
||||
actions:
|
||||
- method: replace
|
||||
path: .interfaces
|
||||
- method: replace
|
||||
path: .storage
|
||||
- method: replace
|
||||
path: .metadata.owner_data
|
||||
- method: merge
|
||||
path: .
|
||||
data:
|
||||
hardware_profile: dell_r720
|
||||
|
||||
primary_network: oam
|
||||
interfaces:
|
||||
pxe:
|
||||
device_link: pxe
|
||||
slaves:
|
||||
- pxe_nic01
|
||||
networks:
|
||||
- pxe
|
||||
bond0:
|
||||
device_link: data
|
||||
slaves:
|
||||
- data_nic01
|
||||
- data_nic02
|
||||
- data_nic03
|
||||
- data_nic04
|
||||
networks:
|
||||
- oam
|
||||
- storage
|
||||
- overlay
|
||||
- calico
|
||||
|
||||
storage:
|
||||
physical_devices:
|
||||
bootdisk:
|
||||
labels:
|
||||
bootdrive: 'true'
|
||||
partitions:
|
||||
- name: 'root'
|
||||
size: '30g'
|
||||
bootable: true
|
||||
filesystem:
|
||||
mountpoint: '/'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'boot'
|
||||
size: '1g'
|
||||
filesystem:
|
||||
mountpoint: '/boot'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'var_log'
|
||||
size: '100g'
|
||||
filesystem:
|
||||
mountpoint: '/var/log'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'var'
|
||||
size: '>100g'
|
||||
filesystem:
|
||||
mountpoint: '/var'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
|
||||
cephjournal1:
|
||||
partitions:
|
||||
- name: 'ceph-j1'
|
||||
size: '10g'
|
||||
- name: 'ceph-j2'
|
||||
size: '10g'
|
||||
- name: 'ceph-j3'
|
||||
size: '10g'
|
||||
- name: 'ceph-j4'
|
||||
size: '10g'
|
||||
cephjournal2:
|
||||
partitions:
|
||||
- name: 'ceph-j5'
|
||||
size: '10g'
|
||||
- name: 'ceph-j6'
|
||||
size: '10g'
|
||||
- name: 'ceph-j7'
|
||||
size: '10g'
|
||||
- name: 'ceph-j8'
|
||||
size: '10g'
|
||||
|
||||
platform:
|
||||
kernel: 'hwe-16.04'
|
||||
kernel_params:
|
||||
console: 'ttyS1,115200n8'
|
||||
|
||||
metadata:
|
||||
owner_data:
|
||||
control-plane: enabled
|
||||
ucp-control-plane: enabled
|
||||
openstack-control-plane: enabled
|
||||
openstack-heat: enabled
|
||||
openstack-keystone: enabled
|
||||
openstack-rabbitmq: enabled
|
||||
openstack-dns-helper: enabled
|
||||
openstack-mariadb: enabled
|
||||
openstack-nova-control: enabled
|
||||
# openstack-etcd: enabled
|
||||
openstack-mistral: enabled
|
||||
openstack-memcached: enabled
|
||||
openstack-glance: enabled
|
||||
openstack-horizon: enabled
|
||||
openstack-cinder-control: enabled
|
||||
openstack-cinder-volume: control
|
||||
openstack-neutron: enabled
|
||||
openvswitch: enabled
|
||||
ucp-barbican: enabled
|
||||
# ceph-mon: enabled
|
||||
ceph-mgr: enabled
|
||||
ceph-osd: enabled
|
||||
ceph-mds: enabled
|
||||
ceph-rgw: enabled
|
||||
ucp-maas: enabled
|
||||
kube-dns: enabled
|
||||
tenant-ceph-control-plane: enabled
|
||||
# tenant-ceph-mon: enabled
|
||||
tenant-ceph-rgw: enabled
|
||||
tenant-ceph-mgr: enabled
|
||||
kubernetes-apiserver: enabled
|
||||
kubernetes-controller-manager: enabled
|
||||
# kubernetes-etcd: enabled
|
||||
kubernetes-scheduler: enabled
|
||||
tiller-helm: enabled
|
||||
# kube-etcd: enabled
|
||||
calico-policy: enabled
|
||||
calico-node: enabled
|
||||
# calico-etcd: enabled
|
||||
ucp-armada: enabled
|
||||
ucp-drydock: enabled
|
||||
ucp-deckhand: enabled
|
||||
ucp-shipyard: enabled
|
||||
IAM: enabled
|
||||
ucp-promenade: enabled
|
||||
prometheus-server: enabled
|
||||
prometheus-client: enabled
|
||||
fluentd: enabled
|
||||
influxdb: enabled
|
||||
kibana: enabled
|
||||
elasticsearch-client: enabled
|
||||
elasticsearch-master: enabled
|
||||
elasticsearch-data: enabled
|
||||
postgresql: enabled
|
||||
kube-ingress: enabled
|
||||
beta.kubernetes.io/fluentd-ds-ready: 'true'
|
||||
node-exporter: enabled
|
||||
...
|
103
spyglass/examples/templates/profile/host/dp_r720.yaml.j2
Normal file
@ -0,0 +1,103 @@
|
||||
---
|
||||
# This is the data plane host profile for Airship on DELL R720s; it should not
# need to be altered if you are using matching HW. The host profile is set up
# for CPU isolation (for nova pinning), hugepages, and SR-IOV.
|
||||
schema: drydock/HostProfile/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: dp_r720
|
||||
storagePolicy: cleartext
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
hosttype: dp-global
|
||||
actions:
|
||||
- method: replace
|
||||
path: .interfaces
|
||||
- method: replace
|
||||
path: .storage
|
||||
- method: merge
|
||||
path: .
|
||||
data:
|
||||
hardware_profile: dell_r720
|
||||
|
||||
primary_network: oam
|
||||
interfaces:
|
||||
pxe:
|
||||
device_link: pxe
|
||||
slaves:
|
||||
- pxe_nic01
|
||||
networks:
|
||||
- pxe
|
||||
bond0:
|
||||
device_link: data
|
||||
slaves:
|
||||
- data_nic01
|
||||
- data_nic02
|
||||
- data_nic03
|
||||
- data_nic04
|
||||
networks:
|
||||
- oam
|
||||
- storage
|
||||
- overlay
|
||||
- calico
|
||||
|
||||
storage:
|
||||
physical_devices:
|
||||
bootdisk:
|
||||
labels:
|
||||
bootdrive: 'true'
|
||||
partitions:
|
||||
- name: 'root'
|
||||
size: '30g'
|
||||
bootable: true
|
||||
filesystem:
|
||||
mountpoint: '/'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'boot'
|
||||
size: '1g'
|
||||
filesystem:
|
||||
mountpoint: '/boot'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'var_log'
|
||||
size: '100g'
|
||||
filesystem:
|
||||
mountpoint: '/var/log'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
- name: 'var'
|
||||
size: '>100g'
|
||||
filesystem:
|
||||
mountpoint: '/var'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
|
||||
cephjournal1:
|
||||
partitions:
|
||||
- name: 'ceph-j1'
|
||||
size: '10g'
|
||||
- name: 'ceph-j2'
|
||||
size: '10g'
|
||||
cephjournal2:
|
||||
partitions:
|
||||
- name: 'ceph-j3'
|
||||
size: '10g'
|
||||
- name: 'ceph-j4'
|
||||
size: '10g'
|
||||
|
||||
ephemeral:
|
||||
partitions:
|
||||
- name: 'nova'
|
||||
size: '99%'
|
||||
filesystem:
|
||||
mountpoint: '/var/lib/nova'
|
||||
fstype: 'ext4'
|
||||
mount_options: 'defaults'
|
||||
platform:
|
||||
kernel: 'hwe-16.04'
|
||||
kernel_params:
|
||||
console: 'ttyS1,115200n8'
|
||||
...
|
35
spyglass/examples/templates/profile/region.yaml.j2
Normal file
@ -0,0 +1,35 @@
|
||||
---
|
||||
schema: 'drydock/Region/v1'
|
||||
metadata:
|
||||
schema: 'metadata/Document/v1'
|
||||
name: {{ data['region_name'] }}
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
substitutions:
|
||||
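# The authorized_keys list and the repositories.main_archive entry referenced
# below are populated by these substitutions when Deckhand renders the documents.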
- dest:
|
||||
path: .authorized_keys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: jenkins_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .authorized_keys[1]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: {{ data['region_name'] }}_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .repositories.main_archive
|
||||
src:
|
||||
schema: pegleg/SoftwareVersions/v1
|
||||
name: software-versions
|
||||
path: .packages.repositories.main_archive
|
||||
data:
|
||||
tag_definitions: []
|
||||
authorized_keys: []
|
||||
repositories:
|
||||
remove_unlisted: true
|
||||
...
|
||||
|
File diff suppressed because it is too large
135
spyglass/examples/templates/secrets/certificates/ingress.yaml.j2
Normal file
@ -0,0 +1,135 @@
|
||||
---
|
||||
# Example manifest for the ingress certificate.
# It shall be replaced with a proper/valid set.
# Self-signed certs are not supported.
|
||||
metadata:
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
name: ingress-crt
|
||||
schema: metadata/Document/v1
|
||||
labels:
|
||||
name: ingress-crt-site
|
||||
storagePolicy: cleartext
|
||||
schema: deckhand/Certificate/v1
|
||||
data: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFKzCCA5OgAwIBAgIMW2h6FCcFdKeaw3vnMA0GCSqGSIb3DQEBCwUAMBIxEDAO
|
||||
BgNVBAMTB0FpcnNoaXAwHhcNMTgwODA2MTY0MDUyWhcNMTkwODA2MTY0MDUyWjBJ
|
||||
MTUwMwYDVQQDEyxpbmdyZXNzLmFpcnNoaXAtc2Vhd29ydGh5LmF0bGFudGFmb3Vu
|
||||
ZHJ5LmNvbTEQMA4GA1UEChMHQWlyc2hpcDCCAaIwDQYJKoZIhvcNAQEBBQADggGP
|
||||
ADCCAYoCggGBALvNHm/G/ylh6aPcvrhOcb4qz1BjcNtnxH8bzZng/rMeX3W2AzjC
|
||||
r2JloJcDvOLBp/TkLOZPImnFW2/GCwktxPgXZuBTPzFV50g77KsPFw0fn3Si7+bs
|
||||
F22tLhdOGk6MQj/WW4pKGHqdw1/VbPwOHBT+I4/scR1L2SZxYtSFIKGenHJH+PMV
|
||||
bCdwnNOR80F8KRzK5iZs/r6S/QqVheieARSWWnk2+TtkM1BloGOhLSd+ZkWh9VO1
|
||||
eOnZowkaDAJwD/G6zoSr5n+beaXzDnEcoVXFSwd4FLoV+om77o92XmZ4rVw0vTMO
|
||||
k6jVwmkdT+dM2K2hLUG/TXWoV2/Qms70gzDOs85RtAkTPe4Ohtdpr51Q0hd35TKG
|
||||
YLKzX/OPblD68iYJYSBvMPpAVTbFYVPW1AQx8wWfannYbMoeL8XTEOKfkqm90YP9
|
||||
EhIdtmw4D7GZxlzG5FXXutmT9sqLfqlRu/RynAhBP8NQvw74WumhOe8r7GhCwgzC
|
||||
gaPLGjeekoS6LQIDAQABo4IBSDCCAUQwDAYDVR0TAQH/BAIwADCBzQYDVR0RBIHF
|
||||
MIHCgixpbmdyZXNzLmFpcnNoaXAtc2Vhd29ydGh5LmF0bGFudGFmb3VuZHJ5LmNv
|
||||
bYIta2V5c3RvbmUuYWlyc2hpcC1zZWF3b3J0aHkuYXRsYW50YWZvdW5kcnkuY29t
|
||||
gilub3ZhLmFpcnNoaXAtc2Vhd29ydGh5LmF0bGFudGFmb3VuZHJ5LmNvbYIsaG9y
|
||||
aXpvbi5haXJzaGlwLXNlYXdvcnRoeS5hdGxhbnRhZm91bmRyeS5jb22HBAoXFQuH
|
||||
BAoXFgswEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0PAQH/BAUDAwegADAdBgNV
|
||||
HQ4EFgQUfTAjNgn/1U1Uh1MJDYT2m4dzhsYwHwYDVR0jBBgwFoAUJFuXPZo6RzfE
|
||||
BlJjnnk5jhcP4wIwDQYJKoZIhvcNAQELBQADggGBAE2ISWmrxqrledJI3aLaS9Yw
|
||||
WsZc8O8CnIyLoxrE85vUubFjuI9ixC/6dJxl2iB1n0H8JgmFREox32Q4+kDJI8V/
|
||||
X9x0PFpRzL7QEPrLZhW94Yis3sOphLW0rf0t06ZepdHHeodYJu1pVMDmLq6bKXdX
|
||||
vo+/WwKnZBXC1qPbXJByv/CN9MtViXOnBGORFRTJPb6U8379LNWclJ/LW12yTwNk
|
||||
JGIbZU61Vxu+2nLIabmmRoODH2jomgMOMMzLgjT3Hvw3whe8GrUoxDiPYQVTDGNm
|
||||
ly6m+5B1Nx06fkZazonozeaOhSQ7RblUSbo+w8TJmLRzD9ft7p4vpjBGxRADMcuF
|
||||
DOjATgdZeisBUHTGEO0P6wJOBQuCFMX9AVl+u8ZpcuRaRaN+pBE6/BqcHBB6qV/N
|
||||
w2DdNtP8BrJ3kJVNEDIo5oTbH5SToxgA4hWBV42M1rB+5vIMDKN3rwVDdNKWYhYc
|
||||
VZpU3V9V6JzSW1O2w4Wu9PdbWJD9oSvC0qJgnjOXzg==
|
||||
-----END CERTIFICATE-----
|
||||
...
|
||||
---
|
||||
metadata:
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
name: ingress-ca
|
||||
schema: metadata/Document/v1
|
||||
labels:
|
||||
name: ingress-ca-site
|
||||
storagePolicy: cleartext
|
||||
schema: deckhand/CertificateAuthority/v1
|
||||
data: |
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID7TCCAlWgAwIBAgIMW2h3tgSwie0Ypx8eMA0GCSqGSIb3DQEBCwUAMBIxEDAO
|
||||
BgNVBAMTB0FpcnNoaXAwHhcNMTgwODA2MTYzMDQ2WhcNMTkwODA2MTYzMDQ2WjAS
|
||||
MRAwDgYDVQQDEwdBaXJzaGlwMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKC
|
||||
AYEAny0Nqu9U2tXdCCTNzD2T62htMmBLg3CmzWajfbfFl7ALqzo3HgbbY3PxTHDE
|
||||
OJ/lwdm0HkEaGfEDXhJd06WZsa8+fKGqhKXvZXwXx5mJ8LCGxz6xiaxwo9lnKe6V
|
||||
o3YX7bJ5YIVxQ2jhvZo+dY8Z/buloi2Tp2HbqTejKULH9+qdiQTDXAnyR0NLqzJ0
|
||||
YQ4v4yU3zix3nBi8z29lQekGO9quNEka3nw2n0Gxmq5z1bNALGCF5F759mVkB0uT
|
||||
fPGF+zm9eqlqAgduYg7R+JYUumVHvIoRY454GtAdZHTJHJZP0gQSGJsLff8ROFpI
|
||||
GVYsOZhJXU9Ihc5VBC5PMErbmCn0YkuxAWNOYBstZ8l+uY6YiPoFV5Ulc/8M0If+
|
||||
T6jbqzWoFC+4ysgY95RKOw53S4o/T6AFwiIKIw0xp3UfHCf6kr5Y0+XdDn5CXpJB
|
||||
d1KK3PoUWzPSsxcUMXvgKWT4x1vsCId21dn1SmVSOEBhM08VZfjd5bvL9Xjt/E0j
|
||||
mUqDAgMBAAGjQzBBMA8GA1UdEwEB/wQFMAMBAf8wDwYDVR0PAQH/BAUDAwcEADAd
|
||||
BgNVHQ4EFgQUJFuXPZo6RzfEBlJjnnk5jhcP4wIwDQYJKoZIhvcNAQELBQADggGB
|
||||
AJaoEtnDoWUUs4nSSqIGcoCfpIO0oqVp8DvkBOcxz5Rz8vMVJSC24/UnuCD2Wknx
|
||||
2V/E3edXIeRo7duhPtNCT7c8OKY/pJsZQTgOczn4rphoD1pmAIPZmpG6ssPadPiM
|
||||
EP8xWJHZt8NXG7D5kJX2COvBvgNeWXL6MF7Tv8+t5xzt59Vitdb/7lm9Z6jjpvN+
|
||||
zoG0pKx3XYESsnLAVAf00F+kWwds/3x3gQywUAQUDER0jliYUE5id+sojp357Cl9
|
||||
XtY+8zSnTduuP8CfMhwv5p6j9xbqacfT7AzpQ6cy4xcQ7MA6JBQcxbaq4NtvIf6+
|
||||
d/5N9d8LGnfXdCd9iwNy9Qk23Ea0SNhnk9F/NqGBPakU4TbHh4iTYMC/+hDGInpO
|
||||
TIRelTidNBFNaIBg3Z0vsh0lDwbt/xhpXip+ZVBqKMTtktEceiVGru9cYUQA2tKI
|
||||
XNoc5s0uQGMpdFzgED4lXZf+n7yGVMKohvi7Yn96HqujGIrVH6qThsI6m7pUSz40
|
||||
+g==
|
||||
-----END CERTIFICATE-----
|
||||
...
|
||||
---
|
||||
metadata:
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
name: ingress-key
|
||||
schema: metadata/Document/v1
|
||||
labels:
|
||||
name: ingress-key-site
|
||||
storagePolicy: cleartext
|
||||
schema: deckhand/CertificateKey/v1
|
||||
data: |
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIG4wIBAAKCAYEAu80eb8b/KWHpo9y+uE5xvirPUGNw22fEfxvNmeD+sx5fdbYD
|
||||
OMKvYmWglwO84sGn9OQs5k8iacVbb8YLCS3E+Bdm4FM/MVXnSDvsqw8XDR+fdKLv
|
||||
5uwXba0uF04aToxCP9ZbikoYep3DX9Vs/A4cFP4jj+xxHUvZJnFi1IUgoZ6cckf4
|
||||
8xVsJ3Cc05HzQXwpHMrmJmz+vpL9CpWF6J4BFJZaeTb5O2QzUGWgY6EtJ35mRaH1
|
||||
U7V46dmjCRoMAnAP8brOhKvmf5t5pfMOcRyhVcVLB3gUuhX6ibvuj3ZeZnitXDS9
|
||||
Mw6TqNXCaR1P50zYraEtQb9NdahXb9CazvSDMM6zzlG0CRM97g6G12mvnVDSF3fl
|
||||
MoZgsrNf849uUPryJglhIG8w+kBVNsVhU9bUBDHzBZ9qedhsyh4vxdMQ4p+Sqb3R
|
||||
g/0SEh22bDgPsZnGXMbkVde62ZP2yot+qVG79HKcCEE/w1C/Dvha6aE57yvsaELC
|
||||
DMKBo8saN56ShLotAgMBAAECggGAYzZDhA1+sx/0zApL/xYB5NK83t0Ju/8fwX6w
|
||||
qUBBjeLXz1mubgf7m2HQ6ragzLI9xpPcXHcl2PbYDT50ig7R5baHNK8FzUxyeKif
|
||||
qOa56Mbx+C4zyqyi2+AHX2x1XVWfkhXuGip2sCA0HKalgqr5juWLZ/ci8rUlLLft
|
||||
3BPQX1FpmL4I+HIyxsspLmQGPGwZVAqkd1xRX+BLKZJAQdlm/LdJaIvwMr4Glcx6
|
||||
ZOe68QhHgzXCYsyV6gR9qstF2OvVuLa2mUc7EzYInFIFhXUdAAwmDqkuuLRdRQhf
|
||||
Ur8nqQW33T0cG0GBUzgBI5YmSPJvTSzcPmeSyNVx2/Yb0pkuXtCw67oDcAsN4nW8
|
||||
uls49E2RaiLJYsy5vPsX5aJNcAxw/CWLdadQ3ukviD/MDJbpTl4F52GOVYL6K4XH
|
||||
g5TJjj7xzjmK3ldR/Kscg7HpCitQLGUYdgIsAFdspXf4aSIa68IjDrc5NsJZuMzc
|
||||
PbVHrw7QYNfHY7VNdUlOVqH5lS3BAoHBANRqKrQXtnJmM006TCEJXdcN/5M685jz
|
||||
+L4Ox0Rhrq8ROgcN5q/hjKb6kP/MccQ9voGQOl9TKEyinGNdTtyc/fuH7RNlQwpS
|
||||
HT+vEzVEcrSe8UFs8c6oJnHFO72ylFcibFf56LvbI3L8BZXp7gPSPQkp5f1NWEZk
|
||||
X5bUL4UNiOm0diltba/ofxywF0M9WGD00eqi0Q29JRlvun+355j06CENxRoonNZC
|
||||
wk1evIxhhckP9zLjI2Ykb1hV6yzwPWtmyQKBwQDiVgru/B396KhzDhLl5AL+pBWA
|
||||
GsfiCbmPLh6W6V5VzldB4+GlMRrJ4zSjZQ3/nvX5KepqjMn1N6LQpZQUI/YShCKE
|
||||
mW0XMiAfbp2d23MRMjLD8L/bIoBHQOPkCaMjbmyDOlCagWakEvHJO/TieVgTmYk6
|
||||
mtEYVjJFWI9OCNMAHdl8ovWr3p+8YbVZ8LLv5ZO/V1cIjczoNQ6p8LG/pPMTDLXM
|
||||
ScN9a8z3f8LQLBHBlu0155xvt95PQLAon/x21kUCgcAvPVk36hoiQQZhw3hQ1JNx
|
||||
E2TmanLobkHAiurYE11VA+DC1t2Z+fBc5la+/MnEWfL3P4srzgOlX3imRIcYWzXE
|
||||
7crUyG1ray2kDxyXeRyFfN+srDzut8is/q81lfSVmEs+GY8f0DGHDfN0Dq1nXidC
|
||||
1XWXqs7aANKdaZ0T2xm61+57ciG1wGAckjDqPEdecLQKmaEijBEnIgj5BH5WLwk8
|
||||
6KIQGj4fDIPHzyzhj4LAX3ObdpZVzf6RR7JgsSEHtLkCgcBROW2dDC87MqZY++D+
|
||||
TVBhz8LDgVjgHntQDc3+fGtVQcKAq+YLYU7qyrXWOWrHpGVDcK5mZHYJoVi1peY5
|
||||
QBqL1I2KpoDGxT9P6GN6BgoKTsh3FsvTOVNtvrTJ3keEbJlWkrPgbrXGBeJtRC4C
|
||||
pGdeSUg9FtgY8r4BsuFisLoAHbYyC008y5zpfusVBtNAUlQuY4qhUDoLzxafF/jB
|
||||
/NEasgH/+SzFss0QuPHRwS7yGVaxdJfoY8TNDjrpqVhx0T0CgcEAvKG4UoWvT8gJ
|
||||
pIeeAxxnv9yrMxgpntu4RXPDHgfX5tva6EaM3r3nLXjd9FVtlQ4cNBMhp9HNhS3a
|
||||
dK+oEDcBysVxxfltlS2Bx0+gQf3WxgBCJwayKe3i/XCDza92EENgxTPmqB1LHiq5
|
||||
2b5aOl2Y5fP0eX6UryxRc443c/ejMHw4lGwnno0qpRk9M9Ucqv5J96QCfAlBSQQS
|
||||
gOG9cypL0kBWzCejn9W4av8HkM8Noqd7Tqul1onv/46OBaX51kt3
|
||||
-----END RSA PRIVATE KEY-----
|
||||
...
|
17
spyglass/examples/templates/site-definition.yaml.j2
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
# High-level pegleg site definition file
|
||||
schema: pegleg/SiteDefinition/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
name: {{ data['region_name'] }}
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
# The type layer this site will deploy with. Type layer is found in the
|
||||
# aic-clcp-manifests repo.
|
||||
site_type: {{ data['site_info']['sitetype'] }}
|
||||
...
|
||||
|
@ -0,0 +1,96 @@
|
||||
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: kubernetes-calico-etcd
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: kubernetes-calico-etcd-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
substitutions:
|
||||
{% set count = [0] %}
|
||||
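{# Jinja2 scoping: a plain set assignment made inside a for loop does not persist
across iterations, so a one-element list is used as a mutable counter and bumped
in place with append/pop after each controller entry below. #}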
{% for rack in data.baremetal.keys() %}
|
||||
{% for host in data["baremetal"][rack] %}
|
||||
{% if data["baremetal"][rack][host]["type"] == 'controller' %}
|
||||
- src:
|
||||
schema: pegleg/CommonAddresses/v1
|
||||
name: common-addresses
|
||||
path: .masters[{{ count[0] }}].hostname
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].name
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: calico-etcd-{{ host }}
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: calico-etcd-{{ host }}
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.key
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: calico-etcd-{{ host }}
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: calico-etcd-{{ host }}-peer
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.key
|
||||
|
||||
{% if count.append(count.pop() + 1) %}{% endif %} {# increment count by 1 #}
|
||||
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
{% for rack in data.baremetal.keys() %}
|
||||
{% for host in data["baremetal"][rack] %}
|
||||
{% if data["baremetal"][rack][host]["type"] == 'genesis' %}
|
||||
- src:
|
||||
schema: pegleg/CommonAddresses/v1
|
||||
name: common-addresses
|
||||
path: .genesis.hostname
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].name
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: calico-etcd-{{ host }}
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: calico-etcd-{{ host }}
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.key
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: calico-etcd-{{ host }}-peer
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: calico-etcd-{{ host }}-peer
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.key
|
||||
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
data: {}
|
||||
...
|
@ -0,0 +1,92 @@
|
||||
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: kubernetes-etcd
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: kubernetes-etcd-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
substitutions:
|
||||
{% set count = [0] %}
|
||||
{% for rack in data.baremetal.keys() %}
|
||||
{% for host in data["baremetal"][rack] %}
|
||||
{% if data["baremetal"][rack][host]["type"] == 'controller' %}
|
||||
- src:
|
||||
schema: pegleg/CommonAddresses/v1
|
||||
name: common-addresses
|
||||
path: .masters[{{ count[0] }}].hostname
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].name
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-{{ host }}
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-{{ host }}
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.key
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-{{ host }}-peer
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-{{ host }}-peer
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.key
|
||||
{% if count.append(count.pop() + 1) %}{% endif %} {# increment count by 1 #}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
{% for rack in data.baremetal.keys() %}
|
||||
{% for host in data["baremetal"][rack] %}
|
||||
{% if data["baremetal"][rack][host]["type"] == 'genesis' %}
|
||||
- src:
|
||||
schema: pegleg/CommonAddresses/v1
|
||||
name: common-addresses
|
||||
path: .genesis.hostname
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].name
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-genesis
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-genesis
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.client.key
|
||||
- src:
|
||||
schema: deckhand/Certificate/v1
|
||||
name: kubernetes-etcd-genesis-peer
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.cert
|
||||
- src:
|
||||
schema: deckhand/CertificateKey/v1
|
||||
name: kubernetes-etcd-genesis-peer
|
||||
path: .
|
||||
dest:
|
||||
path: .values.nodes[{{ count[0] }}].tls.peer.key
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
data: {}
|
||||
...
|
@ -0,0 +1,28 @@
|
||||
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: ingress-kube-system
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
ingress: kube-system
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
substitutions:
|
||||
- src:
|
||||
schema: pegleg/CommonAddresses/v1
|
||||
name: common-addresses
|
||||
path: .ksn.bgp.ipv4.ingress_vip
|
||||
dest:
|
||||
path: .values.network.vip.addr
|
||||
data:
|
||||
values:
|
||||
network:
|
||||
vip:
|
||||
manage: true
|
||||
interface: ingress0
|
||||
...
|
@ -0,0 +1,16 @@
|
||||
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: elasticsearch
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
hosttype: elasticsearch-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data: {}
|
||||
...
|
@ -0,0 +1,16 @@
|
||||
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: fluent-logging
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
hosttype: fluent-logging-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data: {}
|
||||
...
|
@ -0,0 +1,23 @@
|
||||
---
|
||||
# This file defines hardware-specific settings for neutron. If you use the same
|
||||
# hardware profile as this environment, you should not need to change this file.
|
||||
# Otherwise, you should review the settings here and adjust for your hardware.
|
||||
# In particular:
|
||||
# 1. logical network interface names
|
||||
# 2. physical device mappings
|
||||
# TODO: Should move to global layer and become tied to the hardware profile
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: neutron-fixme
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: neutron-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data: {}
|
||||
...
|
@ -0,0 +1,25 @@
|
||||
---
|
||||
# This file defines hardware-specific settings for nova. If you use the same
|
||||
# hardware profile as this environment, you should not need to change this file.
|
||||
# Otherwise, you should review the settings here and adjust for your hardware.
|
||||
# In particular:
|
||||
# 1. vcpu_pin_set will change if the number of logical CPUs on the hardware
|
||||
# changes.
|
||||
# 2. pci alias / passthrough_whitelist could change if the NIC type or NIC
|
||||
# slotting changes.
|
||||
# TODO: Should move to global layer and become tied to the hardware profile
|
||||
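# For illustration only (hypothetical values; check the nova chart's values schema
# before use), a site-level override would sit under data.values, for example:
#   data:
#     values:
#       conf:
#         nova:
#           DEFAULT:
#             vcpu_pin_set: "4-21,26-43"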
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: nova
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: nova-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data: {}
|
||||
...
|
@ -0,0 +1,22 @@
|
||||
---
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: tenant-ceph-client
|
||||
replacement: true
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: tenant-ceph-client-nc
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
values:
|
||||
conf:
|
||||
pool:
|
||||
target:
|
||||
osd: {{ data['storage']['ceph']['controller']['osd_count'] }}
|
||||
...
|
@ -0,0 +1,55 @@
|
||||
---
|
||||
# The purpose of this file is to define environment-specific parameters for
|
||||
# ceph-osd
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: tenant-ceph-osd
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: tenant-ceph-osd-global
|
||||
actions:
|
||||
- method: replace
|
||||
path: .values.conf.storage.osd
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
values:
|
||||
labels:
|
||||
osd:
|
||||
node_selector_key: tenant-ceph-osd
|
||||
node_selector_value: enabled
|
||||
conf:
|
||||
storage:
|
||||
# NEWSITE-CHANGEME: The OSD count and configuration here should not need
|
||||
# to change if your HW matches the HW used in this environment.
|
||||
# Otherwise you may need to add or remove disks from this list.
|
||||
osd:
|
||||
- data:
|
||||
type: block-logical
|
||||
location: /dev/sde
|
||||
journal:
|
||||
type: block-logical
|
||||
location: /dev/sdb1
|
||||
- data:
|
||||
type: block-logical
|
||||
location: /dev/sdf
|
||||
journal:
|
||||
type: block-logical
|
||||
location: /dev/sdb2
|
||||
- data:
|
||||
type: block-logical
|
||||
location: /dev/sdg
|
||||
journal:
|
||||
type: block-logical
|
||||
location: /dev/sdc1
|
||||
- data:
|
||||
type: block-logical
|
||||
location: /dev/sdh
|
||||
journal:
|
||||
type: block-logical
|
||||
location: /dev/sdc2
|
||||
...
|
@ -0,0 +1,603 @@
|
||||
---
|
||||
# The purpose of this file is to define site-specific parameters to the
|
||||
# UAM-lite portion of the divingbell chart:
|
||||
# 1. User accounts to create on bare metal
|
||||
# 2. SSH public key for operating system access to the bare metal
|
||||
# 3. Passwords for operating system access via iDrac/iLo console. SSH password-
|
||||
# based auth is disabled.
|
||||
schema: armada/Chart/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: ucp-divingbell
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
parentSelector:
|
||||
name: ucp-divingbell-global
|
||||
actions:
|
||||
- method: merge
|
||||
path: .
|
||||
storagePolicy: cleartext
|
||||
substitutions:
|
||||
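# Each substitution below copies a deckhand/PublicKey (and, for the first few
# users, a deckhand/Passphrase) document into the matching index of
# .values.conf.uamlite.users, which is declared with empty user_sshkeys lists in
# the data section at the bottom of this document.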
- dest:
|
||||
path: .values.conf.uamlite.users[0].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: jenkins_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[0].user_crypt_passwd
|
||||
src:
|
||||
schema: deckhand/Passphrase/v1
|
||||
name: ubuntu_crypt_password
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[1].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: am240k_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[1].user_crypt_passwd
|
||||
src:
|
||||
schema: deckhand/Passphrase/v1
|
||||
name: am240k_crypt_password
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[2].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: dd118r_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[2].user_crypt_passwd
|
||||
src:
|
||||
schema: deckhand/Passphrase/v1
|
||||
name: dd118r_crypt_password
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[3].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ks3019_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[3].user_crypt_passwd
|
||||
src:
|
||||
schema: deckhand/Passphrase/v1
|
||||
name: ks3019_crypt_password
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[4].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: pb269f_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[4].user_crypt_passwd
|
||||
src:
|
||||
schema: deckhand/Passphrase/v1
|
||||
name: pb269f_crypt_password
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[5].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sf5715_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[5].user_crypt_passwd
|
||||
src:
|
||||
schema: deckhand/Passphrase/v1
|
||||
name: sf5715_crypt_password
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[6].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ds6901_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[7].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: jenkins_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[8].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: kr336r_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[9].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ol7435_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[10].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: mm9745_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[11].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sm108f_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[12].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: bp4242_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[13].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sm028h_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[14].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: go4243_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[15].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: vg763v_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[16].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: jb654e_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[17].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: dy270k_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[18].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: dn5242_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[19].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: aw4825_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[20].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ak2685_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[21].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sd592v_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[22].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: dl2017_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[23].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: kv289f_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[24].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: kp648d_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[25].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: mw145n_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[26].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: mm4762_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[27].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: mk721p_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[28].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ns707e_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[29].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: pk5294_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[30].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: rg9968_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[31].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: rk646r_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[32].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sy825r_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[33].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: bj2343_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[34].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: bd006h_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[35].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: nw288p_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[36].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ja5277_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[37].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ds9665_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[38].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: vp8178_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[39].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ag073n_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[40].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ab4543_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[41].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ar074s_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[42].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: bg809e_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[43].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sq074s_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[44].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: rg4437_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[45].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: gb458m_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[46].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ss254n_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[47].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sd069y_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[48].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sk559p_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[49].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sp781b_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[50].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sc842d_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[51].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ml844w_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[52].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: as487m_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[53].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ms8227_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[54].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ag073n_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[55].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: vv8334_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[56].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ar074s_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[57].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sy336r_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[58].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: sk562s_ssh_public_key
|
||||
path: .
|
||||
- dest:
|
||||
path: .values.conf.uamlite.users[59].user_sshkeys[0]
|
||||
src:
|
||||
schema: deckhand/PublicKey/v1
|
||||
name: ag878f_ssh_public_key
|
||||
path: .
|
||||
data:
|
||||
values:
|
||||
conf:
|
||||
uamlite:
|
||||
users:
|
||||
- user_name: ubuntu
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: am240k
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: dd118r
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ks3019
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: pb269f
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sf5715
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ds6901
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: jenkins
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: kr336r
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ol7435
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: mm9745
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sm108f
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: bp4242
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sm028h
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: go4243
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: vg763v
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: jb654e
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: dy270k
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: dn5242
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: aw4825
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ak2685
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sd592v
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: dl2017
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: kv289f
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: kp648d
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: mw145n
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: mm4762
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: mk721p
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ns707e
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: pk5294
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: rg9968
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: rk646r
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sy825r
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: bj2343
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: bd006h
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: nw288p
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ja5277
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ds9665
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: vp8178
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ag073n
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ab4543
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ar074s
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: bg809e
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sq074s
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: rg4437
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: gb458m
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ss254n
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sd069n
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sk559p
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sp781b
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sc842d
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ml844w
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: as487m
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ms8227
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ag073n
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: vv8334
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ar074s
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sy336r
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: sk562s
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
- user_name: ag878f
|
||||
user_sudo: true
|
||||
user_sshkeys: []
|
||||
...
|
@ -0,0 +1,16 @@
|
||||
---
|
||||
# The purpose of this file is to define site-specific common software config
|
||||
# parameters.
|
||||
# #SITE-PROMOTION-CANDIDATE# would require substitutions out of common-addresses
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: common-software-config
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
osh:
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: {{ data['region_name'] }}
|
1312
spyglass/examples/templates/software/config/endpoints.yaml.j2
Normal file
File diff suppressed because it is too large
@ -0,0 +1,443 @@
|
||||
---
|
||||
# The purpose of this file is to define the account catalog for the site. This
|
||||
# mostly contains service usernames, but also contains some information which
|
||||
# should be changed like the region (site) name.
|
||||
schema: pegleg/AccountCatalogue/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: ucp_service_accounts
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
data:
|
||||
ucp:
|
||||
postgres:
|
||||
admin:
|
||||
username: postgres
|
||||
oslo_db:
|
||||
admin:
|
||||
username: root
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: rabbitmq
|
||||
keystone:
|
||||
admin:
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
username: admin
|
||||
project_name: admin
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: rabbitmq
|
||||
keystone:
|
||||
username: keystone
|
||||
oslo_db:
|
||||
username: keystone
|
||||
database: keystone
|
||||
promenade:
|
||||
keystone:
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
role: admin
|
||||
project_name: service
|
||||
project_domain_name: default
|
||||
user_domain_name: default
|
||||
username: promenade
|
||||
drydock:
|
||||
keystone:
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
role: admin
|
||||
project_name: service
|
||||
project_domain_name: default
|
||||
user_domain_name: default
|
||||
username: drydock
|
||||
postgres:
|
||||
username: drydock
|
||||
database: drydock
|
||||
shipyard:
|
||||
keystone:
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
role: admin
|
||||
project_name: service
|
||||
project_domain_name: default
|
||||
user_domain_name: default
|
||||
username: shipyard
|
||||
postgres:
|
||||
username: shipyard
|
||||
database: shipyard
|
||||
airflow:
|
||||
postgres:
|
||||
username: airflow
|
||||
database: airflow
|
||||
oslo_messaging:
|
||||
username: rabbitmq
|
||||
maas:
|
||||
admin:
|
||||
username: admin
|
||||
email: none@none
|
||||
postgres:
|
||||
username: maas
|
||||
database: maasdb
|
||||
barbican:
|
||||
keystone:
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
role: admin
|
||||
project_name: service
|
||||
project_domain_name: default
|
||||
user_domain_name: default
|
||||
username: barbican
|
||||
oslo_db:
|
||||
username: barbican
|
||||
database: barbican
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: rabbitmq
|
||||
keystone:
|
||||
username: keystone
|
||||
armada:
|
||||
keystone:
|
||||
project_domain_name: default
|
||||
user_domain_name: default
|
||||
project_name: service
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
role: admin
|
||||
username: armada
|
||||
deckhand:
|
||||
keystone:
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
role: admin
|
||||
project_name: service
|
||||
project_domain_name: default
|
||||
user_domain_name: default
|
||||
username: deckhand
|
||||
postgres:
|
||||
username: deckhand
|
||||
database: deckhand
|
||||
prometheus_openstack_exporter:
|
||||
user:
|
||||
region_name: RegionOne
|
||||
role: admin
|
||||
username: prometheus-openstack-exporter
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
ceph:
|
||||
swift:
|
||||
keystone:
|
||||
role: admin
|
||||
# NEWSITE-CHANGEME: Replace with the site name
|
||||
region_name: RegionOne
|
||||
username: swift
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
...
|
||||
---
|
||||
schema: pegleg/AccountCatalogue/v1
|
||||
metadata:
|
||||
schema: metadata/Document/v1
|
||||
name: osh_service_accounts
|
||||
layeringDefinition:
|
||||
abstract: false
|
||||
layer: site
|
||||
storagePolicy: cleartext
|
||||
substitutions:
|
||||
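# The region_name for each service below is injected from .osh.region_name in the
# common-software-config document, so the site name only has to be set in one place.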
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.keystone.admin.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.cinder.cinder.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.glance.glance.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.heat.heat.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.heat.heat_trustee.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.heat.heat_stack_user.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.swift.keystone.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.neutron.neutron.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.nova.nova.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.nova.placement.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.barbican.barbican.region_name
|
||||
- src:
|
||||
schema: pegleg/CommonSoftwareConfig/v1
|
||||
name: common-software-config
|
||||
path: .osh.region_name
|
||||
dest:
|
||||
path: .osh.barbican.barbican.region_name
|
||||
data:
|
||||
osh:
|
||||
keystone:
|
||||
admin:
|
||||
username: admin
|
||||
project_name: admin
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_db:
|
||||
username: keystone
|
||||
database: keystone
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: keystone-rabbitmq-admin
|
||||
keystone:
|
||||
username: keystone-rabbitmq-user
|
||||
ldap:
|
||||
# NEWSITE-CHANGEME: Replace with the site's LDAP account used to
|
||||
# authenticate to the active directory backend to validate keystone
|
||||
# users.
|
||||
username: "test@ldap.example.com"
|
||||
cinder:
|
||||
cinder:
|
||||
role: admin
|
||||
username: cinder
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_db:
|
||||
username: cinder
|
||||
database: cinder
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: cinder-rabbitmq-admin
|
||||
cinder:
|
||||
username: cinder-rabbitmq-user
|
||||
glance:
|
||||
glance:
|
||||
role: admin
|
||||
username: glance
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_db:
|
||||
username: glance
|
||||
database: glance
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: glance-rabbitmq-admin
|
||||
glance:
|
||||
username: glance-rabbitmq-user
|
||||
ceph_object_store:
|
||||
username: glance
|
||||
heat:
|
||||
heat:
|
||||
role: admin
|
||||
username: heat
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
heat_trustee:
|
||||
role: admin
|
||||
username: heat-trust
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
heat_stack_user:
|
||||
role: admin
|
||||
username: heat-domain
|
||||
domain_name: heat
|
||||
oslo_db:
|
||||
username: heat
|
||||
database: heat
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: heat-rabbitmq-admin
|
||||
heat:
|
||||
username: heat-rabbitmq-user
|
||||
swift:
|
||||
keystone:
|
||||
role: admin
|
||||
username: swift
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_db:
|
||||
admin:
|
||||
username: root
|
||||
prometheus_mysql_exporter:
|
||||
user:
|
||||
username: osh-oslodb-exporter
|
||||
neutron:
|
||||
neutron:
|
||||
role: admin
|
||||
username: neutron
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_db:
|
||||
username: neutron
|
||||
database: neutron
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: neutron-rabbitmq-admin
|
||||
neutron:
|
||||
username: neutron-rabbitmq-user
|
||||
nova:
|
||||
nova:
|
||||
role: admin
|
||||
username: nova
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
placement:
|
||||
role: admin
|
||||
username: placement
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_db:
|
||||
username: nova
|
||||
database: nova
|
||||
oslo_db_api:
|
||||
username: nova
|
||||
database: nova_api
|
||||
oslo_db_cell0:
|
||||
username: nova
|
||||
database: "nova_cell0"
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: nova-rabbitmq-admin
|
||||
nova:
|
||||
username: nova-rabbitmq-user
|
||||
horizon:
|
||||
oslo_db:
|
||||
username: horizon
|
||||
database: horizon
|
||||
barbican:
|
||||
barbican:
|
||||
role: admin
|
||||
username: barbican
|
||||
project_name: service
|
||||
user_domain_name: default
|
||||
project_domain_name: default
|
||||
oslo_db:
|
||||
username: barbican
|
||||
database: barbican
|
||||
oslo_messaging:
|
||||
admin:
|
||||
username: barbican-rabbitmq-admin
|
||||
barbican:
|
||||
username: barbican-rabbitmq-user
|
||||
...
|
||||
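Each substitution above copies the single .osh.region_name value from the common-software-config document into a per-service region_name field. Below is a minimal sketch of that path-based copy; the get_path/set_path helpers and the sample documents are illustrative stand-ins, not the actual substitution engine used by the toolchain.

# Illustrative only: a simplified path-based substitution resolver.
import copy


def get_path(doc, path):
    """Walk a dotted path like '.osh.region_name' through nested dicts."""
    node = doc
    for key in path.strip('.').split('.'):
        node = node[key]
    return node


def set_path(doc, path, value):
    """Create intermediate dicts as needed and set the leaf value."""
    node = doc
    keys = path.strip('.').split('.')
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value


# Hypothetical source/destination data mirroring the catalogue above.
common_software_config = {'osh': {'region_name': 'RegionOne'}}
account_catalogue = {'osh': {'keystone': {'admin': {'username': 'admin'}}}}

substitution = {
    'src': {'path': '.osh.region_name'},
    'dest': {'path': '.osh.keystone.admin.region_name'},
}

resolved = copy.deepcopy(account_catalogue)
set_path(resolved, substitution['dest']['path'],
         get_path(common_software_config, substitution['src']['path']))
print(resolved['osh']['keystone']['admin']['region_name'])  # RegionOne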
---
schema: pegleg/AccountCatalogue/v1
metadata:
  schema: metadata/Document/v1
  name: osh_infra_service_accounts
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
  substitutions:
    - src:
        schema: pegleg/CommonSoftwareConfig/v1
        name: common-software-config
        path: .osh.region_name
      dest:
        path: .osh_infra.prometheus_openstack_exporter.user.region_name
data:
  osh_infra:
    ceph_object_store:
      admin:
        username: s3_admin
      elasticsearch:
        username: elasticsearch
    grafana:
      admin:
        username: grafana
      oslo_db:
        username: grafana
        database: grafana
      oslo_db_session:
        username: grafana_session
        database: grafana_session
    elasticsearch:
      admin:
        username: elasticsearch
    kibana:
      admin:
        username: kibana
    oslo_db:
      admin:
        username: root
    prometheus_mysql_exporter:
      user:
        username: osh-infra-oslodb-exporter
    prometheus_openstack_exporter:
      user:
        role: admin
        username: prometheus-openstack-exporter
        project_name: service
        user_domain_name: default
        project_domain_name: default
    nagios:
      admin:
        username: nagios
    prometheus:
      admin:
        username: prometheus
    ldap:
      admin:
        # NEWSITE-CHANGEME: Replace with the site's LDAP account used to
        # authenticate to the active directory backend to validate keystone
        # users.
        bind: "test@ldap.example.com"
...
@@ -18,7 +18,6 @@ import logging
import pkg_resources
import pprint
import sys

import jsonschema
import netaddr
import yaml
@@ -50,9 +49,14 @@ class ProcessDataSource():
        self.sitetype = None
        self.genesis_node = None
        self.region_name = None
        self.network_subnets = None

    def _get_network_subnets(self):
        # Extract subnet information for networks
        """ Extract subnet information for networks.

        In some networks, there are multiple subnets, in that case
        we assign only the first subnet """
        LOG.info("Extracting network subnets")
        network_subnets = {}
        for net_type in self.data['network']['vlan_network_data']:
@@ -60,37 +64,35 @@ class ProcessDataSource():
            if (net_type != 'ingress'):
                network_subnets[net_type] = netaddr.IPNetwork(
                    self.data['network']['vlan_network_data'][net_type]
                    ['subnet'])
                    ['subnet'][0])

        LOG.debug("Network subnets:\n{}".format(
            pprint.pformat(network_subnets)))
        return network_subnets

    def _get_genesis_node_details(self):
        # Returns the genesis node details
        LOG.info("Getting Genesis Node Details")
        # Get genesis host node details from the hosts based on host type
        for racks in self.data['baremetal'].keys():
            rack_hosts = self.data['baremetal'][racks]
            for host in rack_hosts:
                if rack_hosts[host]['type'] == 'genesis':
                    self.genesis_node = rack_hosts[host]
                    self.genesis_node['name'] = host

        LOG.debug("Genesis Node Details:{}".format(
        LOG.debug("Genesis Node Details:\n{}".format(
            pprint.pformat(self.genesis_node)))

    def _validate_extracted_data(self, data):
        """ Validates the extracted data from input source.
    def _validate_intermediary_data(self, data):
        """ Validates the intermediary data before generating manifests.

        It checks whether the data types and data format are as expected.
        The method validates this with regex pattern defined for each
        data type.
        """
        LOG.info('Validating data read from extracted data')
        LOG.info('Validating Intermediary data')
        temp_data = {}
        # Performing a deep copy
        temp_data = copy.deepcopy(data)

        # Converting baremetal dict to list.
        baremetal_list = []
        for rack in temp_data['baremetal'].keys():
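The ['subnet'][0] change above makes the parser take only the first subnet when a network lists several. A standalone sketch of that behaviour with netaddr follows; the sample vlan_network_data values are assumed.

# Standalone sketch of the "first subnet wins" behaviour; sample data is assumed.
import netaddr

vlan_network_data = {
    'oam': {'subnet': ['10.0.220.0/24', '10.0.221.0/24']},
    'storage': {'subnet': ['10.0.222.0/24']},
}

network_subnets = {
    net_type: netaddr.IPNetwork(net_info['subnet'][0])
    for net_type, net_info in vlan_network_data.items()
}
print(network_subnets['oam'])  # 10.0.220.0/24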
@@ -103,7 +105,6 @@ class ProcessDataSource():
        json_data = json.loads(json.dumps(temp_data))
        with open(schema_file, 'r') as f:
            json_schema = json.load(f)

        try:
            # Suppressing writing of data2.json. Can use it for debugging
            with open('data2.json', 'w') as outfile:
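The renamed _validate_intermediary_data method round-trips the intermediary data through JSON and checks it against a schema file bundled with the package, presumably via jsonschema. The sketch below shows that pattern with an inline stand-in schema and sample data; both are assumptions for illustration.

# Minimal sketch of the validation pattern; the inline schema is a stand-in
# for the JSON schema file bundled with the package.
import json

import jsonschema

schema = {
    'type': 'object',
    'properties': {
        'region_name': {'type': 'string'},
        'baremetal': {'type': 'array'},
    },
    'required': ['region_name', 'baremetal'],
}

temp_data = {'region_name': 'site1', 'baremetal': []}
json_data = json.loads(json.dumps(temp_data))  # normalise to plain JSON types

try:
    jsonschema.validate(json_data, schema)
except jsonschema.ValidationError as e:
    print("Validation failed: {}".format(e))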
@@ -143,7 +144,6 @@ class ProcessDataSource():
        rules_yaml = yaml.safe_load(rules_data_raw)
        rules_data = {}
        rules_data.update(rules_yaml)

        for rule in rules_data.keys():
            rule_name = rules_data[rule]['name']
            function_str = "_apply_rule_" + rule_name
@@ -153,12 +153,75 @@ class ProcessDataSource():
            LOG.info("Applying rule:{}".format(rule_name))

    def _apply_rule_host_profile_interfaces(self, rule_data):
        # TODO(pg710r) Nothing to do as of now since host profile
        # information is already present in plugin data.
        # This function shall be defined if plugin data source
        # doesn't provide host profile information.
        pass

    def _apply_rule_hardware_profile(self, rule_data):
        pass
        """ Apply rules to define host type from hardware profile info.

        Host profile will define host types as "controller, compute or
        genesis". The rule_data has pre-defined information to define
        compute or controller based on host_profile. For defining 'genesis'
        the first controller host is defined as genesis."""
        is_genesis = False
        hardware_profile = rule_data[self.data['site_info']['sitetype']]
        # Getting individual racks. The racks are sorted to ensure that the
        # first controller of the first rack is assigned as 'genesis' node.
        for rack in sorted(self.data['baremetal'].keys()):
            # Getting individual hosts in each rack. Sorting of the hosts is
            # done to determine the genesis node.
            for host in sorted(self.data['baremetal'][rack].keys()):
                host_info = self.data['baremetal'][rack][host]
                if (host_info['host_profile'] == hardware_profile[
                        'profile_name']['ctrl']):
                    if not is_genesis:
                        host_info['type'] = 'genesis'
                        is_genesis = True
                    else:
                        host_info['type'] = 'controller'
                else:
                    host_info['type'] = 'compute'

    def _apply_rule_ip_alloc_offset(self, rule_data):
        """ Apply offset rules to update baremetal host ip's and vlan network
        data """

        # Get network subnets
        self.network_subnets = self._get_network_subnets()

        self._update_vlan_net_data(rule_data)
        self._update_baremetal_host_ip_data(rule_data)

    def _update_baremetal_host_ip_data(self, rule_data):
        """ Update baremetal host ip's for applicable networks.

        The applicable networks are oob, oam, ksn, storage and overlay.
        These IPs are assigned based on network subnets ranges.
        If a particular ip exists it is overridden."""

        # Get default ip offset
        default_ip_offset = rule_data['default']

        host_idx = 0
        LOG.info("Update baremetal host ip's")
        for racks in self.data['baremetal'].keys():
            rack_hosts = self.data['baremetal'][racks]
            for host in rack_hosts:
                host_networks = rack_hosts[host]['ip']
                for net in host_networks:
                    ips = list(self.network_subnets[net])
                    host_networks[net] = str(ips[host_idx + default_ip_offset])
                host_idx = host_idx + 1

        LOG.debug("Updated baremetal host:\n{}".format(
            pprint.pformat(self.data['baremetal'])))

    def _update_vlan_net_data(self, rule_data):
        """ Offset allocation rules to determine ip address range(s)

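_update_baremetal_host_ip_data assigns each host the address found at (host index + default offset) within every network's subnet. A standalone sketch of that arithmetic follows; the subnet, offset value and host names are assumed for illustration.

# Standalone sketch of sequential host IP assignment from an offset; the
# subnet, offset and host list are illustrative values.
import netaddr

subnet = netaddr.IPNetwork('10.0.220.0/24')
ips = list(subnet)
default_ip_offset = 20

hosts = ['host-r01-01', 'host-r01-02', 'host-r01-03']
assigned = {
    host: str(ips[idx + default_ip_offset])
    for idx, host in enumerate(hosts)
}
print(assigned)  # {'host-r01-01': '10.0.220.20', 'host-r01-02': '10.0.220.21', ...}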
@@ -166,7 +229,6 @@ class ProcessDataSource():
        network address, gateway ip and other address ranges
        """
        LOG.info("Apply network design rules")
        vlan_network_data = {}

        # Collect Rules
        default_ip_offset = rule_data['default']
@@ -179,7 +241,7 @@ class ProcessDataSource():
        dhcp_ip_end_offset = rule_data['dhcp_ip_end']

        # Set ingress vip and CIDR for bgp
        LOG.info("Applying rule to network bgp data")
        LOG.info("Apply network design rules:bgp")
        subnet = netaddr.IPNetwork(
            self.data['network']['vlan_network_data']['ingress']['subnet'][0])
        ips = list(subnet)
@@ -190,27 +252,24 @@ class ProcessDataSource():
        LOG.debug("Updated network bgp data:\n{}".format(
            pprint.pformat(self.data['network']['bgp'])))

        LOG.info("Applying rule to vlan network data")
        # Get network subnets
        network_subnets = self._get_network_subnets()
        LOG.info("Apply network design rules:vlan")
        # Apply rules to vlan networks
        for net_type in network_subnets:
        for net_type in self.network_subnets:
            if net_type == 'oob':
                ip_offset = oob_ip_offset
            else:
                ip_offset = default_ip_offset
            vlan_network_data[net_type] = {}
            subnet = network_subnets[net_type]

            subnet = self.network_subnets[net_type]
            ips = list(subnet)

            vlan_network_data[net_type]['network'] = str(
                network_subnets[net_type])
            self.data['network']['vlan_network_data'][net_type][
                'gateway'] = str(ips[gateway_ip_offset])

            vlan_network_data[net_type]['gateway'] = str(
                ips[gateway_ip_offset])

            vlan_network_data[net_type]['reserved_start'] = str(ips[1])
            vlan_network_data[net_type]['reserved_end'] = str(ips[ip_offset])
            self.data['network']['vlan_network_data'][net_type][
                'reserved_start'] = str(ips[1])
            self.data['network']['vlan_network_data'][net_type][
                'reserved_end'] = str(ips[ip_offset])

            static_start = str(ips[ip_offset + 1])
            static_end = str(ips[static_ip_end_offset])
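The vlan rules carve each subnet into reserved, static and DHCP ranges purely by list index. The worked sketch below runs such offsets against a /24; the offset values and the midpoint split for DHCP are assumptions for illustration, not the shipped rule data.

# Worked sketch of offset-based address carving on a /24. The offset values
# and the midpoint rule for DHCP are assumptions, not the shipped rule data.
import netaddr

subnet = netaddr.IPNetwork('10.0.220.0/24')
ips = list(subnet)

default_ip_offset = 20     # assumed: reserved block is .1 .. .20
gateway_ip_offset = 1      # assumed
static_ip_end_offset = -2  # assumed: last address before broadcast
dhcp_ip_end_offset = -2    # assumed

gateway = str(ips[gateway_ip_offset])                          # 10.0.220.1
reserved_start, reserved_end = str(ips[1]), str(ips[default_ip_offset])
static_start = str(ips[default_ip_offset + 1])                 # 10.0.220.21
static_end = str(ips[static_ip_end_offset])                    # 10.0.220.254
mid = len(ips) // 2                                            # assumed midpoint split
dhcp_start, dhcp_end = str(ips[mid]), str(ips[dhcp_ip_end_offset])

print(gateway, reserved_start, reserved_end, static_start, static_end)
print(dhcp_start, dhcp_end)                                    # 10.0.220.128 10.0.220.254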
@@ -221,69 +280,78 @@ class ProcessDataSource():
            dhcp_start = str(ips[mid])
            dhcp_end = str(ips[dhcp_ip_end_offset])

            vlan_network_data[net_type]['dhcp_start'] = dhcp_start
            vlan_network_data[net_type]['dhcp_end'] = dhcp_end
            self.data['network']['vlan_network_data'][net_type][
                'dhcp_start'] = dhcp_start
            self.data['network']['vlan_network_data'][net_type][
                'dhcp_end'] = dhcp_end

            vlan_network_data[net_type]['static_start'] = static_start
            vlan_network_data[net_type]['static_end'] = static_end
            self.data['network']['vlan_network_data'][net_type][
                'static_start'] = static_start
            self.data['network']['vlan_network_data'][net_type][
                'static_end'] = static_end

            # There is no vlan for oob network
            if (net_type != 'oob'):
                vlan_network_data[net_type]['vlan'] = self.data['network'][
                    'vlan_network_data'][net_type]['vlan']
                self.data['network']['vlan_network_data'][net_type][
                    'vlan'] = self.data['network']['vlan_network_data'][
                        net_type]['vlan']

            # OAM has default routes. Only for cruiser. TBD
            if (net_type == 'oam'):
                routes = ["0.0.0.0/0"]
            else:
                routes = []
            vlan_network_data[net_type]['routes'] = routes

            # Update network data to self.data
            self.data['network']['vlan_network_data'][
                net_type] = vlan_network_data[net_type]
            self.data['network']['vlan_network_data'][net_type][
                'routes'] = routes

        LOG.debug("Updated vlan network data:\n{}".format(
            pprint.pformat(vlan_network_data)))
            pprint.pformat(self.data['network']['vlan_network_data'])))

    def load_extracted_data_from_data_source(self, extracted_data):
        """
        Function called from spyglass.py to pass extracted data
        from input data source
        """
        LOG.info("Load extracted data from data source")
        self._validate_extracted_data(extracted_data)
        # TBR(pg710r): for internal testing
        """
        raw_data = self._read_file('extracted_data.yaml')
        extracted_data = yaml.safe_load(raw_data)
        """

        LOG.info("Loading plugin data source")
        self.data = extracted_data
        LOG.debug("Extracted data from plugin data source:\n{}".format(
        LOG.debug("Extracted data from plugin:\n{}".format(
            pprint.pformat(extracted_data)))
        extracted_file = "extracted_file.yaml"
        yaml_file = yaml.dump(extracted_data, default_flow_style=False)
        with open(extracted_file, 'w') as f:
            f.write(yaml_file)
            f.close()

        # Append region_data supplied from CLI to self.data
        self.data['region_name'] = self.region_name

    def dump_intermediary_file(self, intermediary_dir):
        """ Dumping intermediary yaml """
        LOG.info("Dumping intermediary yaml")
        """ Writing intermediary yaml """
        LOG.info("Writing intermediary yaml")
        intermediary_file = "{}_intermediary.yaml".format(
            self.data['region_name'])

        # Check if the output dir intermediary_dir exists
        if intermediary_dir is not None:
            outfile = "{}/{}".format(intermediary_dir, intermediary_file)
        else:
            outfile = intermediary_file
        LOG.info("Intermediary file dir:{}".format(outfile))
        LOG.info("Intermediary file:{}".format(outfile))
        yaml_file = yaml.dump(self.data, default_flow_style=False)
        with open(outfile, 'w') as f:
            f.write(yaml_file)
            f.close()

    def generate_intermediary_yaml(self):
        """ Generating intermediary yaml """
        LOG.info("Generating intermediary yaml")
        LOG.info("Start: Generate Intermediary")
        self._apply_design_rules()
        self._get_genesis_node_details()
        self._validate_intermediary_data(self.data)
        self.intermediary_yaml = self.data
        return self.intermediary_yaml
@@ -13,7 +13,6 @@
# limitations under the License.

import logging
import pkg_resources
import os
from jinja2 import Environment
from jinja2 import FileSystemLoader
@@ -27,7 +26,7 @@ class SiteProcessor(BaseProcessor):
        self.yaml_data = intermediary_yaml
        self.manifest_dir = manifest_dir

    def render_template(self):
    def render_template(self, template_dir):
        """ The method renders network config yaml from j2 templates.

@@ -42,8 +41,7 @@ class SiteProcessor(BaseProcessor):
        site_manifest_dir = 'pegleg_manifests/site/'
        LOG.info("Site manifest output dir:{}".format(site_manifest_dir))

        template_software_dir = pkg_resources.resource_filename(
            'spyglass', 'templates/')
        template_software_dir = template_dir + '/'
        template_dir_abspath = os.path.dirname(template_software_dir)
        LOG.debug("Template Path:%s", template_dir_abspath)

@@ -67,6 +67,23 @@ LOG = logging.getLogger('spyglass')
    '-mdir',
    type=click.Path(exists=True),
    help='The path where manifest files need to be generated')
@click.option(
    '--template_dir',
    '-tdir',
    type=click.Path(exists=True),
    help='The path where J2 templates are available')
@click.option(
    '--excel',
    '-x',
    multiple=True,
    type=click.Path(exists=True),
    help=
    'Path to engineering excel file, to be passed with generate_intermediary')
@click.option(
    '--excel_spec',
    '-e',
    type=click.Path(exists=True),
    help='Path to excel spec, to be passed with generate_intermediary')
@click.option(
    '--loglevel',
    '-l',
@@ -83,6 +100,7 @@ def main(*args, **kwargs):
    manifest_dir = kwargs['manifest_dir']
    intermediary = kwargs['intermediary']
    site = kwargs['site']
    template_dir = kwargs['template_dir']
    loglevel = kwargs['loglevel']

    # Set Logging format
@@ -94,7 +112,7 @@ def main(*args, **kwargs):
    LOG.addHandler(stream_handle)

    LOG.info("Spyglass start")
    LOG.debug("CLI Parameters passed:\n{}".format(kwargs))
    LOG.info("CLI Parameters passed:\n{}".format(kwargs))

    if not (generate_intermediary or generate_manifests):
        LOG.error("Invalid CLI parameters passed!! Spyglass exited")
@@ -102,6 +120,14 @@ def main(*args, **kwargs):
        LOG.info("CLI Parameters:\n{}".format(kwargs))
        exit()

    if generate_manifests:
        if template_dir is None:
            LOG.error("Template directory not specified!! Spyglass exited")
            LOG.error(
                "It is mandatory to provide it when generate_manifests is true"
            )
            exit()

    # Generate Intermediary yaml and manifests extracting data
    # from data source specified by plugin type
    intermediary_yaml = {}
@@ -163,7 +189,7 @@ def main(*args, **kwargs):
    if generate_manifests:
        LOG.info("Generating site Manifests")
        processor_engine = SiteProcessor(intermediary_yaml, manifest_dir)
        processor_engine.render_template()
        processor_engine.render_template(template_dir)

    LOG.info("Spyglass Execution Completed")