From 72ab1a5d1296a746fef486a5159b126d784eff6c Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 9 Nov 2015 11:31:30 +0200 Subject: [PATCH 01/51] Use config.py:C in solar and fix compose file --- solar/solar/config.py | 63 +++++++++++++++++++++++++++++++++ solar/solar/dblayer/__init__.py | 9 +++++ 2 files changed, 72 insertions(+) create mode 100644 solar/solar/config.py create mode 100644 solar/solar/dblayer/__init__.py diff --git a/solar/solar/config.py b/solar/solar/config.py new file mode 100644 index 00000000..9a92c501 --- /dev/null +++ b/solar/solar/config.py @@ -0,0 +1,63 @@ +import os +import yaml +from bunch import Bunch + +CWD = os.getcwd() + +C = Bunch() +C.redis = Bunch(port='6379', host='10.0.0.2') +C.solar_db = Bunch(mode='riak', port='8087', host='10.0.0.2', protocol='pbc') + + +def _lookup_vals(setter, config, prefix=None): + for key, val in config.iteritems(): + if prefix is None: + sub = [key] + else: + sub = prefix + [key] + if isinstance(val, Bunch): + _lookup_vals(setter, val, sub) + else: + setter(config, sub) + + +def from_configs(): + + paths = [ + os.getenv('SOLAR_CONFIG', os.path.join(CWD, '.config')), + os.path.join(CWD, '.config.override') + ] + data = {} + + def _load_from_path(data, path): + with open(path) as f: + loaded = yaml.load(f) + if loaded: + data.update(loaded) + + for path in paths: + if not os.path.exists(path): + continue + with open(path) as f: + loaded = yaml.load(f) + if loaded: + data.update(loaded) + + def _setter(config, path): + vals = data + for key in path: + vals = vals[key] + config[path[-1]] = vals + if data: + _lookup_vals(_setter, C) + + +def from_env(): + def _setter(config, path): + env_key = '_'.join(path).upper() + if env_key in os.environ: + config[path[-1]] = os.environ[env_key] + _lookup_vals(_setter, C) + +from_configs() +from_env() diff --git a/solar/solar/dblayer/__init__.py b/solar/solar/dblayer/__init__.py new file mode 100644 index 00000000..2f3b0fae --- /dev/null +++ 
b/solar/solar/dblayer/__init__.py @@ -0,0 +1,9 @@ +from solar.dblayer.model import ModelMeta +from solar.dblayer.riak_client import RiakClient +from solar.config import C + +client = RiakClient( + protocol=C.riak.protcol, host=C.riak.host, pb_port=C.riak.port) +# client = RiakClient(protocol='http', host='10.0.0.2', http_port=8098) + +ModelMeta.setup(client) From 48f6aed54635aaba3aa2a9da4f2bdc9563d8dcc5 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 9 Nov 2015 14:30:21 +0200 Subject: [PATCH 02/51] Translate tasks into resources without inputs --- f2s/.gitignore | 2 + f2s/f2s.py | 109 ++++++++++++++++++++++++++++++ f2s/resources/role_data/meta.yaml | 10 +++ 3 files changed, 121 insertions(+) create mode 100644 f2s/.gitignore create mode 100755 f2s/f2s.py create mode 100644 f2s/resources/role_data/meta.yaml diff --git a/f2s/.gitignore b/f2s/.gitignore new file mode 100644 index 00000000..5f4a0636 --- /dev/null +++ b/f2s/.gitignore @@ -0,0 +1,2 @@ +fuel-library +tmp/ diff --git a/f2s/f2s.py b/f2s/f2s.py new file mode 100755 index 00000000..3bc91a60 --- /dev/null +++ b/f2s/f2s.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python + +import os + +import click +import yaml +from fnmatch import fnmatch +import shutil + +def ensure_dir(dir): + try: + os.makedirs(dir) + except OSError: + pass + +CURDIR = os.path.dirname(os.path.realpath(__file__)) + +LIBRARY_PATH = os.path.join(CURDIR, 'fuel-library') +RESOURCE_TMP_WORKDIR = os.path.join(CURDIR, 'tmp/resources') +ensure_dir(RESOURCE_TMP_WORKDIR) +RESOURCE_DIR = os.path.join(CURDIR, 'resources') + +class Task(object): + + def __init__(self, task_data, task_path): + self.data = task_data + self.src_path = task_path + self.name = self.data['id'] + self.type = self.data['type'] + + @property + def dst_path(self): + return os.path.join(RESOURCE_TMP_WORKDIR, self.data['id']) + + @property + def actions_path(self): + return os.path.join(self.dst_path, 'actions') + + @property + def meta_path(self): + return 
os.path.join(self.dst_path, 'meta.yaml') + + def meta(self): + data = {'id': self.data['id'], + 'handler': 'puppetv2', + 'version': '8.0', + 'inputs': self.inputs()} + return yaml.safe_dump(data, default_flow_style=False) + + @property + def actions(self): + """yield an iterable of src/dst + """ + yield os.path.join(self.src_path, self.data['id'] + '.pp'), os.path.join(self.actions_path, 'run.pp') + + def inputs(self): + return {} + + +def get_files(base_dir, file_pattern='*tasks.yaml'): + for root, _dirs, files in os.walk(base_dir): + for file_name in files: + if fnmatch(file_name, file_pattern): + yield root, file_name + +def load_data(base, file_name): + with open(os.path.join(base, file_name)) as f: + return yaml.load(f) + +def preview(task): + print 'PATH' + print task.dst_path + print 'META' + print task.meta() + print 'ACTIONS' + for action in task.actions(): + print 'src=%s dst=%s' % action + +def create(task): + ensure_dir(task.dst_path) + ensure_dir(task.actions_path) + with open(task.meta_path, 'w') as f: + f.write(task.meta()) + for src, dst in task.actions: + shutil.copyfile(src, dst) + +@click.group() +def main(): + pass + +@main.command(help='converts tasks into resources') +@click.argument('tasks', nargs=-1) +@click.option('-t', is_flag=True) +@click.option('-p', is_flag=True) +def t2r(tasks, t, p): + for base, task_yaml in get_files(LIBRARY_PATH + '/deployment'): + for item in load_data(base, task_yaml): + task = Task(item, base) + if task.type != 'puppet': + continue + + if task.name in tasks or tasks is (): + if p: + preview(task) + else: + create(task) + +if __name__ == '__main__': + main() diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml new file mode 100644 index 00000000..78d257d4 --- /dev/null +++ b/f2s/resources/role_data/meta.yaml @@ -0,0 +1,10 @@ +# data container resource that will fetch data from nailgun +id: role_data +handler: none +version: 0.0.1 +input: + # should be auto-generated based on outputs 
from globals.pp + var1: + type: str! + value: meta + From 0172537577a892c8b5eaa0a8b9c2fae4a71a2a9e Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 9 Nov 2015 14:46:48 +0200 Subject: [PATCH 03/51] Fix path to manifest --- f2s/f2s.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 3bc91a60..8835ca25 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -20,6 +20,11 @@ RESOURCE_TMP_WORKDIR = os.path.join(CURDIR, 'tmp/resources') ensure_dir(RESOURCE_TMP_WORKDIR) RESOURCE_DIR = os.path.join(CURDIR, 'resources') + +def clean_resources(): + shutil.rmtree(RESOURCE_TMP_WORKDIR) + ensure_dir(RESOURCE_TMP_WORKDIR) + class Task(object): def __init__(self, task_data, task_path): @@ -28,6 +33,13 @@ class Task(object): self.name = self.data['id'] self.type = self.data['type'] + @property + def manifest(self): + after_naily = self.data['parameters']['puppet_manifest'].split('osnailyfacter/')[-1] + return os.path.join( + LIBRARY_PATH, 'deployment', 'puppet', 'osnailyfacter', + after_naily) + @property def dst_path(self): return os.path.join(RESOURCE_TMP_WORKDIR, self.data['id']) @@ -51,7 +63,7 @@ class Task(object): def actions(self): """yield an iterable of src/dst """ - yield os.path.join(self.src_path, self.data['id'] + '.pp'), os.path.join(self.actions_path, 'run.pp') + yield self.manifest, os.path.join(self.actions_path, 'run.pp') def inputs(self): return {} @@ -92,14 +104,17 @@ def main(): @click.argument('tasks', nargs=-1) @click.option('-t', is_flag=True) @click.option('-p', is_flag=True) -def t2r(tasks, t, p): +@click.option('-c', is_flag=True) +def t2r(tasks, t, p, c): + if c: + clean_resources() for base, task_yaml in get_files(LIBRARY_PATH + '/deployment'): for item in load_data(base, task_yaml): task = Task(item, base) if task.type != 'puppet': continue - if task.name in tasks or tasks is (): + if task.name in tasks or tasks == (): if p: preview(task) else: From 
aeb25393ab3d404f13b66fb5101171f2679ef681 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 9 Nov 2015 18:58:25 +0200 Subject: [PATCH 04/51] Add g2vr command that will create resources and events --- f2s/f2s.py | 190 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 172 insertions(+), 18 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 8835ca25..9136be9f 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -1,11 +1,13 @@ #!/usr/bin/env python import os +from fnmatch import fnmatch +import shutil import click import yaml -from fnmatch import fnmatch -import shutil +import networkx as nx + def ensure_dir(dir): try: @@ -19,12 +21,20 @@ LIBRARY_PATH = os.path.join(CURDIR, 'fuel-library') RESOURCE_TMP_WORKDIR = os.path.join(CURDIR, 'tmp/resources') ensure_dir(RESOURCE_TMP_WORKDIR) RESOURCE_DIR = os.path.join(CURDIR, 'resources') - +VR_TMP_DIR = os.path.join(CURDIR, 'tmp/vrs') +ensure_dir(VR_TMP_DIR) +INPUTS_LOCATION = "/tmp/fuel_specs/" +DEPLOYMENT_GROUP_PATH = os.path.join(LIBRARY_PATH, + 'deployment', 'puppet', 'deployment_groups', 'tasks.yaml') def clean_resources(): shutil.rmtree(RESOURCE_TMP_WORKDIR) ensure_dir(RESOURCE_TMP_WORKDIR) +def clean_vr(): + shutil.rmtree(VR_TMP_DIR) + ensure_dir(VR_TMP_DIR) + class Task(object): def __init__(self, task_data, task_path): @@ -33,6 +43,22 @@ class Task(object): self.name = self.data['id'] self.type = self.data['type'] + def edges(self): + data = self.data + if 'required_for' in data: + for req in data['required_for']: + yield self.name, req + if 'requires' in task: + for req in data['requires']: + yield req, self.name + + if 'groups' in data: + for req in data['groups']: + yield self.name, req + if 'tasks' in data: + for req in data['tasks']: + yield req, self.name + @property def manifest(self): after_naily = self.data['parameters']['puppet_manifest'].split('osnailyfacter/')[-1] @@ -42,7 +68,7 @@ class Task(object): @property def dst_path(self): - return os.path.join(RESOURCE_TMP_WORKDIR, 
self.data['id']) + return os.path.join(RESOURCE_TMP_WORKDIR, self.name) @property def actions_path(self): @@ -53,7 +79,7 @@ class Task(object): return os.path.join(self.dst_path, 'meta.yaml') def meta(self): - data = {'id': self.data['id'], + data = {'id': self.name, 'handler': 'puppetv2', 'version': '8.0', 'inputs': self.inputs()} @@ -66,7 +92,78 @@ class Task(object): yield self.manifest, os.path.join(self.actions_path, 'run.pp') def inputs(self): - return {} + """ + Inputs prepared by + + fuel_noop_tests.rb + identity = spec.split('/')[-1] + ENV["SPEC"] = identity + + hiera.rb + File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" } + """ + lookup_stack_path = os.path.join( + INPUTS_LOCATION, self.name+"_spec.rb'") + if not os.path.exists(lookup_stack_path): + return {} + + with open(lookup_stack_path) as f: + data = yaml.safe_load(f) or [] + return {key: None for key in set(data)} + + +class RoleData(Task): + + name = 'globals' + + def meta(self): + data = {'id': self.name, + 'handler': 'puppetv2', + 'version': '8.0', + 'inputs': self.inputs(), + 'manager': 'globals.py'} + return yaml.safe_dump(data, default_flow_style=False) + + @property + def actions(self): + pass + + +class DGroup(object): + + def __init__(self, name, tasks): + self.name = name + self.tasks = tasks + + def resources(self): + for t, _, _ in self.tasks: + yield {'id': t.name+"{{index}}", + 'from': 'f2s/resources/'+t.name, + 'location': "{{node}}"} + + + def events(self): + for t, inner, outer self.tasks: + for dep in set(inner): + yield { + 'type': 'depends_on', + 'state': 'success', + 'parent_action': dep + '{{index}}.run', + 'child_action': t.name + '{{index}}.run'} + for dep in set(outer): + yield { + 'type': 'depends_on', + 'state': 'success', + 'parent': { + 'with_tags': ['resource=' + dep], + 'action': 'run'} + 'depend_action': t.name + '{{index}}.run'} + + def meta(self): + data = {'id': self.name, + 'resources': self.resources(), + 'events': self.events()} + return 
yaml.safe_dump(data, default_flow_style=False) def get_files(base_dir, file_pattern='*tasks.yaml'): @@ -90,11 +187,32 @@ def preview(task): def create(task): ensure_dir(task.dst_path) - ensure_dir(task.actions_path) + if task.actions_path: + ensure_dir(task.actions_path) + for src, dst in task.actions: + shutil.copyfile(src, dst) + with open(task.meta_path, 'w') as f: f.write(task.meta()) - for src, dst in task.actions: - shutil.copyfile(src, dst) + + +def get_tasks(): + for base, task_yaml in get_files(LIBRARY_PATH + '/deployment'): + for item in load_data(base, task_yaml): + yield Task(item, base) + + +def get_graph(): + dg = nx.DiGraph) + for t in get_tasks(): + dg.add_edges_from(list(t.edges())) + dg.add_node(t.name, t=t) + return dg + +def dgroup_subgraph(dg, dgroup): + preds = [p for p in dg.predecessors(dgroup) + if dg.node[p]['t'].type == 'puppet'] + return dg.subgraph(preds) @click.group() def main(): @@ -108,17 +226,53 @@ def main(): def t2r(tasks, t, p, c): if c: clean_resources() - for base, task_yaml in get_files(LIBRARY_PATH + '/deployment'): - for item in load_data(base, task_yaml): - task = Task(item, base) - if task.type != 'puppet': - continue - if task.name in tasks or tasks == (): - if p: - preview(task) + for task in get_tasks(): + if task.type != 'puppet': + continue + + if task.name in tasks or tasks == (): + if p: + preview(task) + else: + create(task) + role_data = RoleData() + if p: + preview(role_data) + else: + create(role_data) + + +@main.command(help='convert groups into templates') +@click.argument('groups', nargs=-1) +@click.option('-c', is_flag=True) +def g2vr(groups, c): + if c: + clean_vr() + + dg = get_graph() + dgroups = [n for n in dg.node[n]['t'].type == 'group'] + + for d in dgroups: + if groups and d not in groups: + continue + + ordered = [] + dsub = dg.subgraph(dg.predecessors(group)) + for t in nx.topological(dsub): + inner_preds = [] + outer_preds = [] + for p in dg.predecessors(t): + if p in dsub: + 
inner_preds.append(p) else: - create(task) + outer_preds.append(p) + + if dg.node[t]['t'].type == 'puppet': + ordered.append(dg.node[t]['t'], inner_preds, outer_preds) + + # based on inner/outer aggregation configure joins in events + if __name__ == '__main__': main() From 21fcb1af1b44625685b0ef291b46143df44cd45d Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 9 Nov 2015 21:57:44 +0200 Subject: [PATCH 05/51] Create resoruces and events based on tasks --- f2s/f2s.py | 42 +++++++++++++++++++++++++----------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 9136be9f..cc43a00e 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -48,7 +48,7 @@ class Task(object): if 'required_for' in data: for req in data['required_for']: yield self.name, req - if 'requires' in task: + if 'requires' in data: for req in data['requires']: yield req, self.name @@ -109,7 +109,8 @@ class Task(object): with open(lookup_stack_path) as f: data = yaml.safe_load(f) or [] - return {key: None for key in set(data)} + return {key: {'value': None} for key + in set(data) if '::' not in key} class RoleData(Task): @@ -143,7 +144,7 @@ class DGroup(object): def events(self): - for t, inner, outer self.tasks: + for t, inner, outer in self.tasks: for dep in set(inner): yield { 'type': 'depends_on', @@ -156,15 +157,19 @@ class DGroup(object): 'state': 'success', 'parent': { 'with_tags': ['resource=' + dep], - 'action': 'run'} + 'action': 'run'}, 'depend_action': t.name + '{{index}}.run'} def meta(self): data = {'id': self.name, - 'resources': self.resources(), - 'events': self.events()} + 'resources': list(self.resources()), + 'events': list(self.events())} return yaml.safe_dump(data, default_flow_style=False) + @property + def path(self): + return os.path.join(VR_TMP_DIR, self.name + '.yml') + def get_files(base_dir, file_pattern='*tasks.yaml'): for root, _dirs, files in os.walk(base_dir): @@ -203,7 +208,7 @@ def get_tasks(): def get_graph(): - dg = 
nx.DiGraph) + dg = nx.DiGraph() for t in get_tasks(): dg.add_edges_from(list(t.edges())) dg.add_node(t.name, t=t) @@ -236,11 +241,11 @@ def t2r(tasks, t, p, c): preview(task) else: create(task) - role_data = RoleData() - if p: - preview(role_data) - else: - create(role_data) + # role_data = RoleData() + # if p: + # preview(role_data) + # else: + # create(role_data) @main.command(help='convert groups into templates') @@ -251,15 +256,15 @@ def g2vr(groups, c): clean_vr() dg = get_graph() - dgroups = [n for n in dg.node[n]['t'].type == 'group'] + dgroups = [n for n in dg if dg.node[n]['t'].type == 'group'] - for d in dgroups: - if groups and d not in groups: + for group in dgroups: + if groups and group not in groups: continue ordered = [] dsub = dg.subgraph(dg.predecessors(group)) - for t in nx.topological(dsub): + for t in nx.topological_sort(dsub): inner_preds = [] outer_preds = [] for p in dg.predecessors(t): @@ -269,8 +274,11 @@ def g2vr(groups, c): outer_preds.append(p) if dg.node[t]['t'].type == 'puppet': - ordered.append(dg.node[t]['t'], inner_preds, outer_preds) + ordered.append((dg.node[t]['t'], inner_preds, outer_preds)) + obj = DGroup(group, ordered) + with open(obj.path, 'w') as f: + f.write(obj.meta()) # based on inner/outer aggregation configure joins in events From 1a3a4191bdc9280bbb336101f5d784f423e79ed9 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 9 Nov 2015 21:58:01 +0200 Subject: [PATCH 06/51] Save applied patches to generate inputs --- f2s/patches/hiera.patch | 10 ++++++++++ f2s/patches/noop_tests.patch | 14 ++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 f2s/patches/hiera.patch create mode 100644 f2s/patches/noop_tests.patch diff --git a/f2s/patches/hiera.patch b/f2s/patches/hiera.patch new file mode 100644 index 00000000..abf5a1e7 --- /dev/null +++ b/f2s/patches/hiera.patch @@ -0,0 +1,10 @@ +--- /tmp/noop/.bundled_gems/gems/hiera-1.3.4/lib/hiera.rb 2015-11-09 19:55:29.127004136 +0000 ++++ 
/tmp/noop/.bundled_gems/gems/hiera-1.3.4/lib/hiera.rb 2015-11-09 14:15:54.372852787 +0000 +@@ -57,6 +57,7 @@ + # The order-override will insert as first in the hierarchy a data source + # of your choice. + def lookup(key, default, scope, order_override=nil, resolution_type=:priority) ++ File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" } + Backend.lookup(key, default, scope, order_override, resolution_type) + end + end diff --git a/f2s/patches/noop_tests.patch b/f2s/patches/noop_tests.patch new file mode 100644 index 00000000..c475ac22 --- /dev/null +++ b/f2s/patches/noop_tests.patch @@ -0,0 +1,14 @@ +--- fuel-library/utils/jenkins/fuel_noop_tests.rb 2015-11-09 19:51:53.000000000 +0000 ++++ fuel-library/utils/jenkins/fuel_noop_tests.rb 2015-11-09 19:51:17.000000000 +0000 +@@ -271,8 +271,10 @@ + # @return [Array] success and empty report array + def self.rspec(spec) + inside_noop_tests_directory do ++ identity = spec.split('/')[-1] ++ ENV["SPEC"] = identity + command = "rspec #{RSPEC_OPTIONS} #{spec}" +- command = 'bundle exec ' + command if options[:bundle] ++ command = "bundle exec " + command if options[:bundle] + if options[:filter_examples] + options[:filter_examples].each do |example| + command = command + " -e #{example}" From 9883075d6375ea740dbd688c33455dcfcac5aeec Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 09:49:59 +0200 Subject: [PATCH 07/51] Add values_from mechanism that will allow to guess mappings --- solar/core/resource/virtual_resource.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/solar/core/resource/virtual_resource.py b/solar/core/resource/virtual_resource.py index ecab8455..22bbc974 100644 --- a/solar/core/resource/virtual_resource.py +++ b/solar/core/resource/virtual_resource.py @@ -130,6 +130,7 @@ def create_resources(resources, tags=None): resource_name = r['id'] args = r.get('values', {}) node = r.get('location', None) + values_from = r.get('values_from') from_path = r.get('from', 
None) tags = r.get('tags', []) base_path = os.path.join(cwd, from_path) @@ -142,6 +143,11 @@ def create_resources(resources, tags=None): node.connect(r, mapping={}) r.add_tags('location={}'.format(node.name)) update_inputs(resource_name, args) + + if values_from: + from_resource = load_resource(values_from) + from_resource.connect_with_events(r, use_defaults=False) + return created_resources From aa76fbb49ff6005adabbc5d48a40170b218a8349 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 10:10:51 +0200 Subject: [PATCH 08/51] Add values_from to vr generator --- f2s/f2s.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index cc43a00e..e09d71db 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -140,7 +140,8 @@ class DGroup(object): for t, _, _ in self.tasks: yield {'id': t.name+"{{index}}", 'from': 'f2s/resources/'+t.name, - 'location': "{{node}}"} + 'location': "{{node}}", + 'values_from': RoleData.name+"{{index}}"} def events(self): From 742d6993862f1da4ee26352d596a58e0493d4c97 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 11:12:07 +0200 Subject: [PATCH 09/51] Add naive manager mechanism and prefetch command --- resources/ex_managed/managers/manager.py | 10 ++++++++++ resources/ex_managed/meta.yaml | 11 +++++++++++ solar/cli/resource.py | 14 ++++++++++++++ solar/core/resource/resource.py | 12 +++++++++++- solar/dblayer/solar_models.py | 1 + solar/utils.py | 5 +++++ 6 files changed, 52 insertions(+), 1 deletion(-) create mode 100755 resources/ex_managed/managers/manager.py create mode 100644 resources/ex_managed/meta.yaml diff --git a/resources/ex_managed/managers/manager.py b/resources/ex_managed/managers/manager.py new file mode 100755 index 00000000..588c0e0b --- /dev/null +++ b/resources/ex_managed/managers/manager.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python + +import sys +import json + +data = json.loads(sys.stdin.read()) + +rst = {'val_x_val': int(data['val'])**2} + 
+sys.stdout.write(json.dumps(rst)) diff --git a/resources/ex_managed/meta.yaml b/resources/ex_managed/meta.yaml new file mode 100644 index 00000000..79a4a666 --- /dev/null +++ b/resources/ex_managed/meta.yaml @@ -0,0 +1,11 @@ +id: managed +handler: none +version: 1.0.0 +manager: managers/manager.py +input: + val: + schema: int! + value: 2 + val_x_val: + schema: int + value: diff --git a/solar/cli/resource.py b/solar/cli/resource.py index 2aed97de..3ae5adb8 100644 --- a/solar/cli/resource.py +++ b/solar/cli/resource.py @@ -261,3 +261,17 @@ def remove(name, tag, f): else: msg = 'Resource %s will be removed after commiting changes.' % res.name # NOQA click.echo(msg) + + +@resource.command() +@click.option('--name', '-n') +@click.option('--tag', '-t', multiple=True) +def prefetch(name, tag): + if name: + resources = [sresource.load(name)] + elif tag: + resources = sresource.load_by_tags(set(tag)) + + for res in resources: + res.prefetch() + click.echo(res.color_repr()) diff --git a/solar/core/resource/resource.py b/solar/core/resource/resource.py index 5f2f8fa1..0fb53a58 100644 --- a/solar/core/resource/resource.py +++ b/solar/core/resource/resource.py @@ -89,7 +89,8 @@ class Resource(object): 'version': metadata.get('version', ''), 'meta_inputs': inputs, 'tags': tags, - 'state': RESOURCE_STATE.created.name + 'state': RESOURCE_STATE.created.name, + 'manager': metadata.get('manager') }) self.create_inputs(args) @@ -285,6 +286,15 @@ class Resource(object): receiver.db_obj.save_lazy() self.db_obj.save_lazy() + def prefetch(self): + if not self.db_obj.manager: + return + + manager_path = os.path.join( + self.db_obj.base_path, self.db_obj.manager) + data = json.dumps(self.args) + rst = utils.communicate([manager_path], data) + self.update(json.loads(rst)) def load(name): r = DBResource.get(name) diff --git a/solar/dblayer/solar_models.py b/solar/dblayer/solar_models.py index c006dfa1..4b809026 100644 --- a/solar/dblayer/solar_models.py +++ b/solar/dblayer/solar_models.py @@ 
-715,6 +715,7 @@ class Resource(Model): meta_inputs = Field(dict, default=dict) state = Field(str) # on_set/on_get would be useful events = Field(list, default=list) + manager = Field(str) inputs = InputsField(default=dict) tags = TagsField(default=list) diff --git a/solar/utils.py b/solar/utils.py index 31c8ffd7..0b0a0c54 100644 --- a/solar/utils.py +++ b/solar/utils.py @@ -18,6 +18,7 @@ import json import logging import os import uuid +from subprocess import Popen, PIPE, STDOUT from jinja2 import Environment import yaml @@ -34,6 +35,10 @@ def to_pretty_json(data): return json.dumps(data, indent=4) +def communicate(command, data): + popen = Popen(command, stdout=PIPE, stdin=PIPE, stderr=STDOUT) + return popen.communicate(input=data)[0] + # Configure jinja2 filters jinja_env_with_filters = Environment() jinja_env_with_filters.filters['to_json'] = to_json From 162c0731e3f05b508a8147b3e7dcf2c15d8d80f3 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 11:59:21 +0200 Subject: [PATCH 10/51] Add naive_sync driver which will copy resource from solar location --- resources/sources/meta.yaml | 7 ++++++ solar/core/handlers/__init__.py | 4 +++- solar/core/resource/resource.py | 5 ++--- solar/solar/core/handlers/naive_sync.py | 30 +++++++++++++++++++++++++ templates/sources.yaml | 8 +++++++ 5 files changed, 50 insertions(+), 4 deletions(-) create mode 100644 resources/sources/meta.yaml create mode 100644 solar/solar/core/handlers/naive_sync.py create mode 100644 templates/sources.yaml diff --git a/resources/sources/meta.yaml b/resources/sources/meta.yaml new file mode 100644 index 00000000..71057781 --- /dev/null +++ b/resources/sources/meta.yaml @@ -0,0 +1,7 @@ +id: sources +handler: naive_sync +version: 1.0.0 +input: + sources: + schema: [{'src': 'str!', 'dst': 'str!'}] + value: [] diff --git a/solar/core/handlers/__init__.py b/solar/core/handlers/__init__.py index 530da841..2090d97d 100644 --- a/solar/core/handlers/__init__.py +++ 
b/solar/core/handlers/__init__.py @@ -18,6 +18,7 @@ from solar.core.handlers.ansible_playbook import AnsiblePlaybook from solar.core.handlers.base import Empty from solar.core.handlers.puppet import Puppet, PuppetV2 from solar.core.handlers.shell import Shell +from solar.core.handlers.naive_sync import NaiveSync HANDLERS = {'ansible': AnsibleTemplate, @@ -25,7 +26,8 @@ HANDLERS = {'ansible': AnsibleTemplate, 'shell': Shell, 'puppet': Puppet, 'none': Empty, - 'puppetv2': PuppetV2} + 'puppetv2': PuppetV2, + 'naive_sync': NaiveSync} def get(handler_name): diff --git a/solar/core/resource/resource.py b/solar/core/resource/resource.py index 0fb53a58..c97f34b3 100644 --- a/solar/core/resource/resource.py +++ b/solar/core/resource/resource.py @@ -90,7 +90,7 @@ class Resource(object): 'meta_inputs': inputs, 'tags': tags, 'state': RESOURCE_STATE.created.name, - 'manager': metadata.get('manager') + 'manager': metadata.get('manager', '') }) self.create_inputs(args) @@ -292,8 +292,7 @@ class Resource(object): manager_path = os.path.join( self.db_obj.base_path, self.db_obj.manager) - data = json.dumps(self.args) - rst = utils.communicate([manager_path], data) + rst = utils.communicate([manager_path], json.dumps(self.args)) self.update(json.loads(rst)) def load(name): diff --git a/solar/solar/core/handlers/naive_sync.py b/solar/solar/core/handlers/naive_sync.py new file mode 100644 index 00000000..c06b7236 --- /dev/null +++ b/solar/solar/core/handlers/naive_sync.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from solar.core.handlers.base import BaseHandler + + +class NaiveSync(BaseHandler): + + def action(self, resource, action_name): + # it is inconsistent with handlers because action_name + # is totally useless piece of info here + + args = resource.args + # this src seems not intuitive to me, wo context it is impossible + # to understand where src comes from + for item in args['sources']: + self.transport_sync.copy(resource, item['src'], item['dst']) + self.transport_sync.sync_all() diff --git a/templates/sources.yaml b/templates/sources.yaml new file mode 100644 index 00000000..a4f13e38 --- /dev/null +++ b/templates/sources.yaml @@ -0,0 +1,8 @@ +id: sources +resources: + - id: sources{{index}} + from: resources/sources + location: {{node}} + values: + sources: + - {src: /tmp/sources_test, dst: /tmp/sources_test} From cd3f1978b3e04f81b82e25b135d2cb7c8a35e445 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 14:48:20 +0200 Subject: [PATCH 11/51] Add manager globals.pp --- f2s/resources/role_data/managers/from_file.py | 13 + f2s/resources/role_data/managers/globals.pp | 294 ++++++++ f2s/resources/role_data/managers/globals.py | 54 ++ .../role_data/managers/test_sample.yaml | 695 ++++++++++++++++++ f2s/resources/role_data/meta.yaml | 8 +- 5 files changed, 1062 insertions(+), 2 deletions(-) create mode 100755 f2s/resources/role_data/managers/from_file.py create mode 100644 f2s/resources/role_data/managers/globals.pp create mode 100755 f2s/resources/role_data/managers/globals.py create mode 100644 f2s/resources/role_data/managers/test_sample.yaml diff --git a/f2s/resources/role_data/managers/from_file.py b/f2s/resources/role_data/managers/from_file.py new file mode 100755 index 00000000..cc3e62a1 --- /dev/null +++ b/f2s/resources/role_data/managers/from_file.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python + +import os +import sys +import yaml +import 
json + +CURDIR = os.path.dirname(os.path.realpath(__file__)) + +with open(os.path.join(CURDIR, 'test_sample.yaml')) as f: + ARGS = yaml.safe_load(f) + +sys.stdout.write(json.dumps(ARGS)) diff --git a/f2s/resources/role_data/managers/globals.pp b/f2s/resources/role_data/managers/globals.pp new file mode 100644 index 00000000..c9bdf906 --- /dev/null +++ b/f2s/resources/role_data/managers/globals.pp @@ -0,0 +1,294 @@ +notice('MODULAR: globals.pp') + +$service_token_off = false + +$globals_yaml_file = "/etc/puppet/${uid}globals.yaml" + +# remove cached globals values before anything else +remove_file($globals_yaml_file) + +$network_scheme = hiera_hash('network_scheme', {}) +if empty($network_scheme) { + fail("Network_scheme not given in the astute.yaml") +} +$network_metadata = hiera_hash('network_metadata', {}) +if empty($network_metadata) { + fail("Network_metadata not given in the astute.yaml") +} + +$node_name = regsubst(hiera('fqdn', $::hostname), '\..*$', '') +$node = $network_metadata['nodes'][$node_name] +if empty($node) { + fail("Node hostname is not defined in the astute.yaml") +} + +prepare_network_config($network_scheme) + +# DEPRICATED +$nodes_hash = hiera('nodes', {}) + +$deployment_mode = hiera('deployment_mode', 'ha_compact') +$roles = $node['node_roles'] +$storage_hash = hiera('storage', {}) +$syslog_hash = hiera('syslog', {}) +$base_syslog_hash = hiera('base_syslog', {}) +$sahara_hash = hiera('sahara', {}) +$murano_hash = hiera('murano', {}) +$heat_hash = hiera_hash('heat', {}) +$vcenter_hash = hiera('vcenter', {}) +$nova_hash = hiera_hash('nova', {}) +$mysql_hash = hiera('mysql', {}) +$rabbit_hash = hiera_hash('rabbit', {}) +$glance_hash = hiera_hash('glance', {}) +$swift_hash = hiera('swift', {}) +$cinder_hash = hiera_hash('cinder', {}) +$ceilometer_hash = hiera('ceilometer',{}) +$access_hash = hiera_hash('access', {}) +$mp_hash = hiera('mp', {}) +$keystone_hash = merge({'service_token_off' => $service_token_off}, + hiera_hash('keystone', {})) + 
+$node_role = hiera('role') +$dns_nameservers = hiera('dns_nameservers', []) +$use_ceilometer = $ceilometer_hash['enabled'] +$use_neutron = hiera('quantum', false) +$use_ovs = hiera('use_ovs', $use_neutron) +$verbose = true +$debug = hiera('debug', false) +$use_monit = false +$master_ip = hiera('master_ip') +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') +$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3') +$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') +$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') +$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') +$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') +$syslog_log_facility_heat = hiera('syslog_log_facility_heat','LOG_LOCAL0') +$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0') +$syslog_log_facility_ceilometer = hiera('syslog_log_facility_ceilometer','LOG_LOCAL0') +$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') + +$nova_report_interval = hiera('nova_report_interval', 60) +$nova_service_down_time = hiera('nova_service_down_time', 180) +$apache_ports = hiera_array('apache_ports', ['80', '8888', '5000', '35357']) + +$openstack_version = hiera('openstack_version', + { + 'keystone' => 'installed', + 'glance' => 'installed', + 'horizon' => 'installed', + 'nova' => 'installed', + 'novncproxy' => 'installed', + 'cinder' => 'installed', + } +) + +$nova_rate_limits = hiera('nova_rate_limits', + { + 'POST' => 100000, + 'POST_SERVERS' => 100000, + 'PUT' => 1000, + 'GET' => 100000, + 'DELETE' => 100000 + } +) + +$cinder_rate_limits = hiera('cinder_rate_limits', + { + 'POST' => 100000, + 'POST_SERVERS' => 100000, + 'PUT' => 100000, + 'GET' => 100000, + 'DELETE' => 100000 + } +) + +$default_gateway = get_default_gateways() +$public_vip = 
$network_metadata['vips']['public']['ipaddr'] +$management_vip = $network_metadata['vips']['management']['ipaddr'] +$public_vrouter_vip = $network_metadata['vips']['vrouter_pub']['ipaddr'] +$management_vrouter_vip = $network_metadata['vips']['vrouter']['ipaddr'] + +$database_vip = is_hash($network_metadata['vips']['database']) ? { + true => pick($network_metadata['vips']['database']['ipaddr'], $management_vip), + default => $management_vip +} +$service_endpoint = is_hash($network_metadata['vips']['service_endpoint']) ? { + true => pick($network_metadata['vips']['service_endpoint']['ipaddr'], $management_vip), + default => $management_vip +} + +if $use_neutron { + $novanetwork_params = {} + $neutron_config = hiera_hash('quantum_settings') + $network_provider = 'neutron' + $neutron_db_password = $neutron_config['database']['passwd'] + $neutron_user_password = $neutron_config['keystone']['admin_password'] + $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] + $base_mac = $neutron_config['L2']['base_mac'] + $management_network_range = get_network_role_property('mgmt/vip', 'network') +} else { + $neutron_config = {} + $novanetwork_params = hiera('novanetwork_parameters') + $network_size = $novanetwork_params['network_size'] + $num_networks = $novanetwork_params['num_networks'] + $network_provider = 'nova' + if ( $novanetwork_params['network_manager'] == 'FlatDHCPManager') { + $private_int = get_network_role_property('novanetwork/fixed', 'interface') + } else { + $private_int = get_network_role_property('novanetwork/vlan', 'interface') + $vlan_start = $novanetwork_params['vlan_start'] + $network_config = { + 'vlan_start' => $vlan_start, + } + } + $network_manager = "nova.network.manager.${novanetwork_params['network_manager']}" + $management_network_range = hiera('management_network_range') +} + +if $node_role == 'primary-controller' { + $primary_controller = true +} else { + $primary_controller = false +} + +$controllers_hash = 
get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$mountpoints = filter_hash($mp_hash,'point') + +# AMQP configuration +$queue_provider = hiera('queue_provider','rabbitmq') +$rabbit_ha_queues = true + +if !$rabbit_hash['user'] { + $rabbit_hash['user'] = 'nova' +} + +$amqp_port = hiera('amqp_ports', '5673') +if hiera('amqp_hosts', false) { + # using pre-defined in astute.yaml RabbitMQ servers + $amqp_hosts = hiera('amqp_hosts') +} else { + # using RabbitMQ servers on controllers + # todo(sv): switch from 'controller' nodes to 'rmq' nodes as soon as it was implemented as additional node-role + $controllers_with_amqp_server = get_node_to_ipaddr_map_by_network_role($controllers_hash, 'mgmt/messaging') + $amqp_nodes = ipsort(values($controllers_with_amqp_server)) + # amqp_hosts() randomize order of RMQ endpoints and put local one first + $amqp_hosts = amqp_hosts($amqp_nodes, $amqp_port, get_network_role_property('mgmt/messaging', 'ipaddr')) +} + +# MySQL and SQLAlchemy backend configuration +$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera') +$max_pool_size = hiera('max_pool_size', min($::processorcount * 5 + 0, 30 + 0)) +$max_overflow = hiera('max_overflow', min($::processorcount * 5 + 0, 60 + 0)) +$max_retries = hiera('max_retries', '-1') +$idle_timeout = hiera('idle_timeout','3600') +$nova_db_password = $nova_hash['db_password'] +$sql_connection = "mysql://nova:${nova_db_password}@${database_vip}/nova?read_timeout = 6 0" +$mirror_type = hiera('mirror_type', 'external') +$multi_host = hiera('multi_host', true) + +# Determine who should get the volume service +if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { + $manage_volumes = 'iscsi' +} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { + $manage_volumes = 'vmdk' +} elsif ($storage_hash['volumes_ceph']) { + $manage_volumes = 'ceph' +} else { + $manage_volumes = false +} + +# Define ceph-related variables +$ceph_primary_monitor_node 
= get_nodes_hash_by_roles($network_metadata, ['primary-controller']) +$ceph_monitor_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$ceph_rgw_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +#Determine who should be the default backend +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' + $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] +} elsif ($storage_hash['images_vcenter']) { + $glance_backend = 'vmware' + $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] +} else { + $glance_backend = 'file' + $glance_known_stores = false +} + +# Define ceilometer-related variables: +# todo: use special node-roles instead controllers in the future +$ceilometer_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define memcached-related variables: +$memcache_roles = hiera('memcache_roles', ['primary-controller', 'controller']) + +# Define node roles, that will carry corosync/pacemaker +$corosync_roles = hiera('corosync_roles', ['primary-controller', 'controller']) + +# Define cinder-related variables +# todo: use special node-roles instead controllers in the future +$cinder_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define horizon-related variables: +# todo: use special node-roles instead controllers in the future +$horizon_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define swift-related variables +# todo(sv): use special node-roles instead controllers in the future +$swift_master_role = 'primary-controller' +$swift_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$swift_proxies = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$swift_proxy_caches = get_nodes_hash_by_roles($network_metadata, 
['primary-controller', 'controller']) # memcache for swift +$is_primary_swift_proxy = $primary_controller + +# Define murano-related variables +$murano_roles = ['primary-controller', 'controller'] + +# Define heat-related variables: +$heat_roles = ['primary-controller', 'controller'] + +# Define sahara-related variable +$sahara_roles = ['primary-controller', 'controller'] + +# Define ceilometer-releated parameters +if !$ceilometer_hash['event_time_to_live'] { $ceilometer_hash['event_time_to_live'] = '604800'} +if !$ceilometer_hash['metering_time_to_live'] { $ceilometer_hash['metering_time_to_live'] = '604800' } +if !$ceilometer_hash['http_timeout'] { $ceilometer_hash['http_timeout'] = '600' } + +# Define database-related variables: +# todo: use special node-roles instead controllers in the future +$database_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define Nova-API variables: +# todo: use special node-roles instead controllers in the future +$nova_api_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define mongo-related variables +$mongo_roles = ['primary-mongo', 'mongo'] + +# Define neutron-related variables: +# todo: use special node-roles instead controllers in the future +$neutron_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +#Define Ironic-related variables: +$ironic_api_nodes = $controllers_hash + +# Change nova_hash to add vnc port to it +# TODO(sbog): change this when we will get rid of global hashes +$public_ssl_hash = hiera('public_ssl') +if $public_ssl_hash['services'] { + $nova_hash['vncproxy_protocol'] = 'https' +} else { + $nova_hash['vncproxy_protocol'] = 'http' +} + +# save all these global variables into hiera yaml file for later use +# by other manifests with hiera function +file { $globals_yaml_file : + ensure => 'present', + mode => '0644', + owner => 'root', + group => 'root', + content => 
template('osnailyfacter/globals_yaml.erb') +} diff --git a/f2s/resources/role_data/managers/globals.py b/f2s/resources/role_data/managers/globals.py new file mode 100755 index 00000000..b9c34dee --- /dev/null +++ b/f2s/resources/role_data/managers/globals.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import os +import sys +from subprocess import Popen, PIPE +import yaml +import json + +CURDIR = os.path.dirname(os.path.realpath(__file__)) + +ARGS = json.loads(sys.stdin.read()) + +def execute(command, **env_vars): + env = os.environ.copy() + for var in env_vars: + env[var] = env_vars[var] + popen = Popen(command, stdin=PIPE, stdout=PIPE, env=env) + return popen.communicate() + +def prepare_hiera(): + hiera_conf = """:backends: + - yaml +:yaml: + :datadir: /etc/puppet/hieradata +:hierarchy: + - {} +""".format(ARGS['uid']) + with open('/etc/puppet/' + ARGS['uid'] + 'globals.yaml', 'w') as f: + f.write('') + + with open('/etc/puppet/hiera.yaml', 'w') as f: + f.write(hiera_conf) + + with open('/etc/puppet/hieradata/{}.yaml'.format(ARGS['uid']), 'w') as f: + f.write(yaml.safe_dump(ARGS)) + +def run_command(): + cmd = [ + 'puppet', 'apply', '--modulepath={}'.format(ARGS['puppet_modules']), + os.path.join(CURDIR, 'globals.pp')] + return execute(cmd) + +def collect_results(): + path = '/etc/puppet/' + ARGS['uid'] + 'globals.yaml' + with open(path) as f: + return yaml.safe_load(f) + +def main(): + prepare_hiera() + run_command() + rst = collect_results() + sys.stdout.write(json.dumps(rst)) + +if __name__ == '__main__': + main() diff --git a/f2s/resources/role_data/managers/test_sample.yaml b/f2s/resources/role_data/managers/test_sample.yaml new file mode 100644 index 00000000..d1907e45 --- /dev/null +++ b/f2s/resources/role_data/managers/test_sample.yaml @@ -0,0 +1,695 @@ +access: + email: admin@localhost + metadata: + label: Access + weight: 10 + password: admin + tenant: admin + user: admin +auth_key: '' +auto_assign_floating_ip: false +base_syslog: + syslog_port: '514' + syslog_server:
10.108.0.2 +ceilometer: + db_password: ZcffCIm5 + enabled: false + metering_secret: 7aqxzabx + user_password: FQUfTQ6a +cinder: + db_password: 71kNkN9U + fixed_key: 0ded0202e2a355df942df2bacbaba992658a0345f68f2db6e1bdb6dbb8f682cf + user_password: O2st17AP +cobbler: + profile: ubuntu_1404_x86_64 +corosync: + group: 226.94.1.1 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: '12000' + verified: false +debug: false +deployment_id: 38 +deployment_mode: ha_compact +external_dns: + dns_list: 8.8.8.8, 8.8.4.4 + metadata: + label: Upstream DNS + weight: 90 +external_mongo: + hosts_ip: '' + metadata: + label: External MongoDB + restrictions: + - action: hide + condition: settings:additional_components.mongo.value == false + weight: 20 + mongo_db_name: ceilometer + mongo_password: ceilometer + mongo_replset: '' + mongo_user: ceilometer +external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: 0.pool.ntp.org, 1.pool.ntp.org +public_ssl: + metadata: + label: Public TLS + weight: 110 + horizon: true + services: true + cert_source: self_signed + cert_data: + content: 'somedataaboutyourkeypair' + hostname: public.fuel.local +fail_if_error: false +fqdn: node-118.test.domain.local +fuel_version: '6.1' +glance: + db_password: 0UYCFNfc + image_cache_max_size: '5368709120' + user_password: 94lWbeNn +heat: + auth_encryption_key: 8edb899a7e81e56abe51639880aa32dd + db_password: AuaPc3Yq + enabled: true + rabbit_password: Nmn2wr9S + user_password: EWJfBLJ9 +kernel_params: + kernel: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 + nomodeset + metadata: + label: Kernel parameters + weight: 40 +keystone: + admin_token: 0be9G8hj + db_password: 32TWl29R +last_controller: node-131 +libvirt_type: qemu +management_network_range: 192.168.0.0/24 +management_vip: 192.168.0.6 +management_vrouter_vip: 192.168.0.7 +master_ip: 10.108.0.2 +metadata: + label: Common + weight: 30 +mongo: + enabled: false +mp: 
+- point: '1' + weight: '1' +- point: '2' + weight: '2' +murano: + db_password: R3SuvZbh + enabled: true + rabbit_password: ZNdTAgF3 + user_password: xP8WtHQw +murano_settings: + metadata: + label: Murano Settings + restrictions: + - action: hide + condition: settings:additional_components.murano.value == false + weight: 20 + murano_repo_url: http://catalog.openstack.org/ +mysql: + root_password: Lz18BpbQ + wsrep_password: JrlrVOHu +network_metadata: + nodes: + node-118: + swift_zone: '1' + uid: '118' + fqdn: node-118.test.domain.local + network_roles: + keystone/api: 192.168.0.1 + neutron/api: 192.168.0.1 + mgmt/database: 192.168.0.1 + sahara/api: 192.168.0.1 + heat/api: 192.168.0.1 + ceilometer/api: 192.168.0.1 + ex: + ceph/public: 192.168.0.1 + ceph/radosgw: + management: 192.168.0.1 + swift/api: 192.168.0.1 + mgmt/api: 192.168.0.1 + storage: 192.168.1.1 + mgmt/corosync: 192.168.0.1 + cinder/api: 192.168.0.1 + public/vip: + swift/replication: 192.168.1.1 + mgmt/messaging: 192.168.0.1 + neutron/mesh: 192.168.0.1 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.1 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.1 + mgmt/vip: 192.168.0.1 + murano/api: 192.168.0.1 + nova/api: 192.168.0.1 + horizon: 192.168.0.1 + mgmt/memcache: 192.168.0.1 + cinder/iscsi: 192.168.1.1 + ceph/replication: 192.168.1.1 + user_node_name: Untitled (6a:e7) + node_roles: + - cinder + name: node-118 + node-128: + swift_zone: '1' + uid: '128' + fqdn: node-128.test.domain.local + network_roles: + keystone/api: 192.168.0.2 + neutron/api: 192.168.0.2 + mgmt/database: 192.168.0.2 + sahara/api: 192.168.0.2 + heat/api: 192.168.0.2 + ceilometer/api: 192.168.0.2 + ex: 172.16.0.2 + ceph/public: 192.168.0.2 + ceph/radosgw: 172.16.0.2 + management: 192.168.0.2 + swift/api: 192.168.0.2 + mgmt/api: 192.168.0.2 + storage: 192.168.1.2 + mgmt/corosync: 192.168.0.2 + cinder/api: 192.168.0.2 + public/vip: 172.16.0.2 + swift/replication: 192.168.1.2 + mgmt/messaging: 192.168.0.2 
+ neutron/mesh: 192.168.0.2 + admin/pxe: 10.108.0.3 + mongo/db: 192.168.0.2 + neutron/private: + neutron/floating: + fw-admin: 10.108.0.3 + glance/api: 192.168.0.2 + mgmt/vip: 192.168.0.2 + murano/api: 192.168.0.2 + nova/api: 192.168.0.2 + horizon: 192.168.0.2 + mgmt/memcache: 192.168.0.2 + cinder/iscsi: 192.168.1.2 + ceph/replication: 192.168.1.2 + user_node_name: Untitled (6a:e7) + node_roles: + - primary-controller + name: node-128 + node-129: + swift_zone: '1' + uid: '129' + fqdn: node-129.test.domain.local + network_roles: + keystone/api: 192.168.0.3 + neutron/api: 192.168.0.3 + mgmt/database: 192.168.0.3 + sahara/api: 192.168.0.3 + heat/api: 192.168.0.3 + ceilometer/api: 192.168.0.3 + ex: 172.16.0.3 + ceph/public: 192.168.0.3 + ceph/radosgw: 172.16.0.3 + management: 192.168.0.3 + swift/api: 192.168.0.3 + mgmt/api: 192.168.0.3 + storage: 192.168.1.3 + mgmt/corosync: 192.168.0.3 + cinder/api: 192.168.0.3 + public/vip: 172.16.0.3 + swift/replication: 192.168.1.3 + mgmt/messaging: 192.168.0.3 + neutron/mesh: 192.168.0.3 + admin/pxe: 10.108.0.6 + mongo/db: 192.168.0.3 + neutron/private: + neutron/floating: + fw-admin: 10.108.0.6 + glance/api: 192.168.0.3 + mgmt/vip: 192.168.0.3 + murano/api: 192.168.0.3 + nova/api: 192.168.0.3 + horizon: 192.168.0.3 + mgmt/memcache: 192.168.0.3 + cinder/iscsi: 192.168.1.3 + ceph/replication: 192.168.1.3 + user_node_name: Untitled (6a:e7) + node_roles: + - controller + name: node-129 + node-131: + swift_zone: '1' + uid: '131' + fqdn: node-131.test.domain.local + network_roles: + keystone/api: 192.168.0.4 + neutron/api: 192.168.0.4 + mgmt/database: 192.168.0.4 + sahara/api: 192.168.0.4 + heat/api: 192.168.0.4 + ceilometer/api: 192.168.0.4 + ex: 172.16.0.4 + ceph/public: 192.168.0.4 + ceph/radosgw: 172.16.0.4 + management: 192.168.0.4 + swift/api: 192.168.0.4 + mgmt/api: 192.168.0.4 + storage: 192.168.1.4 + mgmt/corosync: 192.168.0.4 + cinder/api: 192.168.0.4 + public/vip: 172.16.0.4 + swift/replication: 192.168.1.4 + mgmt/messaging: 
192.168.0.4 + neutron/mesh: 192.168.0.4 + admin/pxe: 10.109.0.9 + mongo/db: 192.168.0.4 + neutron/private: + neutron/floating: + fw-admin: 10.109.0.9 + glance/api: 192.168.0.4 + mgmt/vip: 192.168.0.4 + murano/api: 192.168.0.4 + nova/api: 192.168.0.4 + horizon: 192.168.0.4 + mgmt/memcache: 192.168.0.4 + cinder/iscsi: 192.168.1.4 + ceph/replication: 192.168.1.4 + user_node_name: Untitled (6a:e7) + node_roles: + - controller + name: node-131 + node-132: + swift_zone: '1' + uid: '132' + fqdn: node-132.test.domain.local + network_roles: + keystone/api: 192.168.0.5 + neutron/api: 192.168.0.5 + mgmt/database: 192.168.0.5 + sahara/api: 192.168.0.5 + heat/api: 192.168.0.5 + ceilometer/api: 192.168.0.5 + ex: + ceph/public: 192.168.0.5 + ceph/radosgw: + management: 192.168.0.5 + swift/api: 192.168.0.5 + mgmt/api: 192.168.0.5 + storage: 192.168.1.5 + mgmt/corosync: 192.168.0.5 + cinder/api: 192.168.0.5 + public/vip: + swift/replication: 192.168.1.5 + mgmt/messaging: 192.168.0.5 + neutron/mesh: 192.168.0.5 + admin/pxe: 10.108.0.4 + mongo/db: 192.168.0.5 + neutron/private: + neutron/floating: + fw-admin: 10.108.0.4 + glance/api: 192.168.0.5 + mgmt/vip: 192.168.0.5 + murano/api: 192.168.0.5 + nova/api: 192.168.0.5 + horizon: 192.168.0.5 + mgmt/memcache: 192.168.0.5 + cinder/iscsi: 192.168.1.5 + ceph/replication: 192.168.1.5 + user_node_name: Untitled (6a:e7) + node_roles: + - compute + name: node-132 + vips: + vrouter: + ipaddr: 192.168.0.3 + management: + ipaddr: 192.168.0.2 + public: + ipaddr: 10.109.1.2 + vrouter_pub: + ipaddr: 10.109.1.3 +network_scheme: + endpoints: + br-fw-admin: + IP: + - 10.108.0.7/24 + br-mgmt: + IP: + - 192.168.0.1/24 + gateway: 192.168.0.7 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 101 + br-storage: + IP: + - 192.168.1.1/24 + vendor_specific: + phy_interfaces: + - eth0 + vlans: 102 + interfaces: + eth0: + vendor_specific: + bus_info: '0000:00:03.0' + driver: e1000 + eth1: + vendor_specific: + bus_info: '0000:00:04.0' + driver: e1000 + eth2: 
+ vendor_specific: + bus_info: '0000:00:05.0' + driver: e1000 + eth3: + vendor_specific: + bus_info: '0000:00:06.0' + driver: e1000 + eth4: + vendor_specific: + bus_info: '0000:00:07.0' + driver: e1000 + provider: lnx + roles: + ex: br-ex + public/vip: br-ex + neutron/floating: br-floating + storage: br-storage + keystone/api: br-mgmt + neutron/api: br-mgmt + mgmt/database: br-mgmt + sahara/api: br-mgmt + ceilometer/api: br-mgmt + mgmt/vip: br-mgmt + ceph/public: br-mgmt + mgmt/messaging: br-mgmt + management: br-mgmt + swift/api: br-mgmt + mgmt/api: br-mgmt + storage: br-storage + mgmt/corosync: br-mgmt + cinder/api: br-mgmt + swift/replication: br-storage + neutron/mesh: br-mgmt + admin/pxe: br-fw-admin + mongo/db: br-mgmt + neutron/private: br-prv + fw-admin: br-fw-admin + glance/api: br-mgmt + heat/api: br-mgmt + murano/api: br-mgmt + nova/api: br-mgmt + horizon: br-mgmt + mgmt/memcache: br-mgmt + cinder/iscsi: br-storage + ceph/replication: br-storage + neutron/mesh: br-mgmt + transformations: + - action: add-br + name: br-fw-admin + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-port + bridge: br-fw-admin + name: eth0 + - action: add-port + bridge: br-storage + name: eth0.102 + - action: add-port + bridge: br-mgmt + name: eth0.101 + version: '1.1' +neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: disabled + vf_num: '16' +nodes: +- fqdn: node-118.test.domain.local + internal_address: 192.168.0.1 + internal_netmask: 255.255.255.0 + name: node-118 + role: cinder + storage_address: 192.168.1.1 + storage_netmask: 255.255.255.0 + swift_zone: '118' + uid: '118' + user_node_name: Untitled (1d:4b) +- fqdn: node-128.test.domain.local + internal_address: 192.168.0.2 + internal_netmask: 255.255.255.0 + name: node-128 + public_address: 172.16.0.2 + public_netmask: 255.255.255.0 + role: primary-controller + storage_address: 192.168.1.2 + storage_netmask: 
255.255.255.0 + swift_zone: '128' + uid: '128' + user_node_name: Untitled (6f:9d) +- fqdn: node-129.test.domain.local + internal_address: 192.168.0.3 + internal_netmask: 255.255.255.0 + name: node-129 + public_address: 172.16.0.3 + public_netmask: 255.255.255.0 + role: controller + storage_address: 192.168.1.3 + storage_netmask: 255.255.255.0 + swift_zone: '129' + uid: '129' + user_node_name: Untitled (74:27) +- fqdn: node-131.test.domain.local + internal_address: 192.168.0.4 + internal_netmask: 255.255.255.0 + name: node-131 + public_address: 172.16.0.4 + public_netmask: 255.255.255.0 + role: controller + storage_address: 192.168.1.4 + storage_netmask: 255.255.255.0 + swift_zone: '131' + uid: '131' + user_node_name: Untitled (34:45) +- fqdn: node-132.test.domain.local + internal_address: 192.168.0.5 + internal_netmask: 255.255.255.0 + name: node-132 + role: compute + storage_address: 192.168.1.5 + storage_netmask: 255.255.255.0 + swift_zone: '132' + uid: '132' + user_node_name: Untitled (18:c9) +nova: + db_password: mqnsUMgC + state_path: /var/lib/nova + user_password: fj4wVCEs +nova_quota: false +online: true +openstack_version: 2014.2-6.1 +openstack_version_prev: null +priority: 300 +provision: + codename: trusty + image_data: + /: + container: gzip + format: ext4 + uri: http://10.108.0.2:8080/targetimages/env_38_ubuntu_1404_amd64.img.gz + /boot: + container: gzip + format: ext2 + uri: http://10.108.0.2:8080/targetimages/env_38_ubuntu_1404_amd64-boot.img.gz + metadata: + label: Provision + weight: 80 + method: image +public_network_assignment: + assign_to_all_nodes: false + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 +neutron_advanced_configuration: + neutron_dvr: true + neutron_l2_pop: true +public_vip: 172.16.0.5 +public_vrouter_vip: 172.16.0.6 +puppet: + manifests: rsync://10.108.0.2:/puppet/2014.2-6.1/manifests/ + modules: 
rsync://10.108.0.2:/puppet/2014.2-6.1/modules/ +puppet_debug: true +puppet_modules: /vagrant/f2s/fuel-library/deployment/puppet +quantum: true +quantum_settings: + L2: + base_mac: fa:16:3e:00:00:00 + phys_nets: {} + segmentation_type: tun + tunnel_id_ranges: 2:65535 + L3: + use_namespaces: true + database: + passwd: QRpCfPk8 + keystone: + admin_password: oT56DSZF + metadata: + metadata_proxy_shared_secret: fp618p5V + predefined_networks: + net04: + L2: + network_type: gre + physnet: null + router_ext: false + segment_id: null + L3: + enable_dhcp: true + floating: null + gateway: 192.168.111.1 + nameservers: + - 8.8.4.4 + - 8.8.8.8 + subnet: 192.168.111.0/24 + shared: false + tenant: admin + net04_ext: + L2: + network_type: local + physnet: null + router_ext: true + segment_id: null + L3: + enable_dhcp: false + floating: 172.16.0.130:172.16.0.254 + gateway: 172.16.0.1 + nameservers: [] + subnet: 172.16.0.0/24 + shared: false + tenant: admin +rabbit: + password: c7fQJeSe +repo_setup: + installer_initrd: + local: /var/www/nailgun/ubuntu/x86_64/images/initrd.gz + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz + installer_kernel: + local: /var/www/nailgun/ubuntu/x86_64/images/linux + remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux + metadata: + label: Repositories + weight: 50 + repos: + - name: ubuntu + priority: null + section: main universe multiverse + suite: trusty + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-updates + priority: null + section: main universe multiverse + suite: trusty-updates + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: ubuntu-security + priority: null + section: main universe multiverse + suite: trusty-security + type: deb + uri: http://archive.ubuntu.com/ubuntu/ + - name: mos + priority: 1050 + section: main restricted + suite: mos6.1 + type: deb + uri: 
http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-updates + priority: 1050 + section: main restricted + suite: mos6.1-updates + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-security + priority: 1050 + section: main restricted + suite: mos6.1-security + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ + - name: mos-holdback + priority: 1100 + section: main restricted + suite: mos6.1-holdback + type: deb + uri: http://mirror.fuel-infra.org/mos/ubuntu/ +resume_guests_state_on_host_boot: true +role: cinder +sahara: + db_password: f0jl4v47 + enabled: true + user_password: pJc2zAOx +status: discover +storage: + ephemeral_ceph: false + images_ceph: false + images_vcenter: false + iser: false + metadata: + label: Storage + weight: 60 + objects_ceph: false + osd_pool_size: '2' + pg_num: 128 + volumes_ceph: false + volumes_lvm: true +storage_network_range: 192.168.1.0/24 +swift: + user_password: BP92J6tg +syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: '514' + syslog_server: '' + syslog_transport: tcp +test_vm_image: + container_format: bare + disk_format: qcow2 + glance_properties: '' + img_name: TestVM + img_path: /usr/share/cirros-testvm/cirros-x86_64-disk.img + min_ram: 64 + os_name: cirros + public: 'true' +uid: '118' +use_cinder: true +use_cow_images: true +use_vcenter: false +user_node_name: Untitled (1d:4b) +workloads_collector: + enabled: true + metadata: + label: Workloads Collector User + restrictions: + - action: hide + condition: 'true' + weight: 10 + password: 1r3ROjcQ + tenant: services + username: workloads_collector diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index 78d257d4..c12b81ec 100644 --- a/f2s/resources/role_data/meta.yaml +++ b/f2s/resources/role_data/meta.yaml @@ -2,9 +2,13 @@ id: role_data handler: none version: 0.0.1 +manager: manager.py input: # should be auto-generated based on outputs from globals.pp - var1: + puppet_modules: type: str! 
- value: meta + value: + uid: + type: str! + value: From 4e05ce3adf4e570cc2666e2cf3856e7e794cfd52 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 15:12:38 +0200 Subject: [PATCH 12/51] Allow multiple managers --- f2s/resources/role_data/meta.yaml | 4 +++- resources/ex_managed/meta.yaml | 3 ++- solar/core/resource/resource.py | 14 ++++++++------ solar/dblayer/solar_models.py | 2 +- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index c12b81ec..b518651e 100644 --- a/f2s/resources/role_data/meta.yaml +++ b/f2s/resources/role_data/meta.yaml @@ -2,7 +2,9 @@ id: role_data handler: none version: 0.0.1 -manager: manager.py +managers: + - managers/from_file.py + - managers/globals.py input: # should be auto-generated based on outputs from globals.pp puppet_modules: diff --git a/resources/ex_managed/meta.yaml b/resources/ex_managed/meta.yaml index 79a4a666..fd280cea 100644 --- a/resources/ex_managed/meta.yaml +++ b/resources/ex_managed/meta.yaml @@ -1,7 +1,8 @@ id: managed handler: none version: 1.0.0 -manager: managers/manager.py +managers: + - managers/manager.py input: val: schema: int! 
diff --git a/solar/core/resource/resource.py b/solar/core/resource/resource.py index c97f34b3..9804d2ac 100644 --- a/solar/core/resource/resource.py +++ b/solar/core/resource/resource.py @@ -90,7 +90,7 @@ class Resource(object): 'meta_inputs': inputs, 'tags': tags, 'state': RESOURCE_STATE.created.name, - 'manager': metadata.get('manager', '') + 'managers': metadata.get('managers', []) }) self.create_inputs(args) @@ -287,13 +287,15 @@ class Resource(object): self.db_obj.save_lazy() def prefetch(self): - if not self.db_obj.manager: + if not self.db_obj.managers: return - manager_path = os.path.join( - self.db_obj.base_path, self.db_obj.manager) - rst = utils.communicate([manager_path], json.dumps(self.args)) - self.update(json.loads(rst)) + manager_stack = self.db_obj.managers + while manager_stack: + manager = manager_stack.pop(0) + manager_path = os.path.join(self.db_obj.base_path, manager) + rst = utils.communicate([manager_path], json.dumps(self.args)) + self.update(json.loads(rst)) def load(name): r = DBResource.get(name) diff --git a/solar/dblayer/solar_models.py b/solar/dblayer/solar_models.py index 4b809026..c6ef5c0f 100644 --- a/solar/dblayer/solar_models.py +++ b/solar/dblayer/solar_models.py @@ -715,7 +715,7 @@ class Resource(Model): meta_inputs = Field(dict, default=dict) state = Field(str) # on_set/on_get would be useful events = Field(list, default=list) - manager = Field(str) + managers = Field(list, default=list) inputs = InputsField(default=dict) tags = TagsField(default=list) From 51708d755c3b54dcbb352738db4e0369a360e6c3 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 15:31:17 +0200 Subject: [PATCH 13/51] Generated inputs for role data --- f2s/resources/role_data/meta.yaml | 347 +++++++++++++++++++++++++++++- 1 file changed, 340 insertions(+), 7 deletions(-) diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index b518651e..f65a2e21 100644 --- a/f2s/resources/role_data/meta.yaml +++ 
b/f2s/resources/role_data/meta.yaml @@ -7,10 +7,343 @@ managers: - managers/globals.py input: # should be auto-generated based on outputs from globals.pp - puppet_modules: - type: str! - value: - uid: - type: str! - value: - + puppet_modules: + type: str! + value: + uid: + type: str! + value: + access: + value: null + access_hash: + value: null + amqp_hosts: + value: null + amqp_port: + value: null + apache_ports: + value: null + auth_key: + value: null + auto_assign_floating_ip: + value: null + base_mac: + value: null + base_syslog: + value: null + base_syslog_hash: + value: null + ceilometer: + value: null + ceilometer_hash: + value: null + ceilometer_nodes: + value: null + ceph_monitor_nodes: + value: null + ceph_primary_monitor_node: + value: null + ceph_rgw_nodes: + value: null + cinder: + value: null + cinder_hash: + value: null + cinder_nodes: + value: null + cinder_rate_limits: + value: null + cobbler: + value: null + corosync: + value: null + corosync_roles: + value: null + custom_mysql_setup_class: + value: null + database_nodes: + value: null + database_vip: + value: null + debug: + value: null + default_gateway: + value: null + deployment_id: + value: null + deployment_mode: + value: null + dns_nameservers: + value: null + external_dns: + value: null + external_mongo: + value: null + external_ntp: + value: null + fail_if_error: + value: null + fqdn: + value: null + fuel_version: + value: null + glance: + value: null + glance_backend: + value: null + glance_hash: + value: null + glance_known_stores: + value: null + heat: + value: null + heat_hash: + value: null + heat_roles: + value: null + horizon_nodes: + value: null + idle_timeout: + value: null + ironic_api_nodes: + value: null + is_primary_swift_proxy: + value: null + kernel_params: + value: null + keystone: + value: null + keystone_hash: + value: null + last_controller: + value: null + libvirt_type: + value: null + manage_volumes: + value: null + management_network_range: + value: null + 
management_vip: + value: null + management_vrouter_vip: + value: null + master_ip: + value: null + max_overflow: + value: null + max_pool_size: + value: null + max_retries: + value: null + memcache_roles: + value: null + metadata: + value: null + mirror_type: + value: null + mongo: + value: null + mongo_roles: + value: null + mountpoints: + value: null + mp: + value: null + multi_host: + value: null + murano: + value: null + murano_hash: + value: null + murano_roles: + value: null + murano_settings: + value: null + mysql: + value: null + mysql_hash: + value: null + network_config: + value: null + network_manager: + value: null + network_metadata: + value: null + network_scheme: + value: null + network_size: + value: null + neutron_advanced_configuration: + value: null + neutron_config: + value: null + neutron_db_password: + value: null + neutron_mellanox: + value: null + neutron_metadata_proxy_secret: + value: null + neutron_nodes: + value: null + neutron_user_password: + value: null + node: + value: null + node_name: + value: null + node_role: + value: null + nodes: + value: null + nodes_hash: + value: null + nova: + value: null + nova_api_nodes: + value: null + nova_db_password: + value: null + nova_hash: + value: null + nova_quota: + value: null + nova_rate_limits: + value: null + nova_report_interval: + value: null + nova_service_down_time: + value: null + novanetwork_params: + value: null + num_networks: + value: null + online: + value: null + openstack_version: + value: null + openstack_version_prev: + value: null + primary_controller: + value: null + priority: + value: null + private_int: + value: null + provision: + value: null + public_network_assignment: + value: null + public_ssl: + value: null + public_vip: + value: null + public_vrouter_vip: + value: null + puppet: + value: null + puppet_debug: + value: null + puppet_modules: + value: null + quantum: + value: null + quantum_settings: + value: null + queue_provider: + value: null + rabbit: + value: null 
+ rabbit_ha_queues: + value: null + rabbit_hash: + value: null + repo_setup: + value: null + resume_guests_state_on_host_boot: + value: null + role: + value: null + roles: + value: null + sahara: + value: null + sahara_hash: + value: null + sahara_roles: + value: null + service_endpoint: + value: null + sql_connection: + value: null + status: + value: null + storage: + value: null + storage_hash: + value: null + storage_network_range: + value: null + swift: + value: null + swift_hash: + value: null + swift_master_role: + value: null + swift_nodes: + value: null + swift_proxies: + value: null + swift_proxy_caches: + value: null + syslog: + value: null + syslog_hash: + value: null + syslog_log_facility_ceilometer: + value: null + syslog_log_facility_ceph: + value: null + syslog_log_facility_cinder: + value: null + syslog_log_facility_glance: + value: null + syslog_log_facility_heat: + value: null + syslog_log_facility_keystone: + value: null + syslog_log_facility_murano: + value: null + syslog_log_facility_neutron: + value: null + syslog_log_facility_nova: + value: null + syslog_log_facility_sahara: + value: null + test_vm_image: + value: null + use_ceilometer: + value: null + use_cinder: + value: null + use_cow_images: + value: null + use_monit: + value: null + use_neutron: + value: null + use_ovs: + value: null + use_syslog: + value: null + use_vcenter: + value: null + user_node_name: + value: null + vcenter_hash: + value: null + verbose: + value: null + vlan_start: + value: null + workloads_collector: + value: null From 6c90653a1c0295725744498b47a9a3a75b744e8c Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 10 Nov 2015 16:20:04 +0200 Subject: [PATCH 14/51] Add resource for keys generation and vr for copying stuff --- f2s/resources/genkeys/actions/run.sh | 39 ++++++++++++++++++++++++++++ f2s/resources/genkeys/meta.yaml | 20 ++++++++++++++ f2s/vrs/prep.yaml | 23 ++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 
f2s/resources/genkeys/actions/run.sh create mode 100644 f2s/resources/genkeys/meta.yaml create mode 100644 f2s/vrs/prep.yaml diff --git a/f2s/resources/genkeys/actions/run.sh b/f2s/resources/genkeys/actions/run.sh new file mode 100644 index 00000000..1c0651f3 --- /dev/null +++ b/f2s/resources/genkeys/actions/run.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +cluster_id={{uid}} +open_ssl_keys={{ssl|join(' ')}} +ssh_keys={{ ssh|join(' ') }} +keys_path={{path}} + +BASE_PATH=$keys_path/$cluster_id/ + +function generate_open_ssl_keys { + for i in $open_ssl_keys + do + local dir_path=$BASE_PATH$i/ + local key_path=$dir_path$i.key + mkdir -p $dir_path + if [ ! -f $key_path ]; then + openssl rand -base64 741 > $key_path 2>&1 + else + echo 'Key $key_path already exists' + fi + done +} + +function generate_ssh_keys { + for i in $ssh_keys + do + local dir_path=$BASE_PATH$i/ + local key_path=$dir_path$i + mkdir -p $dir_path + if [ ! -f $key_path ]; then + ssh-keygen -b 2048 -t rsa -N '' -f $key_path 2>&1 + else + echo 'Key $key_path already exists' + fi + done +} + +generate_open_ssl_keys +generate_ssh_keys diff --git a/f2s/resources/genkeys/meta.yaml b/f2s/resources/genkeys/meta.yaml new file mode 100644 index 00000000..a0250344 --- /dev/null +++ b/f2s/resources/genkeys/meta.yaml @@ -0,0 +1,20 @@ +id: genkeys +handler: bash +version: 0.0.1 +inputs: + uid: + schema: str! + value: + path: + schema: str! 
+ value: /var/lib/fuel/keys/ + ssl: + schema: [] + value: + - mongo + ssh: + schema: [] + value: + - neutron + - nova + - mysql diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml new file mode 100644 index 00000000..aae12fd5 --- /dev/null +++ b/f2s/vrs/prep.yaml @@ -0,0 +1,23 @@ +id: prep_tasks +resources: + - id: sources{{index}} + from: resources/sources + location: {{node}} + values: + sources: + - src: /var/lib/fuel/keys/{{uid}}/neutron/neutron.pub + dst: /var/lib/astute/neutron/neutron.pub + - src: /var/lib/fuel/keys/{{uid}}/neutron/neutron + dst: /var/lib/astute/neutron/neutron + - src: /var/lib/fuel/keys/{uid}/nova/nova.pub + dst: /var/lib/astute/nova/nova.pub + - src: /var/lib/fuel/keys/{{uid}}/nova/nova + dst: /var/lib/astute/nova/nova + - src: /var/lib/fuel/keys/{{uid}}/mysql/mysql.pub + dst: /var/lib/astute/mysql/mysql.pub + - src: /var/lib/fuel/keys/{{uid}}/mysql/mysql + dst: /var/lib/astute/mysql/mysql + - src: /var/lib/fuel/keys/{{uid}}/mongodb/mongodb.key + dst: /var/lib/astute/mongodb/mongodb.key + - src: /etc/puppet/modules + dst: /etc/puppet/modules From 180a1cd82b02d23b824d706c44d4c6838eca0dd2 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 10:00:50 +0200 Subject: [PATCH 15/51] Add from_nailgun.py manager --- f2s/resources/role_data/managers/from_nailgun.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 f2s/resources/role_data/managers/from_nailgun.py diff --git a/f2s/resources/role_data/managers/from_nailgun.py b/f2s/resources/role_data/managers/from_nailgun.py new file mode 100644 index 00000000..be2eb526 --- /dev/null +++ b/f2s/resources/role_data/managers/from_nailgun.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python + +import sys +import json + +from fuelclient.objects.environment import Environment + +ARGS = json.loads(sys.stdin.read()) + +env = Environment(ARGS['env']) +facts = env.get_default_facts('deployment', [ARGS['uid']]) + +sys.stdout.write(json.dumps(facts)) From 
a0379206e9147c3373810c23de26715a40c582b9 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 10:56:30 +0200 Subject: [PATCH 16/51] Add fuel entities and translate solar entities in order --- f2s/f2s.py | 94 ++++++++++++------- f2s/fsclient.py | 15 +++ .../role_data/managers/from_nailgun.py | 0 f2s/vrs/fuel_node.yaml | 18 ++++ 4 files changed, 95 insertions(+), 32 deletions(-) create mode 100755 f2s/fsclient.py mode change 100644 => 100755 f2s/resources/role_data/managers/from_nailgun.py create mode 100644 f2s/vrs/fuel_node.yaml diff --git a/f2s/f2s.py b/f2s/f2s.py index e09d71db..19e1ef23 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -3,6 +3,7 @@ import os from fnmatch import fnmatch import shutil +from collections import OrderedDict import click import yaml @@ -35,6 +36,18 @@ def clean_vr(): shutil.rmtree(VR_TMP_DIR) ensure_dir(VR_TMP_DIR) + +def ordered_dump(data, stream=None, Dumper=yaml.Dumper, **kwds): + class OrderedDumper(Dumper): + pass + def _dict_representer(dumper, data): + return dumper.represent_mapping( + yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, + data.items()) + OrderedDumper.add_representer(OrderedDict, _dict_representer) + return yaml.dump(data, stream, OrderedDumper, **kwds) + + class Task(object): def __init__(self, task_data, task_path): @@ -66,6 +79,11 @@ class Task(object): LIBRARY_PATH, 'deployment', 'puppet', 'osnailyfacter', after_naily) + @property + def manifest_name(self): + name = self.data['parameters']['puppet_manifest'].split('/')[-1] + return name.split('.')[0] + @property def dst_path(self): return os.path.join(RESOURCE_TMP_WORKDIR, self.name) @@ -79,11 +97,11 @@ class Task(object): return os.path.join(self.dst_path, 'meta.yaml') def meta(self): - data = {'id': self.name, - 'handler': 'puppetv2', - 'version': '8.0', - 'inputs': self.inputs()} - return yaml.safe_dump(data, default_flow_style=False) + data = OrderedDict([('id', self.name), + ('handler', 'puppetv2'), + ('version', '8.0'), + ('inputs', 
self.inputs())]) + return ordered_dump(data, default_flow_style=False) @property def actions(self): @@ -102,8 +120,9 @@ class Task(object): hiera.rb File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" } """ + print self.manifest_name lookup_stack_path = os.path.join( - INPUTS_LOCATION, self.name+"_spec.rb'") + INPUTS_LOCATION, self.manifest_name+"_spec.rb'") if not os.path.exists(lookup_stack_path): return {} @@ -115,7 +134,7 @@ class Task(object): class RoleData(Task): - name = 'globals' + name = 'role_data' def meta(self): data = {'id': self.name, @@ -132,40 +151,57 @@ class RoleData(Task): class DGroup(object): + filtered = ['globals', 'hiera', 'deploy_start'] + def __init__(self, name, tasks): self.name = name self.tasks = tasks def resources(self): + for t, _, _ in self.tasks: - yield {'id': t.name+"{{index}}", - 'from': 'f2s/resources/'+t.name, - 'location': "{{node}}", - 'values_from': RoleData.name+"{{index}}"} + if t.name in self.filtered: + continue + + yield OrderedDict( + [('id', t.name+"{{index}}"), + ('from', 'f2s/resources/'+t.name), + ('location', "{{node}}"), + ('values_from', RoleData.name+"{{index}}")]) def events(self): for t, inner, outer in self.tasks: + if t.name in self.filtered: + continue + for dep in set(inner): - yield { - 'type': 'depends_on', - 'state': 'success', - 'parent_action': dep + '{{index}}.run', - 'child_action': t.name + '{{index}}.run'} + if dep in self.filtered: + continue + + yield OrderedDict([ + ('type', 'depends_on'), + ('state', 'success'), + ('parent_action', dep + '{{index}}.run'), + ('child_action', t.name + '{{index}}.run')]) for dep in set(outer): - yield { - 'type': 'depends_on', - 'state': 'success', - 'parent': { + if dep in self.filtered: + continue + + yield OrderedDict([ + ('type', 'depends_on'), + ('state', 'success'), + ('parent', { 'with_tags': ['resource=' + dep], - 'action': 'run'}, - 'depend_action': t.name + '{{index}}.run'} + 'action': 'run'}), + ('depend_action', t.name + 
'{{index}}.run')]) def meta(self): - data = {'id': self.name, - 'resources': list(self.resources()), - 'events': list(self.events())} - return yaml.safe_dump(data, default_flow_style=False) + data = OrderedDict([ + ('id', self.name), + ('resources', list(self.resources())), + ('events', list(self.events()))]) + return ordered_dump(data, default_flow_style=False) @property def path(self): @@ -242,11 +278,6 @@ def t2r(tasks, t, p, c): preview(task) else: create(task) - # role_data = RoleData() - # if p: - # preview(role_data) - # else: - # create(role_data) @main.command(help='convert groups into templates') @@ -282,6 +313,5 @@ def g2vr(groups, c): f.write(obj.meta()) # based on inner/outer aggregation configure joins in events - if __name__ == '__main__': main() diff --git a/f2s/fsclient.py b/f2s/fsclient.py new file mode 100755 index 00000000..da1a83f0 --- /dev/null +++ b/f2s/fsclient.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python + + +import click + +@click.group() +def main(): + pass + +@click.command() +@click.parameter('nodes', nargs=-1) +def nodes(nodes): + from fuelclient.objects.node import Node + nodes_obj = map(Node, nodes) + diff --git a/f2s/resources/role_data/managers/from_nailgun.py b/f2s/resources/role_data/managers/from_nailgun.py old mode 100644 new mode 100755 diff --git a/f2s/vrs/fuel_node.yaml b/f2s/vrs/fuel_node.yaml new file mode 100644 index 00000000..7ac745fa --- /dev/null +++ b/f2s/vrs/fuel_node.yaml @@ -0,0 +1,18 @@ +id: fuel_node +resources: + - id: ssh_transport{{index}} + from: resources/transport_ssh + values: + ssh_user: 'root' + - id: transports{{index}} + from: resources/transports + values: + transports:user: ssh_transport{{index}}::ssh_user + transports:port: ssh_transport{{index}}::ssh_port + transports:name: ssh_transport{{index}}::name + - id: node{{index}} + from: resources/ro_node + values: + name: node{{index}} + ip: {{ip}} + transports_id: transports{{j}}::transports_id From 975258374bedce5634a74a22c2837fc8de8eb798 Mon Sep 17 
00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 11:35:51 +0200 Subject: [PATCH 17/51] Fix patches for noop tests and use new path to spec --- f2s/f2s.py | 10 ++++++---- f2s/patches/noop_tests.patch | 5 +++-- f2s/resources/role_data/meta.yaml | 3 +++ 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 19e1ef23..8141512d 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -80,9 +80,11 @@ class Task(object): after_naily) @property - def manifest_name(self): - name = self.data['parameters']['puppet_manifest'].split('/')[-1] - return name.split('.')[0] + def spec_name(self): + splitted = self.data['parameters']['puppet_manifest'].split('/') + directory = splitted[-2] + name = splitted[-1].split('.')[0] + return "{}_{}_spec.rb'".format(directory, name) @property def dst_path(self): @@ -122,7 +124,7 @@ class Task(object): """ print self.manifest_name lookup_stack_path = os.path.join( - INPUTS_LOCATION, self.manifest_name+"_spec.rb'") + INPUTS_LOCATION, self.spec_name) if not os.path.exists(lookup_stack_path): return {} diff --git a/f2s/patches/noop_tests.patch b/f2s/patches/noop_tests.patch index c475ac22..83d6ed86 100644 --- a/f2s/patches/noop_tests.patch +++ b/f2s/patches/noop_tests.patch @@ -4,8 +4,9 @@ # @return [Array] success and empty report array def self.rspec(spec) inside_noop_tests_directory do -+ identity = spec.split('/')[-1] -+ ENV["SPEC"] = identity ++ splitted = spec.split('/') ++ dir, name = splitted[-2], splitted[-1] ++ ENV["SPEC"] = "#{dir}_#{name}" command = "rspec #{RSPEC_OPTIONS} #{spec}" - command = 'bundle exec ' + command if options[:bundle] + command = "bundle exec " + command if options[:bundle] diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index f65a2e21..06b4a75b 100644 --- a/f2s/resources/role_data/meta.yaml +++ b/f2s/resources/role_data/meta.yaml @@ -13,6 +13,9 @@ input: uid: type: str! value: + env: + type: str! 
+ value: access: value: null access_hash: From eb64d35afa1e256e60d6c852ec4b82395e287f87 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 12:13:13 +0200 Subject: [PATCH 18/51] Add mos repos and vr for genkeys --- f2s/f2s.py | 7 ++++++- f2s/vrs/genkeys.yaml | 5 +++++ f2s/vrs/prep.yaml | 13 +++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 f2s/vrs/genkeys.yaml diff --git a/f2s/f2s.py b/f2s/f2s.py index 8141512d..21486d85 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -122,7 +122,7 @@ class Task(object): hiera.rb File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" } """ - print self.manifest_name + print self.spec_name lookup_stack_path = os.path.join( INPUTS_LOCATION, self.spec_name) if not os.path.exists(lookup_stack_path): @@ -161,6 +161,11 @@ class DGroup(object): def resources(self): + yield OrderedDict( + [('id', RoleData.name+"{{index}}"), + ('from', 'f2s/resources/'+RoleData.name), + ('location', "{{node}}")]) + for t, _, _ in self.tasks: if t.name in self.filtered: continue diff --git a/f2s/vrs/genkeys.yaml b/f2s/vrs/genkeys.yaml new file mode 100644 index 00000000..c62517b1 --- /dev/null +++ b/f2s/vrs/genkeys.yaml @@ -0,0 +1,5 @@ +id: genkeys +resources: + - id: genkeys{{index}} + from: f2s/resources/genkeys + location: {{node}} diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml index aae12fd5..4c10bd8a 100644 --- a/f2s/vrs/prep.yaml +++ b/f2s/vrs/prep.yaml @@ -21,3 +21,16 @@ resources: dst: /var/lib/astute/mongodb/mongodb.key - src: /etc/puppet/modules dst: /etc/puppet/modules + - id: mos_repos{{index}} + from: templates/mos_repos.yaml + values: + node: {{node}} + index: {{index}} +events: + - type: depends_on + state: success + parent: + action: run + with_tags: + - resource=genkeys + depend_action: sources{{index}}.run From 62ec081cb4a5bf44ce76cccb6edaaa306c2583dd Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 12:36:00 +0200 Subject: [PATCH 19/51] Add role data into 
events and events from prep.yaml to role_data --- f2s/f2s.py | 7 +++++++ f2s/vrs/prep.yaml | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/f2s/f2s.py b/f2s/f2s.py index 21486d85..2f4b3fd8 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -130,6 +130,7 @@ class Task(object): with open(lookup_stack_path) as f: data = yaml.safe_load(f) or [] + data = data + ['puppet_modules'] return {key: {'value': None} for key in set(data) if '::' not in key} @@ -182,6 +183,12 @@ class DGroup(object): if t.name in self.filtered: continue + yield OrderedDict([ + ('type', 'depends_on'), + ('state', 'success'), + ('parent_action', RoleData.name + '{{index}}.run'), + ('child_action', t.name + '{{index}}.run')]) + for dep in set(inner): if dep in self.filtered: continue diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml index 4c10bd8a..7c6c48aa 100644 --- a/f2s/vrs/prep.yaml +++ b/f2s/vrs/prep.yaml @@ -34,3 +34,11 @@ events: with_tags: - resource=genkeys depend_action: sources{{index}}.run + - type: depends_on + state: success + parent_action: sources{{index}}.run + child_action: role_data{{index}}.run + - type: depends_on + state: success + parent_action: managed_apt_{{index}}.run + child_action: role_data{{index}}.run From 2019c35843a024afdb1d182f02cd18e2c3bd50e5 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 13:44:42 +0200 Subject: [PATCH 20/51] Implement f2s/fsclient.py which will bootstrap solar stuff from fuel --- f2s/f2s.py | 4 +- f2s/fsclient.py | 67 ++++++++++++++++++++++++++++--- f2s/resources/role_data/meta.yaml | 2 +- f2s/vrs/fuel_node.yaml | 2 +- f2s/vrs/prep.yaml | 14 +++---- templates/mos_repos.yaml | 12 +++--- 6 files changed, 80 insertions(+), 21 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 2f4b3fd8..007e7867 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -165,7 +165,9 @@ class DGroup(object): yield OrderedDict( [('id', RoleData.name+"{{index}}"), ('from', 'f2s/resources/'+RoleData.name), - ('location', "{{node}}")]) + ('location', 
"{{node}}"), + ('values', {'uid': '{{index}}', + 'env': '{{env}}'})]) for t, _, _ in self.tasks: if t.name in self.filtered: diff --git a/f2s/fsclient.py b/f2s/fsclient.py index da1a83f0..a9a65760 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -2,14 +2,71 @@ import click +from solar.core.resource import virtual_resource as vr + + @click.group() def main(): pass -@click.command() -@click.parameter('nodes', nargs=-1) -def nodes(nodes): - from fuelclient.objects.node import Node - nodes_obj = map(Node, nodes) +class NailgunSource(object): + def nodes(self, uids): + from fuelclient.objects.node import Node + nodes_obj = map(Node, nodes) + return [] + + def roles(self, roles): + return [] + +class DumbSource(object): + + def nodes(self, uids): + ip_mask = '10.0.0.%' + return [(uid, ip_mask % uid, 1) for uid in uids] + + def roles(self, uid): + return 'primary-controller' + + def master(self): + return 'master', '10.0.1.1' + +source = DumbSource() + +@click.command() +@click.parameter('uids', nargs=-1) +def nodes(uids): + master = source.master() + vr.create('master', 'f2s/vrs/fuel_node', + {'index': master[0], 'ip': master[1]}) + for uid, ip, env in source.nodes(uids): + vr.create('fuel_node', 'f2s/vrs/fuel_node', + {'index': uid, 'ip': ip}) + +@click.command() +@click.parameter('uids', nargs=-1) +def basic(uids): + master_index = source.master()[0] + other = nodes_data[1:] + + vr.create('genkeys', 'f2s/vrs/genkeys', { + 'node': 'node'+master_index, + 'index': master_index}) + for uid, ip, env in source.nodes(uids): + vr.create('prep', 'f2s/vrs/prep', + {'index': uid, 'env': env, 'node': 'node'+uid}) + + +@click.command() +@click.parameter('uids', nargs=-1) +def roles(uids): + + for uid, ip, env in source.nodes(uids): + role = source.roles(uid) + vr.create(role, 'f2s/vrs/'+role, + {'index': uid, 'env': 'env', 'node': 'node'+uid}) + + +if __name__ == '__main__': + main() diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index 
06b4a75b..992dfa7b 100644 --- a/f2s/resources/role_data/meta.yaml +++ b/f2s/resources/role_data/meta.yaml @@ -9,7 +9,7 @@ input: # should be auto-generated based on outputs from globals.pp puppet_modules: type: str! - value: + value: /etc/puppet/modules uid: type: str! value: diff --git a/f2s/vrs/fuel_node.yaml b/f2s/vrs/fuel_node.yaml index 7ac745fa..1f54c360 100644 --- a/f2s/vrs/fuel_node.yaml +++ b/f2s/vrs/fuel_node.yaml @@ -15,4 +15,4 @@ resources: values: name: node{{index}} ip: {{ip}} - transports_id: transports{{j}}::transports_id + transports_id: transports{{index}}::transports_id diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml index 7c6c48aa..abb8d3ea 100644 --- a/f2s/vrs/prep.yaml +++ b/f2s/vrs/prep.yaml @@ -5,19 +5,19 @@ resources: location: {{node}} values: sources: - - src: /var/lib/fuel/keys/{{uid}}/neutron/neutron.pub + - src: /var/lib/fuel/keys/{{env}}/neutron/neutron.pub dst: /var/lib/astute/neutron/neutron.pub - - src: /var/lib/fuel/keys/{{uid}}/neutron/neutron + - src: /var/lib/fuel/keys/{{env}}/neutron/neutron dst: /var/lib/astute/neutron/neutron - - src: /var/lib/fuel/keys/{uid}/nova/nova.pub + - src: /var/lib/fuel/keys/{{env}}/nova/nova.pub dst: /var/lib/astute/nova/nova.pub - - src: /var/lib/fuel/keys/{{uid}}/nova/nova + - src: /var/lib/fuel/keys/{{env}}/nova/nova dst: /var/lib/astute/nova/nova - - src: /var/lib/fuel/keys/{{uid}}/mysql/mysql.pub + - src: /var/lib/fuel/keys/{{env}}/mysql/mysql.pub dst: /var/lib/astute/mysql/mysql.pub - - src: /var/lib/fuel/keys/{{uid}}/mysql/mysql + - src: /var/lib/fuel/keys/{{env}}/mysql/mysql dst: /var/lib/astute/mysql/mysql - - src: /var/lib/fuel/keys/{{uid}}/mongodb/mongodb.key + - src: /var/lib/fuel/keys/{{env}}/mongodb/mongodb.key dst: /var/lib/astute/mongodb/mongodb.key - src: /etc/puppet/modules dst: /etc/puppet/modules diff --git a/templates/mos_repos.yaml b/templates/mos_repos.yaml index dddf431e..e08e0b8c 100644 --- a/templates/mos_repos.yaml +++ b/templates/mos_repos.yaml @@ -6,8 +6,8 @@ 
resources: values: name: mos-holdback package: '*' - repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ mos7.0-holdback main restricted - pin: release o=Mirantis,n=mos7.0,a=mos7.0-holdback,l=mos7.0 + repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/8.0/ mos8.0-holdback main restricted + pin: release o=Mirantis,n=mos8.0,a=mos8.0-holdback,l=mos8.0 pin_priority: 1100 - id: mos_security_{{index}} from: resources/apt_repo @@ -15,8 +15,8 @@ resources: values: name: mos package: '*' - repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ mos7.0-security main restricted - pin: release o=Mirantis,n=mos7.0,a=mos7.0-security,l=mos7.0 + repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/8.0/ mos8.0-security main restricted + pin: release o=Mirantis,n=mos8.0,a=mos8.0-security,l=mos8.0 pin_priority: 1050 - id: mos_updates_{{index}} from: resources/apt_repo @@ -24,8 +24,8 @@ resources: values: name: mos_update package: '*' - repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/7.0/ mos7.0-updates main restricted - pin: release o=Mirantis,a=mos7.0-updates,l=mos7.0,n=mos7.0 + repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/8.0/ mos8.0-updates main restricted + pin: release o=Mirantis,a=mos8.0-updates,l=mos8.0,n=mos8.0 pin_priority: 1050 - id: managed_apt_{{index}} from: resources/managed_apt From 9cf2b89e4168ec1864af98a6912dacf16449260f Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 14:07:37 +0200 Subject: [PATCH 21/51] Adjust docker to use f2s and small fixes --- Dockerfile | 5 +++++ docker-compose.yml | 2 +- f2s/.gitignore | 2 +- f2s/fsclient.py | 4 ++-- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0afd4735..1202cd57 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,12 +11,17 @@ ADD bootstrap/playbooks/celery.yaml /celery.yaml ADD resources /resources ADD templates /templates ADD run.sh /run.sh +ADD f2s /f2s RUN apt-get install -y libffi-dev libssl-dev + RUN pip install 
https://github.com/Mirantis/solar/archive/master.zip RUN pip install https://github.com/Mirantis/solar-agent/archive/master.zip RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --tags install +RUN pip install riak peewee +RUN pip install -U setuptools>=17.1 +RUN pip install -U python-fuelclient CMD ["/run.sh"] diff --git a/docker-compose.yml b/docker-compose.yml index 947e083f..0114e194 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -solar-celery: +solar: image: solarproject/solar-celery # path inside of the container should be exactly the same as outside # because solar uses absolute path to find resoruce actions files diff --git a/f2s/.gitignore b/f2s/.gitignore index 5f4a0636..65923bcf 100644 --- a/f2s/.gitignore +++ b/f2s/.gitignore @@ -1,2 +1,2 @@ fuel-library -tmp/ +tmp diff --git a/f2s/fsclient.py b/f2s/fsclient.py index a9a65760..e9422a8e 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -30,7 +30,7 @@ class DumbSource(object): return 'primary-controller' def master(self): - return 'master', '10.0.1.1' + return 'master', '0.0.0.0' source = DumbSource() @@ -65,7 +65,7 @@ def roles(uids): for uid, ip, env in source.nodes(uids): role = source.roles(uid) vr.create(role, 'f2s/vrs/'+role, - {'index': uid, 'env': 'env', 'node': 'node'+uid}) + {'index': uid, 'env': env, 'node': 'node'+uid}) if __name__ == '__main__': From ba1c7202852cad2ec90c2655f8b46ce345e23ebd Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 14:58:26 +0200 Subject: [PATCH 22/51] Fix inconsistencies in clients --- f2s/f2s.py | 7 +++++-- f2s/fsclient.py | 28 +++++++++++++++------------- f2s/vrs/prep.yaml | 4 ++-- solar/dblayer/solar_models.py | 2 +- 4 files changed, 23 insertions(+), 18 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 007e7867..331af492 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -189,7 +189,7 @@ class DGroup(object): ('type', 'depends_on'), ('state', 'success'), ('parent_action', RoleData.name + 
'{{index}}.run'), - ('child_action', t.name + '{{index}}.run')]) + ('depend_action', t.name + '{{index}}.run')]) for dep in set(inner): if dep in self.filtered: @@ -199,7 +199,7 @@ class DGroup(object): ('type', 'depends_on'), ('state', 'success'), ('parent_action', dep + '{{index}}.run'), - ('child_action', t.name + '{{index}}.run')]) + ('depend_action', t.name + '{{index}}.run')]) for dep in set(outer): if dep in self.filtered: continue @@ -316,6 +316,9 @@ def g2vr(groups, c): inner_preds = [] outer_preds = [] for p in dg.predecessors(t): + if dg.node[p]['t'].type != 'puppet': + continue + if p in dsub: inner_preds.append(p) else: diff --git a/f2s/fsclient.py b/f2s/fsclient.py index e9422a8e..a59658be 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -20,10 +20,13 @@ class NailgunSource(object): def roles(self, roles): return [] + def master(self): + return 'master', '' + class DumbSource(object): def nodes(self, uids): - ip_mask = '10.0.0.%' + ip_mask = '10.0.0.%s' return [(uid, ip_mask % uid, 1) for uid in uids] def roles(self, uid): @@ -34,37 +37,36 @@ class DumbSource(object): source = DumbSource() -@click.command() -@click.parameter('uids', nargs=-1) +@main.command() +@click.argument('uids', nargs=-1) def nodes(uids): master = source.master() - vr.create('master', 'f2s/vrs/fuel_node', + vr.create('master', 'f2s/vrs/fuel_node.yaml', {'index': master[0], 'ip': master[1]}) for uid, ip, env in source.nodes(uids): - vr.create('fuel_node', 'f2s/vrs/fuel_node', + vr.create('fuel_node', 'f2s/vrs/fuel_node.yaml', {'index': uid, 'ip': ip}) -@click.command() -@click.parameter('uids', nargs=-1) +@main.command() +@click.argument('uids', nargs=-1) def basic(uids): master_index = source.master()[0] - other = nodes_data[1:] - vr.create('genkeys', 'f2s/vrs/genkeys', { + vr.create('genkeys', 'f2s/vrs/genkeys.yaml', { 'node': 'node'+master_index, 'index': master_index}) for uid, ip, env in source.nodes(uids): - vr.create('prep', 'f2s/vrs/prep', + vr.create('prep', 
'f2s/vrs/prep.yaml', {'index': uid, 'env': env, 'node': 'node'+uid}) -@click.command() -@click.parameter('uids', nargs=-1) +@main.command() +@click.argument('uids', nargs=-1) def roles(uids): for uid, ip, env in source.nodes(uids): role = source.roles(uid) - vr.create(role, 'f2s/vrs/'+role, + vr.create(role, 'f2s/vrs/'+role +'.yml', {'index': uid, 'env': env, 'node': 'node'+uid}) diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml index abb8d3ea..09614ea4 100644 --- a/f2s/vrs/prep.yaml +++ b/f2s/vrs/prep.yaml @@ -37,8 +37,8 @@ events: - type: depends_on state: success parent_action: sources{{index}}.run - child_action: role_data{{index}}.run + depend_action: role_data{{index}}.run - type: depends_on state: success parent_action: managed_apt_{{index}}.run - child_action: role_data{{index}}.run + depend_action: role_data{{index}}.run diff --git a/solar/dblayer/solar_models.py b/solar/dblayer/solar_models.py index c6ef5c0f..724c940b 100644 --- a/solar/dblayer/solar_models.py +++ b/solar/dblayer/solar_models.py @@ -53,7 +53,7 @@ class InputsFieldWrp(IndexFieldWrp): # XXX: it could be worth to precalculate it if ':' in name: name = name.split(":", 1)[0] - schema = resource.meta_inputs[name]['schema'] + schema = resource.meta_inputs[name].get('schema', None) if isinstance(schema, self._simple_types): return InputTypes.simple if isinstance(schema, list): From 88ddec81d04dea577162639f783d5846da3af365 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 15:09:50 +0200 Subject: [PATCH 23/51] Save generated vrs based on existing roles --- f2s/vrs/base-os.yml | 17 + f2s/vrs/ceph-osd.yml | 141 ++++ f2s/vrs/cinder-vmware.yml | 181 +++++ f2s/vrs/cinder.yml | 141 ++++ f2s/vrs/compute-vmware.yml | 145 ++++ f2s/vrs/compute.yml | 229 ++++++ f2s/vrs/controller.yml | 1066 ++++++++++++++++++++++++++ f2s/vrs/ironic.yml | 159 ++++ f2s/vrs/mongo.yml | 141 ++++ f2s/vrs/primary-controller.yml | 1273 ++++++++++++++++++++++++++++++++ f2s/vrs/primary-mongo.yml | 141 ++++ 
f2s/vrs/virt.yml | 99 +++ 12 files changed, 3733 insertions(+) create mode 100644 f2s/vrs/base-os.yml create mode 100644 f2s/vrs/ceph-osd.yml create mode 100644 f2s/vrs/cinder-vmware.yml create mode 100644 f2s/vrs/cinder.yml create mode 100644 f2s/vrs/compute-vmware.yml create mode 100644 f2s/vrs/compute.yml create mode 100644 f2s/vrs/controller.yml create mode 100644 f2s/vrs/ironic.yml create mode 100644 f2s/vrs/mongo.yml create mode 100644 f2s/vrs/primary-controller.yml create mode 100644 f2s/vrs/primary-mongo.yml create mode 100644 f2s/vrs/virt.yml diff --git a/f2s/vrs/base-os.yml b/f2s/vrs/base-os.yml new file mode 100644 index 00000000..329c85e2 --- /dev/null +++ b/f2s/vrs/base-os.yml @@ -0,0 +1,17 @@ +id: base-os +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run diff --git a/f2s/vrs/ceph-osd.yml b/f2s/vrs/ceph-osd.yml new file mode 100644 index 00000000..80b3c0f2 --- /dev/null +++ b/f2s/vrs/ceph-osd.yml @@ -0,0 +1,141 @@ +id: ceph-osd +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} 
+- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-ceph-osd{{index}} + from: f2s/resources/top-role-ceph-osd + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + 
depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-ceph-osd{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: top-role-ceph-osd{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: top-role-ceph-osd{{index}}.run diff --git a/f2s/vrs/cinder-vmware.yml b/f2s/vrs/cinder-vmware.yml new file mode 100644 index 00000000..67a6fd07 --- /dev/null +++ b/f2s/vrs/cinder-vmware.yml @@ -0,0 +1,181 @@ +id: cinder-vmware +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: 
f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-cinder{{index}} + from: f2s/resources/openstack-cinder + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-cinder-vmware{{index}} + from: f2s/resources/top-role-cinder-vmware + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: 
connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=cinder-keystone + depend_action: 
openstack-cinder{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=keystone + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=rabbitmq + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=cinder-db + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-cinder-vmware{{index}}.run +- type: depends_on + state: success + parent_action: openstack-cinder{{index}}.run + depend_action: top-role-cinder-vmware{{index}}.run diff --git a/f2s/vrs/cinder.yml b/f2s/vrs/cinder.yml new file mode 100644 index 00000000..b89d78bb --- /dev/null +++ b/f2s/vrs/cinder.yml @@ -0,0 +1,141 @@ +id: cinder +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: 
hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-cinder{{index}} + from: f2s/resources/top-role-cinder + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: 
ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-cinder{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: top-role-cinder{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: top-role-cinder{{index}}.run diff --git a/f2s/vrs/compute-vmware.yml b/f2s/vrs/compute-vmware.yml new file mode 100644 index 00000000..7a163e76 --- /dev/null +++ b/f2s/vrs/compute-vmware.yml @@ -0,0 +1,145 @@ +id: compute-vmware +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + 
location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-compute{{index}} + from: f2s/resources/top-role-compute + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-compute-vmware{{index}} + from: f2s/resources/top-role-compute-vmware + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: 
ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-compute{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: top-role-compute{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: top-role-compute{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-compute-vmware{{index}}.run +- type: depends_on + state: success + parent_action: top-role-compute{{index}}.run + depend_action: top-role-compute-vmware{{index}}.run diff --git a/f2s/vrs/compute.yml b/f2s/vrs/compute.yml new file mode 100644 index 00000000..3b4244fa --- /dev/null +++ b/f2s/vrs/compute.yml @@ -0,0 +1,229 @@ +id: compute +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: 
f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-compute{{index}} + from: f2s/resources/top-role-compute + location: '{{node}}' + values_from: role_data{{index}} +- id: ceilometer-compute{{index}} + from: f2s/resources/ceilometer-compute + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-common-config{{index}} + from: f2s/resources/openstack-network-common-config + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-plugins-l2{{index}} + from: f2s/resources/openstack-network-plugins-l2 + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-agents-l3{{index}} + from: f2s/resources/openstack-network-agents-l3 + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-compute-nova{{index}} + from: f2s/resources/openstack-network-compute-nova + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: 
role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: 
depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-compute{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: top-role-compute{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: top-role-compute{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ceilometer-compute{{index}}.run +- type: depends_on + state: success + parent_action: top-role-compute{{index}}.run + depend_action: ceilometer-compute{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ceilometer-controller + depend_action: ceilometer-compute{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-server-config + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - 
resource=openstack-network-networks + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-routers + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-compute-nova{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-compute-nova{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-compute-nova{{index}}.run diff --git a/f2s/vrs/controller.yml b/f2s/vrs/controller.yml new file mode 100644 index 00000000..39651c69 --- /dev/null +++ b/f2s/vrs/controller.yml @@ -0,0 +1,1066 @@ +id: controller +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: umm{{index}} + from: f2s/resources/umm + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: 
ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster{{index}} + from: f2s/resources/cluster + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster_health{{index}} + from: f2s/resources/cluster_health + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster-vrouter{{index}} + from: f2s/resources/cluster-vrouter + location: '{{node}}' + values_from: role_data{{index}} +- id: virtual_ips{{index}} + from: f2s/resources/virtual_ips + location: '{{node}}' + values_from: role_data{{index}} +- id: conntrackd{{index}} + from: f2s/resources/conntrackd + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster-haproxy{{index}} + from: f2s/resources/cluster-haproxy + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-radosgw{{index}} + from: f2s/resources/openstack-haproxy-radosgw + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-swift{{index}} + from: f2s/resources/openstack-haproxy-swift + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-nova{{index}} + from: f2s/resources/openstack-haproxy-nova + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-glance{{index}} + from: f2s/resources/openstack-haproxy-glance + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-sahara{{index}} + from: f2s/resources/openstack-haproxy-sahara + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-cinder{{index}} + from: f2s/resources/openstack-haproxy-cinder + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-keystone{{index}} + from: f2s/resources/openstack-haproxy-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: 
openstack-haproxy-murano{{index}} + from: f2s/resources/openstack-haproxy-murano + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-stats{{index}} + from: f2s/resources/openstack-haproxy-stats + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-ironic{{index}} + from: f2s/resources/openstack-haproxy-ironic + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-heat{{index}} + from: f2s/resources/openstack-haproxy-heat + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-neutron{{index}} + from: f2s/resources/openstack-haproxy-neutron + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-mysqld{{index}} + from: f2s/resources/openstack-haproxy-mysqld + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-horizon{{index}} + from: f2s/resources/openstack-haproxy-horizon + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-ceilometer{{index}} + from: f2s/resources/openstack-haproxy-ceilometer + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy{{index}} + from: f2s/resources/openstack-haproxy + location: '{{node}}' + values_from: role_data{{index}} +- id: dns-server{{index}} + from: f2s/resources/dns-server + location: '{{node}}' + values_from: role_data{{index}} +- id: database{{index}} + from: f2s/resources/database + location: '{{node}}' + values_from: role_data{{index}} +- id: ceilometer-controller{{index}} + from: f2s/resources/ceilometer-controller + location: '{{node}}' + values_from: role_data{{index}} +- id: rabbitmq{{index}} + from: f2s/resources/rabbitmq + location: '{{node}}' + values_from: role_data{{index}} +- id: ironic-api{{index}} + from: f2s/resources/ironic-api + location: '{{node}}' + values_from: role_data{{index}} +- id: apache{{index}} + from: f2s/resources/apache + location: '{{node}}' + values_from: role_data{{index}} 
+- id: api-proxy{{index}} + from: f2s/resources/api-proxy + location: '{{node}}' + values_from: role_data{{index}} +- id: glance{{index}} + from: f2s/resources/glance + location: '{{node}}' + values_from: role_data{{index}} +- id: memcached{{index}} + from: f2s/resources/memcached + location: '{{node}}' + values_from: role_data{{index}} +- id: keystone{{index}} + from: f2s/resources/keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-cinder{{index}} + from: f2s/resources/openstack-cinder + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-controller{{index}} + from: f2s/resources/openstack-controller + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-common-config{{index}} + from: f2s/resources/openstack-network-common-config + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-server-config{{index}} + from: f2s/resources/openstack-network-server-config + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-plugins-l2{{index}} + from: f2s/resources/openstack-network-plugins-l2 + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-agents-l3{{index}} + from: f2s/resources/openstack-network-agents-l3 + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-server-nova{{index}} + from: f2s/resources/openstack-network-server-nova + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-agents-dhcp{{index}} + from: f2s/resources/openstack-network-agents-dhcp + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-agents-metadata{{index}} + from: f2s/resources/openstack-network-agents-metadata + location: '{{node}}' + values_from: role_data{{index}} +- id: heat{{index}} + from: f2s/resources/heat + location: '{{node}}' + values_from: role_data{{index}} +- id: horizon{{index}} + from: f2s/resources/horizon + location: '{{node}}' + 
values_from: role_data{{index}} +- id: murano{{index}} + from: f2s/resources/murano + location: '{{node}}' + values_from: role_data{{index}} +- id: sahara{{index}} + from: f2s/resources/sahara + location: '{{node}}' + values_from: role_data{{index}} +- id: ceph-mon{{index}} + from: f2s/resources/ceph-mon + location: '{{node}}' + values_from: role_data{{index}} +- id: ceph-radosgw{{index}} + from: f2s/resources/ceph-radosgw + location: '{{node}}' + values_from: role_data{{index}} +- id: swift{{index}} + from: f2s/resources/swift + location: '{{node}}' + values_from: role_data{{index}} +- id: controller_remaining_tasks{{index}} + from: f2s/resources/controller_remaining_tasks + location: '{{node}}' + values_from: role_data{{index}} +- id: vmware-vcenter{{index}} + from: f2s/resources/vmware-vcenter + location: '{{node}}' + values_from: role_data{{index}} +- id: swift-rebalance-cron{{index}} + from: f2s/resources/swift-rebalance-cron + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: umm{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: umm{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + 
depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cluster{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: cluster{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: cluster{{index}}.run +- type: depends_on + 
state: success + parent_action: role_data{{index}}.run + depend_action: cluster_health{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: cluster_health{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cluster-vrouter{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: cluster-vrouter{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: virtual_ips{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: virtual_ips{{index}}.run +- type: depends_on + state: success + parent_action: cluster-vrouter{{index}}.run + depend_action: virtual_ips{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: conntrackd{{index}}.run +- type: depends_on + state: success + parent_action: virtual_ips{{index}}.run + depend_action: conntrackd{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cluster-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: cluster-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: virtual_ips{{index}}.run + depend_action: cluster-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-swift{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-swift{{index}}.run +- type: depends_on + 
state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-nova{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-nova{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-glance{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-glance{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-sahara{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-sahara{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-cinder{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-cinder{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-keystone{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-murano{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-murano{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-stats{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-stats{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-ironic{{index}}.run 
+- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-ironic{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-heat{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-heat{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-neutron{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-neutron{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-mysqld{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-mysqld{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-horizon{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-horizon{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-ceilometer{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-ceilometer{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-ceilometer{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-radosgw{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-horizon{{index}}.run 
+ depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-nova{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-mysqld{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-glance{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-heat{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-sahara{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-cinder{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-keystone{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-murano{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-stats{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-ironic{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-swift{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-neutron{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: dns-server{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: dns-server{{index}}.run +- type: depends_on + state: 
success + parent_action: role_data{{index}}.run + depend_action: database{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: database{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ceilometer-controller{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: ceilometer-controller{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ceilometer-keystone + depend_action: ceilometer-controller{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: rabbitmq{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: rabbitmq{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ironic-db + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ironic-keystone + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: apache{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: apache{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: api-proxy{{index}}.run +- type: depends_on + state: success + parent_action: apache{{index}}.run + depend_action: api-proxy{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: glance{{index}}.run +- type: 
depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=glance-keystone + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=glance-db + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: memcached{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: memcached{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: apache{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: memcached{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=keystone-db + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: 
keystone{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=cinder-keystone + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=cinder-db + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: openstack-cinder{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: ceilometer-controller{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: ironic-api{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: glance{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=nova-keystone + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent: + action: run + 
with_tags: + - resource=nova-db + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-server-config{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-server-config{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-config{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-networks + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-routers + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-server-nova{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-server-nova{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: 
openstack-network-agents-dhcp{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-agents-dhcp{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-nova{{index}}.run + depend_action: openstack-network-agents-dhcp{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-agents-dhcp{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-nova{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-dhcp{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=heat-db + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=heat-keystone + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: horizon{{index}}.run +- type: depends_on + state: success + parent_action: 
openstack-controller{{index}}.run + depend_action: horizon{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: heat{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: horizon{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=murano-keystone + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=murano-db + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: horizon{{index}}.run + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=sahara-keystone + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=sahara-db + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ceph-mon{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: ceph-mon{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ceph-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: apache{{index}}.run + depend_action: ceph-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: ceph-mon{{index}}.run + depend_action: ceph-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: swift{{index}}.run +- type: depends_on + state: success + parent_action: ceilometer-controller{{index}}.run + depend_action: 
swift{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: swift{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=swift-keystone + depend_action: swift{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: ceph-radosgw{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: api-proxy{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: murano{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: sahara{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: ceph-mon{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: swift{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: vmware-vcenter{{index}}.run +- type: depends_on + state: success + parent_action: controller_remaining_tasks{{index}}.run + depend_action: vmware-vcenter{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: swift-rebalance-cron{{index}}.run +- type: depends_on + state: success + parent_action: swift{{index}}.run + depend_action: swift-rebalance-cron{{index}}.run diff --git a/f2s/vrs/ironic.yml b/f2s/vrs/ironic.yml new file mode 100644 index 00000000..85939851 --- /dev/null +++ b/f2s/vrs/ironic.yml @@ -0,0 +1,159 @@ +id: ironic +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + 
env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: ironic-conductor{{index}} + from: f2s/resources/ironic-conductor + location: '{{node}}' + values_from: role_data{{index}} +- id: ironic-compute{{index}} + from: f2s/resources/ironic-compute + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-common-config{{index}} + from: f2s/resources/openstack-network-common-config + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-plugins-l2{{index}} + from: f2s/resources/openstack-network-plugins-l2 + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: 
depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ssl-add-trust-chain + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ironic-conductor{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ironic-conductor{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: ironic-conductor{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ironic-compute{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ironic-compute{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: ironic-compute{{index}}.run +- type: depends_on + state: success + parent_action: 
role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-server-config + depend_action: openstack-network-plugins-l2{{index}}.run diff --git a/f2s/vrs/mongo.yml b/f2s/vrs/mongo.yml new file mode 100644 index 00000000..af866e5c --- /dev/null +++ b/f2s/vrs/mongo.yml @@ -0,0 +1,141 @@ +id: mongo +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-mongo{{index}} + from: f2s/resources/top-role-mongo + 
location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: 
depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-mongo{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: top-role-mongo{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: top-role-mongo{{index}}.run diff --git a/f2s/vrs/primary-controller.yml b/f2s/vrs/primary-controller.yml new file mode 100644 index 00000000..ebdfee83 --- /dev/null +++ b/f2s/vrs/primary-controller.yml @@ -0,0 +1,1273 @@ +id: primary-controller +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: umm{{index}} + from: f2s/resources/umm + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: 
ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster{{index}} + from: f2s/resources/cluster + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster_health{{index}} + from: f2s/resources/cluster_health + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster-vrouter{{index}} + from: f2s/resources/cluster-vrouter + location: '{{node}}' + values_from: role_data{{index}} +- id: virtual_ips{{index}} + from: f2s/resources/virtual_ips + location: '{{node}}' + values_from: role_data{{index}} +- id: conntrackd{{index}} + from: f2s/resources/conntrackd + location: '{{node}}' + values_from: role_data{{index}} +- id: cluster-haproxy{{index}} + from: f2s/resources/cluster-haproxy + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-ceilometer{{index}} + from: f2s/resources/openstack-haproxy-ceilometer + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-radosgw{{index}} + from: f2s/resources/openstack-haproxy-radosgw + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-horizon{{index}} + from: f2s/resources/openstack-haproxy-horizon + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-swift{{index}} + from: f2s/resources/openstack-haproxy-swift + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-nova{{index}} + from: f2s/resources/openstack-haproxy-nova + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-glance{{index}} + from: f2s/resources/openstack-haproxy-glance + location: '{{node}}' + values_from: role_data{{index}} +- id: 
openstack-haproxy-sahara{{index}} + from: f2s/resources/openstack-haproxy-sahara + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-cinder{{index}} + from: f2s/resources/openstack-haproxy-cinder + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-keystone{{index}} + from: f2s/resources/openstack-haproxy-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-murano{{index}} + from: f2s/resources/openstack-haproxy-murano + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-stats{{index}} + from: f2s/resources/openstack-haproxy-stats + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-ironic{{index}} + from: f2s/resources/openstack-haproxy-ironic + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-heat{{index}} + from: f2s/resources/openstack-haproxy-heat + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-neutron{{index}} + from: f2s/resources/openstack-haproxy-neutron + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy-mysqld{{index}} + from: f2s/resources/openstack-haproxy-mysqld + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-haproxy{{index}} + from: f2s/resources/openstack-haproxy + location: '{{node}}' + values_from: role_data{{index}} +- id: dns-server{{index}} + from: f2s/resources/dns-server + location: '{{node}}' + values_from: role_data{{index}} +- id: database{{index}} + from: f2s/resources/database + location: '{{node}}' + values_from: role_data{{index}} +- id: keystone-db{{index}} + from: f2s/resources/keystone-db + location: '{{node}}' + values_from: role_data{{index}} +- id: glance-db{{index}} + from: f2s/resources/glance-db + location: '{{node}}' + values_from: role_data{{index}} +- id: ironic-db{{index}} + from: f2s/resources/ironic-db + location: '{{node}}' + values_from: 
role_data{{index}} +- id: neutron-db{{index}} + from: f2s/resources/neutron-db + location: '{{node}}' + values_from: role_data{{index}} +- id: murano-db{{index}} + from: f2s/resources/murano-db + location: '{{node}}' + values_from: role_data{{index}} +- id: nova-db{{index}} + from: f2s/resources/nova-db + location: '{{node}}' + values_from: role_data{{index}} +- id: cinder-db{{index}} + from: f2s/resources/cinder-db + location: '{{node}}' + values_from: role_data{{index}} +- id: sahara-db{{index}} + from: f2s/resources/sahara-db + location: '{{node}}' + values_from: role_data{{index}} +- id: heat-db{{index}} + from: f2s/resources/heat-db + location: '{{node}}' + values_from: role_data{{index}} +- id: rabbitmq{{index}} + from: f2s/resources/rabbitmq + location: '{{node}}' + values_from: role_data{{index}} +- id: apache{{index}} + from: f2s/resources/apache + location: '{{node}}' + values_from: role_data{{index}} +- id: api-proxy{{index}} + from: f2s/resources/api-proxy + location: '{{node}}' + values_from: role_data{{index}} +- id: memcached{{index}} + from: f2s/resources/memcached + location: '{{node}}' + values_from: role_data{{index}} +- id: keystone{{index}} + from: f2s/resources/keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: sahara-keystone{{index}} + from: f2s/resources/sahara-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: neutron-keystone{{index}} + from: f2s/resources/neutron-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: cinder-keystone{{index}} + from: f2s/resources/cinder-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: glance-keystone{{index}} + from: f2s/resources/glance-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: glance{{index}} + from: f2s/resources/glance + location: '{{node}}' + values_from: role_data{{index}} +- id: ironic-keystone{{index}} + from: f2s/resources/ironic-keystone + location: '{{node}}' + values_from: 
role_data{{index}} +- id: ironic-api{{index}} + from: f2s/resources/ironic-api + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-cinder{{index}} + from: f2s/resources/openstack-cinder + location: '{{node}}' + values_from: role_data{{index}} +- id: ceilometer-keystone{{index}} + from: f2s/resources/ceilometer-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: ceilometer-controller{{index}} + from: f2s/resources/ceilometer-controller + location: '{{node}}' + values_from: role_data{{index}} +- id: murano-keystone{{index}} + from: f2s/resources/murano-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: workloads_collector_add{{index}} + from: f2s/resources/workloads_collector_add + location: '{{node}}' + values_from: role_data{{index}} +- id: heat-keystone{{index}} + from: f2s/resources/heat-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: swift-keystone{{index}} + from: f2s/resources/swift-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: nova-keystone{{index}} + from: f2s/resources/nova-keystone + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-controller{{index}} + from: f2s/resources/openstack-controller + location: '{{node}}' + values_from: role_data{{index}} +- id: ceph-mon{{index}} + from: f2s/resources/ceph-mon + location: '{{node}}' + values_from: role_data{{index}} +- id: ceph-radosgw{{index}} + from: f2s/resources/ceph-radosgw + location: '{{node}}' + values_from: role_data{{index}} +- id: heat{{index}} + from: f2s/resources/heat + location: '{{node}}' + values_from: role_data{{index}} +- id: swift{{index}} + from: f2s/resources/swift + location: '{{node}}' + values_from: role_data{{index}} +- id: swift-rebalance-cron{{index}} + from: f2s/resources/swift-rebalance-cron + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-common-config{{index}} + from: 
f2s/resources/openstack-network-common-config + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-server-config{{index}} + from: f2s/resources/openstack-network-server-config + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-plugins-l2{{index}} + from: f2s/resources/openstack-network-plugins-l2 + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-networks{{index}} + from: f2s/resources/openstack-network-networks + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-routers{{index}} + from: f2s/resources/openstack-network-routers + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-agents-l3{{index}} + from: f2s/resources/openstack-network-agents-l3 + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-server-nova{{index}} + from: f2s/resources/openstack-network-server-nova + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-agents-dhcp{{index}} + from: f2s/resources/openstack-network-agents-dhcp + location: '{{node}}' + values_from: role_data{{index}} +- id: openstack-network-agents-metadata{{index}} + from: f2s/resources/openstack-network-agents-metadata + location: '{{node}}' + values_from: role_data{{index}} +- id: horizon{{index}} + from: f2s/resources/horizon + location: '{{node}}' + values_from: role_data{{index}} +- id: murano{{index}} + from: f2s/resources/murano + location: '{{node}}' + values_from: role_data{{index}} +- id: sahara{{index}} + from: f2s/resources/sahara + location: '{{node}}' + values_from: role_data{{index}} +- id: controller_remaining_tasks{{index}} + from: f2s/resources/controller_remaining_tasks + location: '{{node}}' + values_from: role_data{{index}} +- id: vmware-vcenter{{index}} + from: f2s/resources/vmware-vcenter + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + 
parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: umm{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: umm{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + 
state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cluster{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: cluster{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: cluster{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cluster_health{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: cluster_health{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cluster-vrouter{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: cluster-vrouter{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: virtual_ips{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: virtual_ips{{index}}.run +- type: depends_on + state: success + parent_action: cluster-vrouter{{index}}.run + depend_action: virtual_ips{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: conntrackd{{index}}.run +- type: depends_on + state: success + parent_action: virtual_ips{{index}}.run + depend_action: 
conntrackd{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cluster-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: cluster{{index}}.run + depend_action: cluster-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: virtual_ips{{index}}.run + depend_action: cluster-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-ceilometer{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-ceilometer{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-horizon{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-horizon{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-swift{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-swift{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-nova{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-nova{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-glance{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: 
openstack-haproxy-glance{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-sahara{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-sahara{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-cinder{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-cinder{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-keystone{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-murano{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-murano{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-stats{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-stats{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-ironic{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-ironic{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-heat{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-heat{{index}}.run +- type: depends_on + state: success + parent_action: 
role_data{{index}}.run + depend_action: openstack-haproxy-neutron{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-neutron{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy-mysqld{{index}}.run +- type: depends_on + state: success + parent_action: cluster-haproxy{{index}}.run + depend_action: openstack-haproxy-mysqld{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-ceilometer{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-radosgw{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-horizon{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-nova{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-mysqld{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-glance{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-heat{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-sahara{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-cinder{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-keystone{{index}}.run + depend_action: 
openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-murano{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-stats{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-ironic{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-swift{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy-neutron{{index}}.run + depend_action: openstack-haproxy{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: dns-server{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: dns-server{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: database{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: database{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: keystone-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: keystone-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: glance-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: glance-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ironic-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: ironic-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + 
depend_action: neutron-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: neutron-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: murano-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: murano-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: nova-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: nova-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cinder-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: cinder-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: sahara-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: sahara-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: heat-db{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: heat-db{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: rabbitmq{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: rabbitmq{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: apache{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: apache{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: api-proxy{{index}}.run +- type: depends_on + state: success + parent_action: apache{{index}}.run + depend_action: 
api-proxy{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: memcached{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: memcached{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone-db{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: apache{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: memcached{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: sahara-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: sahara-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: neutron-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: neutron-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: cinder-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: cinder-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: glance-keystone{{index}}.run +- type: depends_on + state: success + parent_action: 
keystone{{index}}.run + depend_action: glance-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: glance-keystone{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: glance-db{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: glance{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ironic-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: ironic-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent_action: ironic-db{{index}}.run + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent_action: ironic-keystone{{index}}.run + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: ironic-api{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: cinder-keystone{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: openstack-cinder{{index}}.run +- 
type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: cinder-db{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + depend_action: openstack-cinder{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ceilometer-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: ceilometer-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ceilometer-controller{{index}}.run +- type: depends_on + state: success + parent_action: ceilometer-keystone{{index}}.run + depend_action: ceilometer-controller{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: ceilometer-controller{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: murano-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: murano-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: workloads_collector_add{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: workloads_collector_add{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: heat-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: heat-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: swift-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: 
swift-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: nova-keystone{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: nova-keystone{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: openstack-cinder{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: database{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: nova-keystone{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: ceilometer-controller{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: nova-db{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: keystone{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: ironic-api{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: glance{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: openstack-haproxy{{index}}.run + depend_action: openstack-controller{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ceph-mon{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: ceph-mon{{index}}.run +- type: depends_on + state: success + parent_action: 
role_data{{index}}.run + depend_action: ceph-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: apache{{index}}.run + depend_action: ceph-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: ceph-mon{{index}}.run + depend_action: ceph-radosgw{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent_action: heat-db{{index}}.run + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent_action: heat-keystone{{index}}.run + depend_action: heat{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: swift{{index}}.run +- type: depends_on + state: success + parent_action: ceilometer-controller{{index}}.run + depend_action: swift{{index}}.run +- type: depends_on + state: success + parent_action: swift-keystone{{index}}.run + depend_action: swift{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: swift{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: swift-rebalance-cron{{index}}.run +- type: depends_on + state: success + parent_action: swift{{index}}.run + depend_action: swift-rebalance-cron{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-server-config{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-server-config{{index}}.run +- type: depends_on + state: success + parent_action: 
role_data{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-config{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-networks{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-networks{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-routers{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-networks{{index}}.run + depend_action: openstack-network-routers{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-networks{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-routers{{index}}.run + depend_action: openstack-network-agents-l3{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-server-nova{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-server-nova{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-agents-dhcp{{index}}.run +- type: 
depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-agents-dhcp{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-nova{{index}}.run + depend_action: openstack-network-agents-dhcp{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-agents-dhcp{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-nova{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-dhcp{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: horizon{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: horizon{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: horizon{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: murano-keystone{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: heat{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: 
murano-db{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: sahara-keystone{{index}}.run + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: sahara-db{{index}}.run + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: horizon{{index}}.run + depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: ceph-radosgw{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: api-proxy{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: murano{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: sahara{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: ceph-mon{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: swift{{index}}.run + depend_action: controller_remaining_tasks{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: vmware-vcenter{{index}}.run +- type: depends_on + state: success + parent_action: controller_remaining_tasks{{index}}.run + depend_action: vmware-vcenter{{index}}.run diff --git a/f2s/vrs/primary-mongo.yml b/f2s/vrs/primary-mongo.yml new file mode 100644 index 00000000..0bc78a5a --- /dev/null +++ b/f2s/vrs/primary-mongo.yml @@ -0,0 +1,141 @@ +id: primary-mongo +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' 
+ values: + env: '{{env}}' + uid: '{{index}}' +- id: fuel_pkgs{{index}} + from: f2s/resources/fuel_pkgs + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +- id: firewall{{index}} + from: f2s/resources/firewall + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: hosts{{index}} + from: f2s/resources/hosts + location: '{{node}}' + values_from: role_data{{index}} +- id: top-role-primary-mongo{{index}} + from: f2s/resources/top-role-primary-mongo + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: fuel_pkgs{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: 
role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: firewall{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: connectivity_tests{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: ssl-add-trust-chain{{index}}.run + depend_action: hosts{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: top-role-primary-mongo{{index}}.run +- type: depends_on + state: success + parent_action: firewall{{index}}.run + depend_action: top-role-primary-mongo{{index}}.run +- type: depends_on + state: success + parent_action: hosts{{index}}.run + 
depend_action: top-role-primary-mongo{{index}}.run diff --git a/f2s/vrs/virt.yml b/f2s/vrs/virt.yml new file mode 100644 index 00000000..742eda39 --- /dev/null +++ b/f2s/vrs/virt.yml @@ -0,0 +1,99 @@ +id: virt +resources: +- id: role_data{{index}} + from: f2s/resources/role_data + location: '{{node}}' + values: + env: '{{env}}' + uid: '{{index}}' +- id: ssl-keys-saving{{index}} + from: f2s/resources/ssl-keys-saving + location: '{{node}}' + values_from: role_data{{index}} +- id: ssl-add-trust-chain{{index}} + from: f2s/resources/ssl-add-trust-chain + location: '{{node}}' + values_from: role_data{{index}} +- id: logging{{index}} + from: f2s/resources/logging + location: '{{node}}' + values_from: role_data{{index}} +- id: tools{{index}} + from: f2s/resources/tools + location: '{{node}}' + values_from: role_data{{index}} +- id: netconfig{{index}} + from: f2s/resources/netconfig + location: '{{node}}' + values_from: role_data{{index}} +- id: generate_vms{{index}} + from: f2s/resources/generate_vms + location: '{{node}}' + values_from: role_data{{index}} +- id: connectivity_tests{{index}} + from: f2s/resources/connectivity_tests + location: '{{node}}' + values_from: role_data{{index}} +events: +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=firewall + depend_action: ssl-keys-saving{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: ssl-keys-saving{{index}}.run + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=firewall + depend_action: ssl-add-trust-chain{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: logging{{index}}.run +- 
type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: logging{{index}}.run + depend_action: tools{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: tools{{index}}.run + depend_action: netconfig{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: generate_vms{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: generate_vms{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: connectivity_tests{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: connectivity_tests{{index}}.run From 1d3409e381af59a6365d90e1721e8865929aac3a Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 15:26:18 +0200 Subject: [PATCH 24/51] Save generated resources with inputs --- f2s/resources/apache/actions/run.pp | 13 + f2s/resources/apache/meta.yaml | 12 + f2s/resources/api-proxy/actions/run.pp | 16 + f2s/resources/api-proxy/meta.yaml | 16 + .../ceilometer-compute/actions/run.pp | 59 +++ f2s/resources/ceilometer-compute/meta.yaml | 10 + .../ceilometer-controller/actions/run.pp | 111 ++++++ f2s/resources/ceilometer-controller/meta.yaml | 44 +++ .../ceilometer-keystone/actions/run.pp | 41 +++ f2s/resources/ceilometer-keystone/meta.yaml | 20 ++ .../ceilometer-radosgw-user/actions/run.pp | 20 ++ .../ceilometer-radosgw-user/meta.yaml | 14 + f2s/resources/ceph-compute/actions/run.pp | 97 +++++ f2s/resources/ceph-compute/meta.yaml | 10 + f2s/resources/ceph-mon/actions/run.pp | 95 +++++ f2s/resources/ceph-mon/meta.yaml | 32 ++ f2s/resources/ceph-radosgw/actions/run.pp | 103 ++++++ f2s/resources/ceph-radosgw/meta.yaml | 26 ++ 
.../ceph_create_pools/actions/run.pp | 80 +++++ f2s/resources/ceph_create_pools/meta.yaml | 12 + f2s/resources/cinder-db/actions/run.pp | 53 +++ f2s/resources/cinder-db/meta.yaml | 20 ++ f2s/resources/cinder-keystone/actions/run.pp | 51 +++ f2s/resources/cinder-keystone/meta.yaml | 20 ++ f2s/resources/cluster-haproxy/actions/run.pp | 20 ++ f2s/resources/cluster-haproxy/meta.yaml | 24 ++ f2s/resources/cluster-vrouter/actions/run.pp | 7 + f2s/resources/cluster-vrouter/meta.yaml | 12 + f2s/resources/cluster/actions/run.pp | 49 +++ f2s/resources/cluster/meta.yaml | 16 + f2s/resources/cluster_health/actions/run.pp | 20 ++ f2s/resources/cluster_health/meta.yaml | 24 ++ .../configure_default_route/actions/run.pp | 11 + .../configure_default_route/meta.yaml | 10 + .../connectivity_tests/actions/run.pp | 5 + f2s/resources/connectivity_tests/meta.yaml | 12 + f2s/resources/conntrackd/actions/run.pp | 79 ++++ f2s/resources/conntrackd/meta.yaml | 14 + .../controller_remaining_tasks/actions/run.pp | 49 +++ .../controller_remaining_tasks/meta.yaml | 16 + f2s/resources/database/actions/run.pp | 132 +++++++ f2s/resources/database/meta.yaml | 30 ++ .../actions/run.pp | 41 +++ .../disable_keystone_service_token/meta.yaml | 12 + f2s/resources/dns-client/actions/run.pp | 8 + f2s/resources/dns-client/meta.yaml | 12 + f2s/resources/dns-server/actions/run.pp | 16 + f2s/resources/dns-server/meta.yaml | 18 + .../dump_rabbitmq_definitions/actions/run.pp | 28 ++ .../dump_rabbitmq_definitions/meta.yaml | 12 + .../actions/run.pp | 10 + .../enable_cinder_volume_service/meta.yaml | 10 + .../actions/run.pp | 10 + .../enable_nova_compute_service/meta.yaml | 10 + f2s/resources/enable_rados/actions/run.pp | 17 + f2s/resources/enable_rados/meta.yaml | 10 + f2s/resources/firewall/actions/run.pp | 132 +++++++ f2s/resources/firewall/meta.yaml | 16 + f2s/resources/fuel_pkgs/actions/run.pp | 10 + f2s/resources/fuel_pkgs/meta.yaml | 10 + f2s/resources/generate_vms/actions/run.pp | 49 +++ 
f2s/resources/generate_vms/meta.yaml | 10 + f2s/resources/glance-db/actions/run.pp | 53 +++ f2s/resources/glance-db/meta.yaml | 22 ++ f2s/resources/glance-keystone/actions/run.pp | 42 +++ f2s/resources/glance-keystone/meta.yaml | 20 ++ f2s/resources/glance/actions/run.pp | 128 +++++++ f2s/resources/glance/meta.yaml | 46 +++ f2s/resources/globals/actions/run.pp | 293 +++++++++++++++ f2s/resources/globals/meta.yaml | 124 +++++++ f2s/resources/heat-db/actions/run.pp | 53 +++ f2s/resources/heat-db/meta.yaml | 20 ++ f2s/resources/heat-keystone/actions/run.pp | 59 +++ f2s/resources/heat-keystone/meta.yaml | 20 ++ f2s/resources/heat/actions/run.pp | 156 ++++++++ f2s/resources/heat/meta.yaml | 52 +++ f2s/resources/hiera/actions/run.pp | 75 ++++ f2s/resources/hiera/meta.yaml | 8 + f2s/resources/horizon/actions/run.pp | 68 ++++ f2s/resources/horizon/meta.yaml | 44 +++ f2s/resources/hosts/actions/run.pp | 5 + f2s/resources/hosts/meta.yaml | 10 + f2s/resources/ironic-api/actions/run.pp | 61 ++++ f2s/resources/ironic-api/meta.yaml | 8 + f2s/resources/ironic-compute/actions/run.pp | 98 +++++ f2s/resources/ironic-compute/meta.yaml | 10 + f2s/resources/ironic-conductor/actions/run.pp | 121 +++++++ f2s/resources/ironic-conductor/meta.yaml | 10 + f2s/resources/ironic-db/actions/run.pp | 51 +++ f2s/resources/ironic-db/meta.yaml | 20 ++ f2s/resources/ironic-keystone/actions/run.pp | 39 ++ f2s/resources/ironic-keystone/meta.yaml | 20 ++ f2s/resources/keystone-db/actions/run.pp | 54 +++ f2s/resources/keystone-db/meta.yaml | 22 ++ f2s/resources/keystone/actions/run.pp | 236 ++++++++++++ f2s/resources/keystone/meta.yaml | 74 ++++ f2s/resources/logging/actions/run.pp | 67 ++++ f2s/resources/logging/meta.yaml | 24 ++ f2s/resources/memcached/actions/run.pp | 8 + f2s/resources/memcached/meta.yaml | 12 + f2s/resources/murano-db/actions/run.pp | 57 +++ f2s/resources/murano-db/meta.yaml | 22 ++ f2s/resources/murano-keystone/actions/run.pp | 36 ++ f2s/resources/murano-keystone/meta.yaml | 22 ++ 
f2s/resources/murano/actions/run.pp | 163 +++++++++ f2s/resources/murano/meta.yaml | 56 +++ f2s/resources/netconfig/actions/run.pp | 106 ++++++ f2s/resources/netconfig/meta.yaml | 24 ++ f2s/resources/neutron-db/actions/run.pp | 59 +++ f2s/resources/neutron-db/meta.yaml | 26 ++ f2s/resources/neutron-keystone/actions/run.pp | 50 +++ f2s/resources/neutron-keystone/meta.yaml | 22 ++ f2s/resources/nova-db/actions/run.pp | 53 +++ f2s/resources/nova-db/meta.yaml | 20 ++ f2s/resources/nova-keystone/actions/run.pp | 56 +++ f2s/resources/nova-keystone/meta.yaml | 20 ++ f2s/resources/ntp-check/actions/run.pp | 6 + f2s/resources/ntp-check/meta.yaml | 12 + f2s/resources/ntp-client/actions/run.pp | 26 ++ f2s/resources/ntp-client/meta.yaml | 10 + f2s/resources/ntp-server/actions/run.pp | 31 ++ f2s/resources/ntp-server/meta.yaml | 12 + f2s/resources/openstack-cinder/actions/run.pp | 107 ++++++ f2s/resources/openstack-cinder/meta.yaml | 56 +++ .../openstack-controller/actions/run.pp | 233 ++++++++++++ f2s/resources/openstack-controller/meta.yaml | 110 ++++++ .../actions/run.pp | 23 ++ .../openstack-haproxy-ceilometer/meta.yaml | 16 + .../openstack-haproxy-cinder/actions/run.pp | 24 ++ .../openstack-haproxy-cinder/meta.yaml | 26 ++ .../openstack-haproxy-glance/actions/run.pp | 26 ++ .../openstack-haproxy-glance/meta.yaml | 24 ++ .../openstack-haproxy-heat/actions/run.pp | 24 ++ .../openstack-haproxy-heat/meta.yaml | 26 ++ .../openstack-haproxy-horizon/actions/run.pp | 24 ++ .../openstack-haproxy-horizon/meta.yaml | 26 ++ .../openstack-haproxy-ironic/actions/run.pp | 22 ++ .../openstack-haproxy-ironic/meta.yaml | 8 + .../openstack-haproxy-keystone/actions/run.pp | 29 ++ .../openstack-haproxy-keystone/meta.yaml | 28 ++ .../openstack-haproxy-murano/actions/run.pp | 24 ++ .../openstack-haproxy-murano/meta.yaml | 18 + .../openstack-haproxy-mysqld/actions/run.pp | 31 ++ .../openstack-haproxy-mysqld/meta.yaml | 32 ++ .../openstack-haproxy-neutron/actions/run.pp | 22 ++ 
.../openstack-haproxy-neutron/meta.yaml | 24 ++ .../openstack-haproxy-nova/actions/run.pp | 25 ++ .../openstack-haproxy-nova/meta.yaml | 24 ++ .../openstack-haproxy-radosgw/actions/run.pp | 34 ++ .../openstack-haproxy-radosgw/meta.yaml | 16 + .../openstack-haproxy-sahara/actions/run.pp | 24 ++ .../openstack-haproxy-sahara/meta.yaml | 18 + .../openstack-haproxy-stats/actions/run.pp | 7 + .../openstack-haproxy-stats/meta.yaml | 16 + .../openstack-haproxy-swift/actions/run.pp | 37 ++ .../openstack-haproxy-swift/meta.yaml | 28 ++ .../openstack-haproxy/actions/run.pp | 3 + f2s/resources/openstack-haproxy/meta.yaml | 10 + .../actions/run.pp | 39 ++ .../openstack-network-agents-dhcp/meta.yaml | 18 + .../actions/run.pp | 59 +++ .../openstack-network-agents-l3/meta.yaml | 20 ++ .../actions/run.pp | 57 +++ .../meta.yaml | 28 ++ .../actions/run.pp | 110 ++++++ .../openstack-network-common-config/meta.yaml | 34 ++ .../actions/run.pp | 267 ++++++++++++++ .../openstack-network-compute-nova/meta.yaml | 10 + .../openstack-network-networks/actions/run.pp | 106 ++++++ .../openstack-network-networks/meta.yaml | 18 + .../actions/run.pp | 171 +++++++++ .../openstack-network-plugins-l2/meta.yaml | 26 ++ .../openstack-network-routers/actions/run.pp | 32 ++ .../openstack-network-routers/meta.yaml | 18 + .../actions/run.pp | 95 +++++ .../openstack-network-server-config/meta.yaml | 30 ++ .../actions/run.pp | 81 +++++ .../openstack-network-server-nova/meta.yaml | 22 ++ f2s/resources/pre_hiera_config/actions/run.pp | 75 ++++ f2s/resources/pre_hiera_config/meta.yaml | 8 + f2s/resources/public_vip_ping/actions/run.pp | 17 + f2s/resources/public_vip_ping/meta.yaml | 14 + f2s/resources/rabbitmq/actions/run.pp | 165 +++++++++ f2s/resources/rabbitmq/meta.yaml | 40 +++ f2s/resources/sahara-db/actions/run.pp | 57 +++ f2s/resources/sahara-db/meta.yaml | 22 ++ f2s/resources/sahara-keystone/actions/run.pp | 34 ++ f2s/resources/sahara-keystone/meta.yaml | 20 ++ f2s/resources/sahara/actions/run.pp | 156 
++++++++ f2s/resources/sahara/meta.yaml | 52 +++ .../ssl-add-trust-chain/actions/run.pp | 42 +++ f2s/resources/ssl-add-trust-chain/meta.yaml | 14 + f2s/resources/ssl-keys-saving/actions/run.pp | 22 ++ f2s/resources/ssl-keys-saving/meta.yaml | 12 + f2s/resources/swift-keystone/actions/run.pp | 45 +++ f2s/resources/swift-keystone/meta.yaml | 20 ++ .../swift-rebalance-cron/actions/run.pp | 24 ++ f2s/resources/swift-rebalance-cron/meta.yaml | 20 ++ f2s/resources/swift/actions/run.pp | 147 ++++++++ f2s/resources/swift/meta.yaml | 60 ++++ f2s/resources/tools/actions/run.pp | 42 +++ f2s/resources/tools/meta.yaml | 14 + .../top-role-ceph-osd/actions/run.pp | 57 +++ f2s/resources/top-role-ceph-osd/meta.yaml | 10 + .../top-role-cinder-vmware/actions/run.pp | 11 + .../top-role-cinder-vmware/meta.yaml | 10 + f2s/resources/top-role-cinder/actions/run.pp | 308 ++++++++++++++++ f2s/resources/top-role-cinder/meta.yaml | 10 + .../top-role-compute-vmware/actions/run.pp | 18 + .../top-role-compute-vmware/meta.yaml | 10 + f2s/resources/top-role-compute/actions/run.pp | 339 ++++++++++++++++++ f2s/resources/top-role-compute/meta.yaml | 10 + f2s/resources/top-role-mongo/actions/run.pp | 32 ++ f2s/resources/top-role-mongo/meta.yaml | 10 + .../top-role-primary-mongo/actions/run.pp | 32 ++ .../top-role-primary-mongo/meta.yaml | 10 + f2s/resources/umm/actions/run.pp | 3 + f2s/resources/umm/meta.yaml | 10 + f2s/resources/update_hosts/actions/run.pp | 5 + f2s/resources/update_hosts/meta.yaml | 10 + f2s/resources/updatedb/actions/run.pp | 21 ++ f2s/resources/updatedb/meta.yaml | 12 + f2s/resources/virtual_ips/actions/run.pp | 3 + f2s/resources/virtual_ips/meta.yaml | 14 + f2s/resources/vmware-vcenter/actions/run.pp | 19 + f2s/resources/vmware-vcenter/meta.yaml | 24 ++ .../workloads_collector_add/actions/run.pp | 21 ++ .../workloads_collector_add/meta.yaml | 14 + requirements.txt | 4 +- 229 files changed, 9681 insertions(+), 2 deletions(-) create mode 100644 f2s/resources/apache/actions/run.pp 
create mode 100644 f2s/resources/apache/meta.yaml create mode 100644 f2s/resources/api-proxy/actions/run.pp create mode 100644 f2s/resources/api-proxy/meta.yaml create mode 100644 f2s/resources/ceilometer-compute/actions/run.pp create mode 100644 f2s/resources/ceilometer-compute/meta.yaml create mode 100644 f2s/resources/ceilometer-controller/actions/run.pp create mode 100644 f2s/resources/ceilometer-controller/meta.yaml create mode 100644 f2s/resources/ceilometer-keystone/actions/run.pp create mode 100644 f2s/resources/ceilometer-keystone/meta.yaml create mode 100644 f2s/resources/ceilometer-radosgw-user/actions/run.pp create mode 100644 f2s/resources/ceilometer-radosgw-user/meta.yaml create mode 100644 f2s/resources/ceph-compute/actions/run.pp create mode 100644 f2s/resources/ceph-compute/meta.yaml create mode 100644 f2s/resources/ceph-mon/actions/run.pp create mode 100644 f2s/resources/ceph-mon/meta.yaml create mode 100644 f2s/resources/ceph-radosgw/actions/run.pp create mode 100644 f2s/resources/ceph-radosgw/meta.yaml create mode 100644 f2s/resources/ceph_create_pools/actions/run.pp create mode 100644 f2s/resources/ceph_create_pools/meta.yaml create mode 100644 f2s/resources/cinder-db/actions/run.pp create mode 100644 f2s/resources/cinder-db/meta.yaml create mode 100644 f2s/resources/cinder-keystone/actions/run.pp create mode 100644 f2s/resources/cinder-keystone/meta.yaml create mode 100644 f2s/resources/cluster-haproxy/actions/run.pp create mode 100644 f2s/resources/cluster-haproxy/meta.yaml create mode 100644 f2s/resources/cluster-vrouter/actions/run.pp create mode 100644 f2s/resources/cluster-vrouter/meta.yaml create mode 100644 f2s/resources/cluster/actions/run.pp create mode 100644 f2s/resources/cluster/meta.yaml create mode 100644 f2s/resources/cluster_health/actions/run.pp create mode 100644 f2s/resources/cluster_health/meta.yaml create mode 100644 f2s/resources/configure_default_route/actions/run.pp create mode 100644 
f2s/resources/configure_default_route/meta.yaml create mode 100644 f2s/resources/connectivity_tests/actions/run.pp create mode 100644 f2s/resources/connectivity_tests/meta.yaml create mode 100644 f2s/resources/conntrackd/actions/run.pp create mode 100644 f2s/resources/conntrackd/meta.yaml create mode 100644 f2s/resources/controller_remaining_tasks/actions/run.pp create mode 100644 f2s/resources/controller_remaining_tasks/meta.yaml create mode 100644 f2s/resources/database/actions/run.pp create mode 100644 f2s/resources/database/meta.yaml create mode 100644 f2s/resources/disable_keystone_service_token/actions/run.pp create mode 100644 f2s/resources/disable_keystone_service_token/meta.yaml create mode 100644 f2s/resources/dns-client/actions/run.pp create mode 100644 f2s/resources/dns-client/meta.yaml create mode 100644 f2s/resources/dns-server/actions/run.pp create mode 100644 f2s/resources/dns-server/meta.yaml create mode 100644 f2s/resources/dump_rabbitmq_definitions/actions/run.pp create mode 100644 f2s/resources/dump_rabbitmq_definitions/meta.yaml create mode 100644 f2s/resources/enable_cinder_volume_service/actions/run.pp create mode 100644 f2s/resources/enable_cinder_volume_service/meta.yaml create mode 100644 f2s/resources/enable_nova_compute_service/actions/run.pp create mode 100644 f2s/resources/enable_nova_compute_service/meta.yaml create mode 100644 f2s/resources/enable_rados/actions/run.pp create mode 100644 f2s/resources/enable_rados/meta.yaml create mode 100644 f2s/resources/firewall/actions/run.pp create mode 100644 f2s/resources/firewall/meta.yaml create mode 100644 f2s/resources/fuel_pkgs/actions/run.pp create mode 100644 f2s/resources/fuel_pkgs/meta.yaml create mode 100644 f2s/resources/generate_vms/actions/run.pp create mode 100644 f2s/resources/generate_vms/meta.yaml create mode 100644 f2s/resources/glance-db/actions/run.pp create mode 100644 f2s/resources/glance-db/meta.yaml create mode 100644 f2s/resources/glance-keystone/actions/run.pp create 
mode 100644 f2s/resources/glance-keystone/meta.yaml create mode 100644 f2s/resources/glance/actions/run.pp create mode 100644 f2s/resources/glance/meta.yaml create mode 100644 f2s/resources/globals/actions/run.pp create mode 100644 f2s/resources/globals/meta.yaml create mode 100644 f2s/resources/heat-db/actions/run.pp create mode 100644 f2s/resources/heat-db/meta.yaml create mode 100644 f2s/resources/heat-keystone/actions/run.pp create mode 100644 f2s/resources/heat-keystone/meta.yaml create mode 100644 f2s/resources/heat/actions/run.pp create mode 100644 f2s/resources/heat/meta.yaml create mode 100644 f2s/resources/hiera/actions/run.pp create mode 100644 f2s/resources/hiera/meta.yaml create mode 100644 f2s/resources/horizon/actions/run.pp create mode 100644 f2s/resources/horizon/meta.yaml create mode 100644 f2s/resources/hosts/actions/run.pp create mode 100644 f2s/resources/hosts/meta.yaml create mode 100644 f2s/resources/ironic-api/actions/run.pp create mode 100644 f2s/resources/ironic-api/meta.yaml create mode 100644 f2s/resources/ironic-compute/actions/run.pp create mode 100644 f2s/resources/ironic-compute/meta.yaml create mode 100644 f2s/resources/ironic-conductor/actions/run.pp create mode 100644 f2s/resources/ironic-conductor/meta.yaml create mode 100644 f2s/resources/ironic-db/actions/run.pp create mode 100644 f2s/resources/ironic-db/meta.yaml create mode 100644 f2s/resources/ironic-keystone/actions/run.pp create mode 100644 f2s/resources/ironic-keystone/meta.yaml create mode 100644 f2s/resources/keystone-db/actions/run.pp create mode 100644 f2s/resources/keystone-db/meta.yaml create mode 100644 f2s/resources/keystone/actions/run.pp create mode 100644 f2s/resources/keystone/meta.yaml create mode 100644 f2s/resources/logging/actions/run.pp create mode 100644 f2s/resources/logging/meta.yaml create mode 100644 f2s/resources/memcached/actions/run.pp create mode 100644 f2s/resources/memcached/meta.yaml create mode 100644 f2s/resources/murano-db/actions/run.pp 
create mode 100644 f2s/resources/murano-db/meta.yaml create mode 100644 f2s/resources/murano-keystone/actions/run.pp create mode 100644 f2s/resources/murano-keystone/meta.yaml create mode 100644 f2s/resources/murano/actions/run.pp create mode 100644 f2s/resources/murano/meta.yaml create mode 100644 f2s/resources/netconfig/actions/run.pp create mode 100644 f2s/resources/netconfig/meta.yaml create mode 100644 f2s/resources/neutron-db/actions/run.pp create mode 100644 f2s/resources/neutron-db/meta.yaml create mode 100644 f2s/resources/neutron-keystone/actions/run.pp create mode 100644 f2s/resources/neutron-keystone/meta.yaml create mode 100644 f2s/resources/nova-db/actions/run.pp create mode 100644 f2s/resources/nova-db/meta.yaml create mode 100644 f2s/resources/nova-keystone/actions/run.pp create mode 100644 f2s/resources/nova-keystone/meta.yaml create mode 100644 f2s/resources/ntp-check/actions/run.pp create mode 100644 f2s/resources/ntp-check/meta.yaml create mode 100644 f2s/resources/ntp-client/actions/run.pp create mode 100644 f2s/resources/ntp-client/meta.yaml create mode 100644 f2s/resources/ntp-server/actions/run.pp create mode 100644 f2s/resources/ntp-server/meta.yaml create mode 100644 f2s/resources/openstack-cinder/actions/run.pp create mode 100644 f2s/resources/openstack-cinder/meta.yaml create mode 100644 f2s/resources/openstack-controller/actions/run.pp create mode 100644 f2s/resources/openstack-controller/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-ceilometer/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-ceilometer/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-cinder/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-cinder/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-glance/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-glance/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-heat/actions/run.pp create mode 100644 
f2s/resources/openstack-haproxy-heat/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-horizon/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-horizon/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-ironic/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-ironic/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-keystone/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-keystone/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-murano/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-murano/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-mysqld/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-mysqld/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-neutron/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-neutron/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-nova/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-nova/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-radosgw/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-radosgw/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-sahara/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-sahara/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-stats/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-stats/meta.yaml create mode 100644 f2s/resources/openstack-haproxy-swift/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy-swift/meta.yaml create mode 100644 f2s/resources/openstack-haproxy/actions/run.pp create mode 100644 f2s/resources/openstack-haproxy/meta.yaml create mode 100644 f2s/resources/openstack-network-agents-dhcp/actions/run.pp create mode 100644 f2s/resources/openstack-network-agents-dhcp/meta.yaml create mode 100644 f2s/resources/openstack-network-agents-l3/actions/run.pp create mode 100644 
f2s/resources/openstack-network-agents-l3/meta.yaml create mode 100644 f2s/resources/openstack-network-agents-metadata/actions/run.pp create mode 100644 f2s/resources/openstack-network-agents-metadata/meta.yaml create mode 100644 f2s/resources/openstack-network-common-config/actions/run.pp create mode 100644 f2s/resources/openstack-network-common-config/meta.yaml create mode 100644 f2s/resources/openstack-network-compute-nova/actions/run.pp create mode 100644 f2s/resources/openstack-network-compute-nova/meta.yaml create mode 100644 f2s/resources/openstack-network-networks/actions/run.pp create mode 100644 f2s/resources/openstack-network-networks/meta.yaml create mode 100644 f2s/resources/openstack-network-plugins-l2/actions/run.pp create mode 100644 f2s/resources/openstack-network-plugins-l2/meta.yaml create mode 100644 f2s/resources/openstack-network-routers/actions/run.pp create mode 100644 f2s/resources/openstack-network-routers/meta.yaml create mode 100644 f2s/resources/openstack-network-server-config/actions/run.pp create mode 100644 f2s/resources/openstack-network-server-config/meta.yaml create mode 100644 f2s/resources/openstack-network-server-nova/actions/run.pp create mode 100644 f2s/resources/openstack-network-server-nova/meta.yaml create mode 100644 f2s/resources/pre_hiera_config/actions/run.pp create mode 100644 f2s/resources/pre_hiera_config/meta.yaml create mode 100644 f2s/resources/public_vip_ping/actions/run.pp create mode 100644 f2s/resources/public_vip_ping/meta.yaml create mode 100644 f2s/resources/rabbitmq/actions/run.pp create mode 100644 f2s/resources/rabbitmq/meta.yaml create mode 100644 f2s/resources/sahara-db/actions/run.pp create mode 100644 f2s/resources/sahara-db/meta.yaml create mode 100644 f2s/resources/sahara-keystone/actions/run.pp create mode 100644 f2s/resources/sahara-keystone/meta.yaml create mode 100644 f2s/resources/sahara/actions/run.pp create mode 100644 f2s/resources/sahara/meta.yaml create mode 100644 
f2s/resources/ssl-add-trust-chain/actions/run.pp create mode 100644 f2s/resources/ssl-add-trust-chain/meta.yaml create mode 100644 f2s/resources/ssl-keys-saving/actions/run.pp create mode 100644 f2s/resources/ssl-keys-saving/meta.yaml create mode 100644 f2s/resources/swift-keystone/actions/run.pp create mode 100644 f2s/resources/swift-keystone/meta.yaml create mode 100644 f2s/resources/swift-rebalance-cron/actions/run.pp create mode 100644 f2s/resources/swift-rebalance-cron/meta.yaml create mode 100644 f2s/resources/swift/actions/run.pp create mode 100644 f2s/resources/swift/meta.yaml create mode 100644 f2s/resources/tools/actions/run.pp create mode 100644 f2s/resources/tools/meta.yaml create mode 100644 f2s/resources/top-role-ceph-osd/actions/run.pp create mode 100644 f2s/resources/top-role-ceph-osd/meta.yaml create mode 100644 f2s/resources/top-role-cinder-vmware/actions/run.pp create mode 100644 f2s/resources/top-role-cinder-vmware/meta.yaml create mode 100644 f2s/resources/top-role-cinder/actions/run.pp create mode 100644 f2s/resources/top-role-cinder/meta.yaml create mode 100644 f2s/resources/top-role-compute-vmware/actions/run.pp create mode 100644 f2s/resources/top-role-compute-vmware/meta.yaml create mode 100644 f2s/resources/top-role-compute/actions/run.pp create mode 100644 f2s/resources/top-role-compute/meta.yaml create mode 100644 f2s/resources/top-role-mongo/actions/run.pp create mode 100644 f2s/resources/top-role-mongo/meta.yaml create mode 100644 f2s/resources/top-role-primary-mongo/actions/run.pp create mode 100644 f2s/resources/top-role-primary-mongo/meta.yaml create mode 100644 f2s/resources/umm/actions/run.pp create mode 100644 f2s/resources/umm/meta.yaml create mode 100644 f2s/resources/update_hosts/actions/run.pp create mode 100644 f2s/resources/update_hosts/meta.yaml create mode 100644 f2s/resources/updatedb/actions/run.pp create mode 100644 f2s/resources/updatedb/meta.yaml create mode 100644 f2s/resources/virtual_ips/actions/run.pp create 
mode 100644 f2s/resources/virtual_ips/meta.yaml create mode 100644 f2s/resources/vmware-vcenter/actions/run.pp create mode 100644 f2s/resources/vmware-vcenter/meta.yaml create mode 100644 f2s/resources/workloads_collector_add/actions/run.pp create mode 100644 f2s/resources/workloads_collector_add/meta.yaml diff --git a/f2s/resources/apache/actions/run.pp b/f2s/resources/apache/actions/run.pp new file mode 100644 index 00000000..f1dbfb9c --- /dev/null +++ b/f2s/resources/apache/actions/run.pp @@ -0,0 +1,13 @@ +notice('MODULAR: apache.pp') + +# adjustments to defaults for LP#1485644 for scale +sysctl::value { 'net.core.somaxconn': value => '4096' } +sysctl::value { 'net.ipv4.tcp_max_syn_backlog': value => '8192' } + +class { 'osnailyfacter::apache': + purge_configs => true, + listen_ports => hiera_array('apache_ports', ['80', '8888']), +} + +include ::osnailyfacter::apache_mpm + diff --git a/f2s/resources/apache/meta.yaml b/f2s/resources/apache/meta.yaml new file mode 100644 index 00000000..8a642df7 --- /dev/null +++ b/f2s/resources/apache/meta.yaml @@ -0,0 +1,12 @@ +id: apache +handler: puppetv2 +version: '8.0' +inputs: + apache_ports: + value: null + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/api-proxy/actions/run.pp b/f2s/resources/api-proxy/actions/run.pp new file mode 100644 index 00000000..80fd7158 --- /dev/null +++ b/f2s/resources/api-proxy/actions/run.pp @@ -0,0 +1,16 @@ +notice('MODULAR: api-proxy.pp') + +$max_header_size = hiera('max_header_size', '81900') + +# Apache and listen ports +class { 'osnailyfacter::apache': + listen_ports => hiera_array('apache_ports', ['80', '8888']), +} + +# API proxy vhost +class {'osnailyfacter::apache_api_proxy': + master_ip => hiera('master_ip'), + max_header_size => $max_header_size, +} + +include ::tweaks::apache_wrappers diff --git a/f2s/resources/api-proxy/meta.yaml b/f2s/resources/api-proxy/meta.yaml new file mode 100644 index 00000000..786ec7f9 --- /dev/null 
+++ b/f2s/resources/api-proxy/meta.yaml @@ -0,0 +1,16 @@ +id: api-proxy +handler: puppetv2 +version: '8.0' +inputs: + apache_ports: + value: null + fqdn: + value: null + master_ip: + value: null + max_header_size: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ceilometer-compute/actions/run.pp b/f2s/resources/ceilometer-compute/actions/run.pp new file mode 100644 index 00000000..53fde2a5 --- /dev/null +++ b/f2s/resources/ceilometer-compute/actions/run.pp @@ -0,0 +1,59 @@ +notice('MODULAR: ceilometer/compute.pp') + +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', false) +$syslog_log_facility = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0') +$rabbit_hash = hiera_hash('rabbit_hash') +$management_vip = hiera('management_vip') +$service_endpoint = hiera('service_endpoint') + +$default_ceilometer_hash = { + 'enabled' => false, + 'db_password' => 'ceilometer', + 'user_password' => 'ceilometer', + 'metering_secret' => 'ceilometer', + 'http_timeout' => '600', + 'event_time_to_live' => '604800', + 'metering_time_to_live' => '604800', +} + +$region = hiera('region', 'RegionOne') +$ceilometer_hash = hiera_hash('ceilometer_hash', $default_ceilometer_hash) +$ceilometer_region = pick($ceilometer_hash['region'], $region) +$ceilometer_enabled = $ceilometer_hash['enabled'] +$amqp_password = $rabbit_hash['password'] +$amqp_user = $rabbit_hash['user'] +$ceilometer_user_password = $ceilometer_hash['user_password'] +$ceilometer_metering_secret = $ceilometer_hash['metering_secret'] +$verbose = pick($ceilometer_hash['verbose'], hiera('verbose', true)) +$debug = pick($ceilometer_hash['debug'], hiera('debug', false)) + +if ($ceilometer_enabled) { + class { 'openstack::ceilometer': + verbose => $verbose, + debug => $debug, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + syslog_log_facility => $syslog_log_facility, + amqp_hosts => hiera('amqp_hosts',''), + amqp_user => $amqp_user, + amqp_password => 
$amqp_password, + keystone_user => $ceilometer_hash['user'], + keystone_tenant => $ceilometer_hash['tenant'], + keystone_region => $ceilometer_region, + keystone_host => $service_endpoint, + keystone_password => $ceilometer_user_password, + on_compute => true, + metering_secret => $ceilometer_metering_secret, + event_time_to_live => $ceilometer_hash['event_time_to_live'], + metering_time_to_live => $ceilometer_hash['metering_time_to_live'], + http_timeout => $ceilometer_hash['http_timeout'], + } + + # We need to restart nova-compute service in order to apply new settings + include ::nova::params + service { 'nova-compute': + ensure => 'running', + name => $::nova::params::compute_service_name, + } +} diff --git a/f2s/resources/ceilometer-compute/meta.yaml b/f2s/resources/ceilometer-compute/meta.yaml new file mode 100644 index 00000000..a96b8ea0 --- /dev/null +++ b/f2s/resources/ceilometer-compute/meta.yaml @@ -0,0 +1,10 @@ +id: ceilometer-compute +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ceilometer-controller/actions/run.pp b/f2s/resources/ceilometer-controller/actions/run.pp new file mode 100644 index 00000000..0a062b5c --- /dev/null +++ b/f2s/resources/ceilometer-controller/actions/run.pp @@ -0,0 +1,111 @@ +notice('MODULAR: ceilometer/controller.pp') + +$default_ceilometer_hash = { + 'enabled' => false, + 'db_password' => 'ceilometer', + 'user_password' => 'ceilometer', + 'metering_secret' => 'ceilometer', + 'http_timeout' => '600', + 'event_time_to_live' => '604800', + 'metering_time_to_live' => '604800', +} + +$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash) +$verbose = pick($ceilometer_hash['verbose'], hiera('verbose', true)) +$debug = pick($ceilometer_hash['debug'], hiera('debug', false)) +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', false) +$syslog_log_facility = hiera('syslog_log_facility_ceilometer',
'LOG_LOCAL0') +$nodes_hash = hiera('nodes') +$storage_hash = hiera('storage') +$rabbit_hash = hiera_hash('rabbit_hash') +$management_vip = hiera('management_vip') +$region = hiera('region', 'RegionOne') +$ceilometer_region = pick($ceilometer_hash['region'], $region) +$mongo_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles')) +$mongo_address_map = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db') + +$default_mongo_hash = { + 'enabled' => false, +} + +$mongo_hash = hiera_hash('mongo', $default_mongo_hash) + +if $mongo_hash['enabled'] and $ceilometer_hash['enabled'] { + $exteranl_mongo_hash = hiera_hash('external_mongo') + $ceilometer_db_user = $exteranl_mongo_hash['mongo_user'] + $ceilometer_db_password = $exteranl_mongo_hash['mongo_password'] + $ceilometer_db_dbname = $exteranl_mongo_hash['mongo_db_name'] + $external_mongo = true +} else { + $ceilometer_db_user = 'ceilometer' + $ceilometer_db_password = $ceilometer_hash['db_password'] + $ceilometer_db_dbname = 'ceilometer' + $external_mongo = false + $exteranl_mongo_hash = {} +} + +$ceilometer_enabled = $ceilometer_hash['enabled'] +$ceilometer_user_password = $ceilometer_hash['user_password'] +$ceilometer_metering_secret = $ceilometer_hash['metering_secret'] +$ceilometer_db_type = 'mongodb' +$swift_rados_backend = $storage_hash['objects_ceph'] +$amqp_password = $rabbit_hash['password'] +$amqp_user = $rabbit_hash['user'] +$rabbit_ha_queues = true +$service_endpoint = hiera('service_endpoint') +$ha_mode = pick($ceilometer_hash['ha_mode'], true) + +prepare_network_config(hiera('network_scheme', {})) +$api_bind_address = get_network_role_property('ceilometer/api', 'ipaddr') + +if $ceilometer_hash['enabled'] { + if $external_mongo { + $mongo_hosts = $exteranl_mongo_hash['hosts_ip'] + if $exteranl_mongo_hash['mongo_replset'] { + $mongo_replicaset = $exteranl_mongo_hash['mongo_replset'] + } else { + $mongo_replicaset = undef + } + } else { + $mongo_hosts = 
join(values($mongo_address_map), ',') + # MongoDB is always configured with replica set + $mongo_replicaset = 'ceilometer' + } +} + +############################################################################### + +if ($ceilometer_enabled) { + class { 'openstack::ceilometer': + verbose => $verbose, + debug => $debug, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + syslog_log_facility => $syslog_log_facility, + db_type => $ceilometer_db_type, + db_host => $mongo_hosts, + db_user => $ceilometer_db_user, + db_password => $ceilometer_db_password, + db_dbname => $ceilometer_db_dbname, + swift_rados_backend => $swift_rados_backend, + metering_secret => $ceilometer_metering_secret, + amqp_hosts => hiera('amqp_hosts',''), + amqp_user => $amqp_user, + amqp_password => $amqp_password, + rabbit_ha_queues => $rabbit_ha_queues, + keystone_host => $service_endpoint, + keystone_password => $ceilometer_user_password, + keystone_user => $ceilometer_hash['user'], + keystone_tenant => $ceilometer_hash['tenant'], + keystone_region => $ceilometer_region, + host => $api_bind_address, + ha_mode => $ha_mode, + on_controller => true, + ext_mongo => $external_mongo, + mongo_replicaset => $mongo_replicaset, + event_time_to_live => $ceilometer_hash['event_time_to_live'], + metering_time_to_live => $ceilometer_hash['metering_time_to_live'], + http_timeout => $ceilometer_hash['http_timeout'], + } +} diff --git a/f2s/resources/ceilometer-controller/meta.yaml b/f2s/resources/ceilometer-controller/meta.yaml new file mode 100644 index 00000000..871b296d --- /dev/null +++ b/f2s/resources/ceilometer-controller/meta.yaml @@ -0,0 +1,44 @@ +id: ceilometer-controller +handler: puppetv2 +version: '8.0' +inputs: + ceilometer: + value: null + debug: + value: null + fqdn: + value: null + management_vip: + value: null + mongo: + value: null + mongo_roles: + value: null + network_metadata: + value: null + network_scheme: + value: null + nodes: + value: null + puppet_modules: + value: null + 
rabbit: + value: null + rabbit_hash: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + storage: + value: null + syslog_log_facility_ceilometer: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/ceilometer-keystone/actions/run.pp b/f2s/resources/ceilometer-keystone/actions/run.pp new file mode 100644 index 00000000..c6ddaef1 --- /dev/null +++ b/f2s/resources/ceilometer-keystone/actions/run.pp @@ -0,0 +1,41 @@ +notice('MODULAR: ceilometer/keystone.pp') + +$ceilometer_hash = hiera_hash('ceilometer', {}) +$public_vip = hiera('public_vip') +$public_ssl_hash = hiera('public_ssl') +$public_address = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} +$admin_address = hiera('management_vip') +$region = pick($ceilometer_hash['region'], hiera('region', 'RegionOne')) +$password = $ceilometer_hash['user_password'] +$auth_name = pick($ceilometer_hash['auth_name'], 'ceilometer') +$configure_endpoint = pick($ceilometer_hash['configure_endpoint'], true) +$configure_user = pick($ceilometer_hash['configure_user'], true) +$configure_user_role = pick($ceilometer_hash['configure_user_role'], true) +$service_name = pick($ceilometer_hash['service_name'], 'ceilometer') +$tenant = pick($ceilometer_hash['tenant'], 'services') + +validate_string($public_address) +validate_string($password) + +$public_url = "${public_protocol}://${public_address}:8777" +$admin_url = "http://${admin_address}:8777" + +class { '::ceilometer::keystone::auth': + password => $password, + auth_name => $auth_name, + configure_endpoint => $configure_endpoint, + configure_user => $configure_user, + configure_user_role => $configure_user_role, + service_name => $service_name, + public_url => $public_url, + internal_url => $admin_url, + admin_url => $admin_url, 
+ region => $region, +} diff --git a/f2s/resources/ceilometer-keystone/meta.yaml b/f2s/resources/ceilometer-keystone/meta.yaml new file mode 100644 index 00000000..5163496e --- /dev/null +++ b/f2s/resources/ceilometer-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: ceilometer-keystone +handler: puppetv2 +version: '8.0' +inputs: + ceilometer: + value: null + fqdn: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null diff --git a/f2s/resources/ceilometer-radosgw-user/actions/run.pp b/f2s/resources/ceilometer-radosgw-user/actions/run.pp new file mode 100644 index 00000000..4d12f91b --- /dev/null +++ b/f2s/resources/ceilometer-radosgw-user/actions/run.pp @@ -0,0 +1,20 @@ +notice('MODULAR: ceilometer/radosgw_user.pp') + +$default_ceilometer_hash = { + 'enabled' => false, +} + +$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash) + +if $ceilometer_hash['enabled'] { + include ceilometer::params + + ceilometer_radosgw_user { 'ceilometer': + caps => {'buckets' => 'read', 'usage' => 'read'}, + } ~> + service { $::ceilometer::params::agent_central_service_name: + ensure => 'running', + enable => true, + provider => 'pacemaker', + } +} diff --git a/f2s/resources/ceilometer-radosgw-user/meta.yaml b/f2s/resources/ceilometer-radosgw-user/meta.yaml new file mode 100644 index 00000000..145ea0a7 --- /dev/null +++ b/f2s/resources/ceilometer-radosgw-user/meta.yaml @@ -0,0 +1,14 @@ +id: ceilometer-radosgw-user +handler: puppetv2 +version: '8.0' +inputs: + ceilometer: + value: null + fqdn: + value: null + puppet_modules: + value: null + role: + value: null + storage: + value: null diff --git a/f2s/resources/ceph-compute/actions/run.pp b/f2s/resources/ceph-compute/actions/run.pp new file mode 100644 index 00000000..757231e6 --- /dev/null +++ b/f2s/resources/ceph-compute/actions/run.pp @@ -0,0 +1,97 @@ +notice('MODULAR: ceph/ceph_compute.pp') + 
+$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') +$storage_hash = hiera_hash('storage_hash', {}) +$use_neutron = hiera('use_neutron') +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') +$keystone_hash = hiera_hash('keystone_hash', {}) +# Cinder settings +$cinder_pool = 'volumes' +# Glance settings +$glance_pool = 'images' +#Nova Compute settings +$compute_user = 'compute' +$compute_pool = 'compute' + + +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' +} elsif ($storage_hash['images_vcenter']) { + $glance_backend = 'vmware' +} else { + $glance_backend = 'swift' +} + +if ($storage_hash['volumes_ceph'] or + $storage_hash['images_ceph'] or + $storage_hash['objects_ceph'] or + $storage_hash['ephemeral_ceph'] +) { + $use_ceph = true +} else { + $use_ceph = false +} + +if $use_ceph { + $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') + $primary_mons = keys($ceph_primary_monitor_node) + $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] + + prepare_network_config(hiera_hash('network_scheme')) + $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') + $ceph_public_network = get_network_role_property('ceph/public', 'network') + + class {'ceph': + primary_mon => $primary_mon, + mon_hosts => keys($mon_address_map), + mon_ip_addresses => values($mon_address_map), + cluster_node_address => $public_vip, + osd_pool_default_size => $storage_hash['osd_pool_size'], + osd_pool_default_pg_num => $storage_hash['pg_num'], + osd_pool_default_pgp_num => $storage_hash['pg_num'], + use_rgw => false, + glance_backend => $glance_backend, + rgw_pub_ip => $public_vip, + rgw_adm_ip => $management_vip, + rgw_int_ip => $management_vip, + cluster_network => $ceph_cluster_network, + public_network => $ceph_public_network, + 
use_syslog => $use_syslog, + syslog_log_level => hiera('syslog_log_level_ceph', 'info'), + syslog_log_facility => $syslog_log_facility_ceph, + rgw_keystone_admin_token => $keystone_hash['admin_token'], + ephemeral_ceph => $storage_hash['ephemeral_ceph'] + } + + + service { $::ceph::params::service_nova_compute :} + + ceph::pool {$compute_pool: + user => $compute_user, + acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'", + keyring_owner => 'nova', + pg_num => $storage_hash['pg_num'], + pgp_num => $storage_hash['pg_num'], + } + + include ceph::nova_compute + + if ($storage_hash['ephemeral_ceph']) { + include ceph::ephemeral + Class['ceph::conf'] -> Class['ceph::ephemeral'] ~> + Service[$::ceph::params::service_nova_compute] + } + + Class['ceph::conf'] -> + Ceph::Pool[$compute_pool] -> + Class['ceph::nova_compute'] ~> + Service[$::ceph::params::service_nova_compute] + + Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ], + cwd => '/root', + } + +} diff --git a/f2s/resources/ceph-compute/meta.yaml b/f2s/resources/ceph-compute/meta.yaml new file mode 100644 index 00000000..8690c5f0 --- /dev/null +++ b/f2s/resources/ceph-compute/meta.yaml @@ -0,0 +1,10 @@ +id: ceph-compute +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ceph-mon/actions/run.pp b/f2s/resources/ceph-mon/actions/run.pp new file mode 100644 index 00000000..f9d66765 --- /dev/null +++ b/f2s/resources/ceph-mon/actions/run.pp @@ -0,0 +1,95 @@ +notice('MODULAR: ceph/mon.pp') + +$storage_hash = hiera('storage', {}) +$use_neutron = hiera('use_neutron') +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') +$keystone_hash = hiera('keystone', {}) 
+$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') + +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' +} elsif ($storage_hash['images_vcenter']) { + $glance_backend = 'vmware' +} else { + $glance_backend = 'swift' +} + +if ($storage_hash['volumes_ceph'] or + $storage_hash['images_ceph'] or + $storage_hash['objects_ceph'] or + $storage_hash['ephemeral_ceph'] +) { + $use_ceph = true +} else { + $use_ceph = false +} + +if $use_ceph { + $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') + $primary_mons = keys($ceph_primary_monitor_node) + $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] + + prepare_network_config(hiera_hash('network_scheme')) + $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') + $ceph_public_network = get_network_role_property('ceph/public', 'network') + $mon_addr = get_network_role_property('ceph/public', 'ipaddr') + + class {'ceph': + primary_mon => $primary_mon, + mon_hosts => keys($mon_address_map), + mon_ip_addresses => values($mon_address_map), + mon_addr => $mon_addr, + cluster_node_address => $public_vip, + osd_pool_default_size => $storage_hash['osd_pool_size'], + osd_pool_default_pg_num => $storage_hash['pg_num'], + osd_pool_default_pgp_num => $storage_hash['pg_num'], + use_rgw => false, + glance_backend => $glance_backend, + rgw_pub_ip => $public_vip, + rgw_adm_ip => $management_vip, + rgw_int_ip => $management_vip, + cluster_network => $ceph_cluster_network, + public_network => $ceph_public_network, + use_syslog => $use_syslog, + syslog_log_level => hiera('syslog_log_level_ceph', 'info'), + syslog_log_facility => $syslog_log_facility_ceph, + rgw_keystone_admin_token => $keystone_hash['admin_token'], + ephemeral_ceph => $storage_hash['ephemeral_ceph'] + } + + if ($storage_hash['volumes_ceph']) { + include ::cinder::params + service { 'cinder-volume': + ensure => 'running', + name => 
$::cinder::params::volume_service, + hasstatus => true, + hasrestart => true, + } + + service { 'cinder-backup': + ensure => 'running', + name => $::cinder::params::backup_service, + hasstatus => true, + hasrestart => true, + } + + Class['ceph'] ~> Service['cinder-volume'] + Class['ceph'] ~> Service['cinder-backup'] + } + + if ($storage_hash['images_ceph']) { + include ::glance::params + service { 'glance-api': + ensure => 'running', + name => $::glance::params::api_service_name, + hasstatus => true, + hasrestart => true, + } + + Class['ceph'] ~> Service['glance-api'] + } + +} diff --git a/f2s/resources/ceph-mon/meta.yaml b/f2s/resources/ceph-mon/meta.yaml new file mode 100644 index 00000000..19d61849 --- /dev/null +++ b/f2s/resources/ceph-mon/meta.yaml @@ -0,0 +1,32 @@ +id: ceph-mon +handler: puppetv2 +version: '8.0' +inputs: + ceph_monitor_nodes: + value: null + ceph_primary_monitor_node: + value: null + fqdn: + value: null + keystone: + value: null + management_vip: + value: null + network_scheme: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null + storage: + value: null + syslog_log_facility_ceph: + value: null + syslog_log_level_ceph: + value: null + use_neutron: + value: null + use_syslog: + value: null diff --git a/f2s/resources/ceph-radosgw/actions/run.pp b/f2s/resources/ceph-radosgw/actions/run.pp new file mode 100644 index 00000000..cf5f131b --- /dev/null +++ b/f2s/resources/ceph-radosgw/actions/run.pp @@ -0,0 +1,103 @@ +notice('MODULAR: ceph/radosgw.pp') + +$storage_hash = hiera('storage', {}) +$use_neutron = hiera('use_neutron') +$public_vip = hiera('public_vip') +$keystone_hash = hiera('keystone', {}) +$management_vip = hiera('management_vip') +$service_endpoint = hiera('service_endpoint') +$public_ssl_hash = hiera('public_ssl') +$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') + +if ($storage_hash['volumes_ceph'] or + $storage_hash['images_ceph'] or 
+ $storage_hash['objects_ceph'] +) { + $use_ceph = true +} else { + $use_ceph = false +} + +if $use_ceph and $storage_hash['objects_ceph'] { + $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') + $primary_mons = keys($ceph_primary_monitor_node) + $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] + + prepare_network_config(hiera_hash('network_scheme')) + $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') + $ceph_public_network = get_network_role_property('ceph/public', 'network') + $rgw_ip_address = get_network_role_property('ceph/radosgw', 'ipaddr') + + # Apache and listen ports + class { 'osnailyfacter::apache': + listen_ports => hiera_array('apache_ports', ['80', '8888']), + } + if ($::osfamily == 'Debian'){ + apache::mod {'rewrite': } + apache::mod {'fastcgi': } + } + include ::tweaks::apache_wrappers + include ceph::params + + $haproxy_stats_url = "http://${service_endpoint}:10000/;csv" + + haproxy_backend_status { 'keystone-admin' : + name => 'keystone-2', + count => '200', + step => '6', + url => $haproxy_stats_url, + } + + haproxy_backend_status { 'keystone-public' : + name => 'keystone-1', + count => '200', + step => '6', + url => $haproxy_stats_url, + } + + Haproxy_backend_status['keystone-admin'] -> Class ['ceph::keystone'] + Haproxy_backend_status['keystone-public'] -> Class ['ceph::keystone'] + + class { 'ceph::radosgw': + # SSL + use_ssl => false, + public_ssl => $public_ssl_hash['services'], + + # Ceph + primary_mon => $primary_mon, + pub_ip => $public_vip, + adm_ip => $management_vip, + int_ip => $management_vip, + + # RadosGW settings + rgw_host => $::hostname, + rgw_ip => $rgw_ip_address, + rgw_port => '6780', + swift_endpoint_port => '8080', + rgw_keyring_path => '/etc/ceph/keyring.radosgw.gateway', + rgw_socket_path => '/tmp/radosgw.sock', + rgw_log_file => '/var/log/ceph/radosgw.log', + rgw_data => '/var/lib/ceph/radosgw', + rgw_dns_name => "*.${::domain}", + rgw_print_continue => 
true, + + #rgw Keystone settings + rgw_use_pki => false, + rgw_use_keystone => true, + rgw_keystone_url => "${service_endpoint}:35357", + rgw_keystone_admin_token => $keystone_hash['admin_token'], + rgw_keystone_token_cache_size => '10', + rgw_keystone_accepted_roles => '_member_, Member, admin, swiftoperator', + rgw_keystone_revocation_interval => '1000000', + rgw_nss_db_path => '/etc/ceph/nss', + + #rgw Log settings + use_syslog => hiera('use_syslog', true), + syslog_facility => hiera('syslog_log_facility_ceph', 'LOG_LOCAL0'), + syslog_level => hiera('syslog_log_level_ceph', 'info'), + } + + Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ], + cwd => '/root', + } +} diff --git a/f2s/resources/ceph-radosgw/meta.yaml b/f2s/resources/ceph-radosgw/meta.yaml new file mode 100644 index 00000000..c577ef39 --- /dev/null +++ b/f2s/resources/ceph-radosgw/meta.yaml @@ -0,0 +1,26 @@ +id: ceph-radosgw +handler: puppetv2 +version: '8.0' +inputs: + ceph_monitor_nodes: + value: null + fqdn: + value: null + keystone: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null + service_endpoint: + value: null + storage: + value: null + use_neutron: + value: null diff --git a/f2s/resources/ceph_create_pools/actions/run.pp b/f2s/resources/ceph_create_pools/actions/run.pp new file mode 100644 index 00000000..6fdb2ee4 --- /dev/null +++ b/f2s/resources/ceph_create_pools/actions/run.pp @@ -0,0 +1,80 @@ +notice('MODULAR: ceph/ceph_pools') + +$storage_hash = hiera('storage', {}) +$osd_pool_default_pg_num = $storage_hash['pg_num'] +$osd_pool_default_pgp_num = $storage_hash['pg_num'] +# Cinder settings +$cinder_user = 'volumes' +$cinder_pool = 'volumes' +# Cinder Backup settings +$cinder_backup_user = 'backups' +$cinder_backup_pool = 'backups' +# Glance settings +$glance_user = 'images' +$glance_pool = 'images' + + +Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ], 
+ cwd => '/root', +} + +# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279 +ceph::pool {$glance_pool: + user => $glance_user, + acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'", + keyring_owner => 'glance', + pg_num => $osd_pool_default_pg_num, + pgp_num => $osd_pool_default_pg_num, +} + +ceph::pool {$cinder_pool: + user => $cinder_user, + acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'", + keyring_owner => 'cinder', + pg_num => $osd_pool_default_pg_num, + pgp_num => $osd_pool_default_pg_num, +} + +ceph::pool {$cinder_backup_pool: + user => $cinder_backup_user, + acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rx pool=${cinder_pool}'", + keyring_owner => 'cinder', + pg_num => $osd_pool_default_pg_num, + pgp_num => $osd_pool_default_pg_num, +} + +Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool] + +if ($storage_hash['volumes_ceph']) { + include ::cinder::params + service { 'cinder-volume': + ensure => 'running', + name => $::cinder::params::volume_service, + hasstatus => true, + hasrestart => true, + } + + Ceph::Pool[$cinder_pool] ~> Service['cinder-volume'] + + service { 'cinder-backup': + ensure => 'running', + name => $::cinder::params::backup_service, + hasstatus => true, + hasrestart => true, + } + + Ceph::Pool[$cinder_backup_pool] ~> Service['cinder-backup'] +} + +if ($storage_hash['images_ceph']) { + include ::glance::params + service { 'glance-api': + ensure => 'running', + name => $::glance::params::api_service_name, + hasstatus => true, + hasrestart => true, + } + + Ceph::Pool[$glance_pool] ~> Service['glance-api'] +} + diff --git a/f2s/resources/ceph_create_pools/meta.yaml b/f2s/resources/ceph_create_pools/meta.yaml new file mode 100644 index 00000000..97de7472 --- /dev/null +++ 
b/f2s/resources/ceph_create_pools/meta.yaml @@ -0,0 +1,12 @@ +id: ceph_create_pools +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null + storage: + value: null diff --git a/f2s/resources/cinder-db/actions/run.pp b/f2s/resources/cinder-db/actions/run.pp new file mode 100644 index 00000000..e51e3383 --- /dev/null +++ b/f2s/resources/cinder-db/actions/run.pp @@ -0,0 +1,53 @@ +notice('MODULAR: cinder/db.pp') + +$cinder_hash = hiera_hash('cinder', {}) +$mysql_hash = hiera_hash('mysql_hash', {}) +$management_vip = hiera('management_vip', undef) +$database_vip = hiera('database_vip', undef) + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$db_user = pick($cinder_hash['db_user'], 'cinder') +$db_name = pick($cinder_hash['db_name'], 'cinder') +$db_password = pick($cinder_hash['db_password'], $mysql_root_password) + +$db_host = pick($cinder_hash['db_host'], $database_vip) +$db_create = pick($cinder_hash['db_create'], $mysql_db_create) +$db_root_user = pick($cinder_hash['root_user'], $mysql_root_user) +$db_root_password = pick($cinder_hash['root_password'], $mysql_root_password) + +$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) + +if $db_create { + + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'cinder::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['cinder::db::mysql'] + +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server diff --git 
a/f2s/resources/cinder-db/meta.yaml b/f2s/resources/cinder-db/meta.yaml new file mode 100644 index 00000000..6865e031 --- /dev/null +++ b/f2s/resources/cinder-db/meta.yaml @@ -0,0 +1,20 @@ +id: cinder-db +handler: puppetv2 +version: '8.0' +inputs: + cinder: + value: null + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + mysql_custom_setup_class: + value: null + mysql_hash: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/cinder-keystone/actions/run.pp b/f2s/resources/cinder-keystone/actions/run.pp new file mode 100644 index 00000000..1b93e142 --- /dev/null +++ b/f2s/resources/cinder-keystone/actions/run.pp @@ -0,0 +1,51 @@ +notice('MODULAR: cinder/keystone.pp') + +$cinder_hash = hiera_hash('cinder', {}) +$public_ssl_hash = hiera('public_ssl') +$public_vip = hiera('public_vip') +$public_address = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? 
{ + true => 'https', + default => 'http', +} +$admin_protocol = 'http' +$admin_address = hiera('management_vip') +$region = pick($cinder_hash['region'], hiera('region', 'RegionOne')) + +$password = $cinder_hash['user_password'] +$auth_name = pick($cinder_hash['auth_name'], 'cinder') +$configure_endpoint = pick($cinder_hash['configure_endpoint'], true) +$configure_user = pick($cinder_hash['configure_user'], true) +$configure_user_role = pick($cinder_hash['configure_user_role'], true) +$service_name = pick($cinder_hash['service_name'], 'cinder') +$tenant = pick($cinder_hash['tenant'], 'services') + +$port = '8776' + +$public_url = "${public_protocol}://${public_address}:${port}/v1/%(tenant_id)s" +$admin_url = "${admin_protocol}://${admin_address}:${port}/v1/%(tenant_id)s" + +$public_url_v2 = "${public_protocol}://${public_address}:${port}/v2/%(tenant_id)s" +$admin_url_v2 = "${admin_protocol}://${admin_address}:${port}/v2/%(tenant_id)s" + +validate_string($public_address) +validate_string($password) + +class { '::cinder::keystone::auth': + password => $password, + auth_name => $auth_name, + configure_endpoint => $configure_endpoint, + configure_user => $configure_user, + configure_user_role => $configure_user_role, + service_name => $service_name, + public_url => $public_url, + internal_url => $admin_url, + admin_url => $admin_url, + public_url_v2 => $public_url_v2, + internal_url_v2 => $admin_url_v2, + admin_url_v2 => $admin_url_v2, + region => $region, +} diff --git a/f2s/resources/cinder-keystone/meta.yaml b/f2s/resources/cinder-keystone/meta.yaml new file mode 100644 index 00000000..c2ec3ec4 --- /dev/null +++ b/f2s/resources/cinder-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: cinder-keystone +handler: puppetv2 +version: '8.0' +inputs: + cinder: + value: null + fqdn: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null diff --git 
a/f2s/resources/cluster-haproxy/actions/run.pp b/f2s/resources/cluster-haproxy/actions/run.pp new file mode 100644 index 00000000..9c604867 --- /dev/null +++ b/f2s/resources/cluster-haproxy/actions/run.pp @@ -0,0 +1,20 @@ +notice('MODULAR: cluster-haproxy.pp') + +$network_scheme = hiera('network_scheme', {}) +$management_vip = hiera('management_vip') +$database_vip = hiera('database_vip', '') +$service_endpoint = hiera('service_endpoint', '') +$primary_controller = hiera('primary_controller') +$haproxy_hash = hiera_hash('haproxy', {}) + +#FIXME(mattymo): Replace with only VIPs for roles assigned to this node +$stats_ipaddresses = delete_undef_values([$management_vip, $database_vip, $service_endpoint, '127.0.0.1']) + +class { 'cluster::haproxy': + haproxy_maxconn => '16000', + haproxy_bufsize => '32768', + primary_controller => $primary_controller, + debug => pick($haproxy_hash['debug'], hiera('debug', false)), + other_networks => direct_networks($network_scheme['endpoints']), + stats_ipaddresses => $stats_ipaddresses +} diff --git a/f2s/resources/cluster-haproxy/meta.yaml b/f2s/resources/cluster-haproxy/meta.yaml new file mode 100644 index 00000000..c1e45fb8 --- /dev/null +++ b/f2s/resources/cluster-haproxy/meta.yaml @@ -0,0 +1,24 @@ +id: cluster-haproxy +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + debug: + value: null + fqdn: + value: null + haproxy: + value: null + management_vip: + value: null + network_scheme: + value: null + primary_controller: + value: null + puppet_modules: + value: null + role: + value: null + service_endpoint: + value: null diff --git a/f2s/resources/cluster-vrouter/actions/run.pp b/f2s/resources/cluster-vrouter/actions/run.pp new file mode 100644 index 00000000..09125d94 --- /dev/null +++ b/f2s/resources/cluster-vrouter/actions/run.pp @@ -0,0 +1,7 @@ +notice('MODULAR: cluster-vrouter.pp') + +$network_scheme = hiera('network_scheme', {}) + +class { 'cluster::vrouter_ocf': + other_networks => 
direct_networks($network_scheme['endpoints']), +} diff --git a/f2s/resources/cluster-vrouter/meta.yaml b/f2s/resources/cluster-vrouter/meta.yaml new file mode 100644 index 00000000..6f6fd6b5 --- /dev/null +++ b/f2s/resources/cluster-vrouter/meta.yaml @@ -0,0 +1,12 @@ +id: cluster-vrouter +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/cluster/actions/run.pp b/f2s/resources/cluster/actions/run.pp new file mode 100644 index 00000000..87aa1242 --- /dev/null +++ b/f2s/resources/cluster/actions/run.pp @@ -0,0 +1,49 @@ +notice('MODULAR: cluster.pp') + +if !(hiera('role') in hiera('corosync_roles')) { + fail('The node role is not in corosync roles') +} + +prepare_network_config(hiera_hash('network_scheme')) + +$corosync_nodes = corosync_nodes( + get_nodes_hash_by_roles( + hiera_hash('network_metadata'), + hiera('corosync_roles') + ), + 'mgmt/corosync' +) + +class { 'cluster': + internal_address => get_network_role_property('mgmt/corosync', 'ipaddr'), + corosync_nodes => $corosync_nodes, +} + +pcmk_nodes { 'pacemaker' : + nodes => $corosync_nodes, + add_pacemaker_nodes => false, +} + +Service <| title == 'corosync' |> { + subscribe => File['/etc/corosync/service.d'], + require => File['/etc/corosync/corosync.conf'], +} + +Service['corosync'] -> Pcmk_nodes<||> +Pcmk_nodes<||> -> Service<| provider == 'pacemaker' |> + +# Sometimes during first start pacemaker can not connect to corosync +# via IPC due to pacemaker and corosync processes are run under different users +if($::operatingsystem == 'Ubuntu') { + $pacemaker_run_uid = 'hacluster' + $pacemaker_run_gid = 'haclient' + + file {'/etc/corosync/uidgid.d/pacemaker': + content =>"uidgid { + uid: ${pacemaker_run_uid} + gid: ${pacemaker_run_gid} +}" + } + + File['/etc/corosync/corosync.conf'] -> File['/etc/corosync/uidgid.d/pacemaker'] -> Service <| title == 'corosync' |> +} diff --git 
a/f2s/resources/cluster/meta.yaml b/f2s/resources/cluster/meta.yaml new file mode 100644 index 00000000..a01566c4 --- /dev/null +++ b/f2s/resources/cluster/meta.yaml @@ -0,0 +1,16 @@ +id: cluster +handler: puppetv2 +version: '8.0' +inputs: + corosync_roles: + value: null + fqdn: + value: null + network_metadata: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/cluster_health/actions/run.pp b/f2s/resources/cluster_health/actions/run.pp new file mode 100644 index 00000000..d6906cd6 --- /dev/null +++ b/f2s/resources/cluster_health/actions/run.pp @@ -0,0 +1,20 @@ +notice('MODULAR: cluster/health.pp') + +if !(hiera('role') in hiera('corosync_roles')) { + fail('The node role is not in corosync roles') +} + +# load the mounted filesystems from our custom fact, remove boot +$mount_points = delete(split($::mounts, ','), '/boot') + +$disks = hiera('corosync_disks', $mount_points) +$min_disk_free = hiera('corosync_min_disk_space', '512M') +$disk_unit = hiera('corosync_disk_unit', 'M') +$monitor_interval = hiera('corosync_disk_monitor_interval', '15s') + +class { 'cluster::sysinfo': + disks => $disks, + min_disk_free => $min_disk_free, + disk_unit => $disk_unit, + monitor_interval => $monitor_interval, +} diff --git a/f2s/resources/cluster_health/meta.yaml b/f2s/resources/cluster_health/meta.yaml new file mode 100644 index 00000000..9ffaf7b6 --- /dev/null +++ b/f2s/resources/cluster_health/meta.yaml @@ -0,0 +1,24 @@ +id: cluster_health +handler: puppetv2 +version: '8.0' +inputs: + corosync_disk_monitor: + value: null + corosync_disk_monitor_interval: + value: null + corosync_disk_unit: + value: null + corosync_disks: + value: null + corosync_min_disk_space: + value: null + corosync_monitor_interval: + value: null + corosync_roles: + value: null + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/configure_default_route/actions/run.pp 
b/f2s/resources/configure_default_route/actions/run.pp new file mode 100644 index 00000000..775cc48e --- /dev/null +++ b/f2s/resources/configure_default_route/actions/run.pp @@ -0,0 +1,11 @@ +notice('MODULAR: configure_default_route.pp') + +$network_scheme = hiera('network_scheme') +$management_vrouter_vip = hiera('management_vrouter_vip') + +prepare_network_config($network_scheme) +$management_int = get_network_role_property('management', 'interface') +$fw_admin_int = get_network_role_property('fw-admin', 'interface') +$ifconfig = configure_default_route($network_scheme, $management_vrouter_vip, $fw_admin_int, $management_int ) + +notice ($ifconfig) diff --git a/f2s/resources/configure_default_route/meta.yaml b/f2s/resources/configure_default_route/meta.yaml new file mode 100644 index 00000000..bc69b391 --- /dev/null +++ b/f2s/resources/configure_default_route/meta.yaml @@ -0,0 +1,10 @@ +id: configure_default_route +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/connectivity_tests/actions/run.pp b/f2s/resources/connectivity_tests/actions/run.pp new file mode 100644 index 00000000..54296d8b --- /dev/null +++ b/f2s/resources/connectivity_tests/actions/run.pp @@ -0,0 +1,5 @@ +notice('MODULAR: connectivity_tests.pp') +# Pull the list of repos from hiera +$repo_setup = hiera('repo_setup') +# test that the repos are accessible +url_available($repo_setup['repos']) diff --git a/f2s/resources/connectivity_tests/meta.yaml b/f2s/resources/connectivity_tests/meta.yaml new file mode 100644 index 00000000..a1408eec --- /dev/null +++ b/f2s/resources/connectivity_tests/meta.yaml @@ -0,0 +1,12 @@ +id: connectivity_tests +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + repo_setup: + value: null + role: + value: null diff --git a/f2s/resources/conntrackd/actions/run.pp b/f2s/resources/conntrackd/actions/run.pp new file mode 100644 
index 00000000..360d98ff --- /dev/null +++ b/f2s/resources/conntrackd/actions/run.pp @@ -0,0 +1,79 @@ +notice('MODULAR: conntrackd.pp') + +prepare_network_config(hiera('network_scheme', {})) +$vrouter_name = hiera('vrouter_name', 'pub') + +case $operatingsystem { + Centos: { $conntrackd_package = 'conntrack-tools' } + Ubuntu: { $conntrackd_package = 'conntrackd' } +} + + +### CONNTRACKD for CentOS 6 doesn't work under namespaces ## + +if $operatingsystem == 'Ubuntu' { + $bind_address = get_network_role_property('mgmt/vip', 'ipaddr') + $mgmt_bridge = get_network_role_property('mgmt/vip', 'interface') + + package { $conntrackd_package: + ensure => installed, + } -> + + file { '/etc/conntrackd/conntrackd.conf': + content => template('cluster/conntrackd.conf.erb'), + } -> + + cs_resource {'p_conntrackd': + ensure => present, + primitive_class => 'ocf', + provided_by => 'fuel', + primitive_type => 'ns_conntrackd', + metadata => { + 'migration-threshold' => 'INFINITY', + 'failure-timeout' => '180s' + }, + parameters => { + 'bridge' => $mgmt_bridge, + }, + complex_type => 'master', + ms_metadata => { + 'notify' => 'true', + 'ordered' => 'false', + 'interleave' => 'true', + 'clone-node-max' => '1', + 'master-max' => '1', + 'master-node-max' => '1', + 'target-role' => 'Master' + }, + operations => { + 'monitor' => { + 'interval' => '30', + 'timeout' => '60' + }, + 'monitor:Master' => { + 'role' => 'Master', + 'interval' => '27', + 'timeout' => '60' + }, + }, + } + + cs_colocation { "conntrackd-with-${vrouter_name}-vip": + primitives => [ 'master_p_conntrackd:Master', "vip__vrouter_${vrouter_name}" ], + } + + File['/etc/conntrackd/conntrackd.conf'] -> Cs_resource['p_conntrackd'] -> Service['p_conntrackd'] -> Cs_colocation["conntrackd-with-${vrouter_name}-vip"] + + service { 'p_conntrackd': + ensure => 'running', + enable => true, + provider => 'pacemaker', + } + + # Workaround to ensure log is rotated properly + file { '/etc/logrotate.d/conntrackd': + content => 
template('openstack/95-conntrackd.conf.erb'), + } + + Package[$conntrackd_package] -> File['/etc/logrotate.d/conntrackd'] +} diff --git a/f2s/resources/conntrackd/meta.yaml b/f2s/resources/conntrackd/meta.yaml new file mode 100644 index 00000000..45f8f638 --- /dev/null +++ b/f2s/resources/conntrackd/meta.yaml @@ -0,0 +1,14 @@ +id: conntrackd +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null + vrouter_name: + value: null diff --git a/f2s/resources/controller_remaining_tasks/actions/run.pp b/f2s/resources/controller_remaining_tasks/actions/run.pp new file mode 100644 index 00000000..d20ddc5d --- /dev/null +++ b/f2s/resources/controller_remaining_tasks/actions/run.pp @@ -0,0 +1,49 @@ +notice('MODULAR: controller.pp') + +# Pulling hiera +$primary_controller = hiera('primary_controller') +$neutron_mellanox = hiera('neutron_mellanox', false) +$use_neutron = hiera('use_neutron', false) + +# Do the stuff +if $neutron_mellanox { + $mellanox_mode = $neutron_mellanox['plugin'] +} else { + $mellanox_mode = 'disabled' +} + +if $primary_controller { + if ($mellanox_mode == 'ethernet') { + $test_vm_pkg = 'cirros-testvm-mellanox' + } else { + $test_vm_pkg = 'cirros-testvm' + } + package { 'cirros-testvm' : + ensure => 'installed', + name => $test_vm_pkg, + } +} + +Exec { logoutput => true } + +if ($::mellanox_mode == 'ethernet') { + $ml2_eswitch = $neutron_mellanox['ml2_eswitch'] + class { 'mellanox_openstack::controller': + eswitch_vnic_type => $ml2_eswitch['vnic_type'], + eswitch_apply_profile_patch => $ml2_eswitch['apply_profile_patch'], + } +} + +# NOTE(bogdando) for nodes with pacemaker, we should use OCF instead of monit + +# BP https://blueprints.launchpad.net/mos/+spec/include-openstackclient +package { 'python-openstackclient' : + ensure => installed, +} + +# Reduce swapiness on controllers, see LP#1413702 +sysctl::value { 'vm.swappiness': + value => '10' +} + +# vim: set 
ts=2 sw=2 et : diff --git a/f2s/resources/controller_remaining_tasks/meta.yaml b/f2s/resources/controller_remaining_tasks/meta.yaml new file mode 100644 index 00000000..e5d4d401 --- /dev/null +++ b/f2s/resources/controller_remaining_tasks/meta.yaml @@ -0,0 +1,16 @@ +id: controller_remaining_tasks +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + neutron_mellanox: + value: null + primary_controller: + value: null + puppet_modules: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/database/actions/run.pp b/f2s/resources/database/actions/run.pp new file mode 100644 index 00000000..b056e3fb --- /dev/null +++ b/f2s/resources/database/actions/run.pp @@ -0,0 +1,132 @@ +notice('MODULAR: database.pp') + +prepare_network_config(hiera('network_scheme', {})) +$use_syslog = hiera('use_syslog', true) +$primary_controller = hiera('primary_controller') +$mysql_hash = hiera_hash('mysql', {}) +$management_vip = hiera('management_vip') +$database_vip = hiera('database_vip', $management_vip) + +$network_scheme = hiera('network_scheme', {}) +$mgmt_iface = get_network_role_property('mgmt/database', 'interface') +$direct_networks = split(direct_networks($network_scheme['endpoints'], $mgmt_iface, 'netmask'), ' ') +$access_networks = flatten(['localhost', '127.0.0.1', '240.0.0.0/255.255.0.0', $direct_networks]) + +$haproxy_stats_port = '10000' +$haproxy_stats_url = "http://${database_vip}:${haproxy_stats_port}/;csv" + +$mysql_database_password = $mysql_hash['root_password'] +$enabled = pick($mysql_hash['enabled'], true) + +$galera_node_address = get_network_role_property('mgmt/database', 'ipaddr') +$galera_nodes = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('database_nodes'), 'mgmt/database')) +$galera_primary_controller = hiera('primary_database', $primary_controller) +$mysql_bind_address = '0.0.0.0' +$galera_cluster_name = 'openstack' + +$mysql_skip_name_resolve = true +$custom_setup_class = 
hiera('mysql_custom_setup_class', 'galera') + +# Get galera gcache factor based on cluster node's count +$galera_gcache_factor = count(unique(filter_hash(hiera('nodes', []), 'uid'))) + +$status_user = 'clustercheck' +$status_password = $mysql_hash['wsrep_password'] +$backend_port = '3307' +$backend_timeout = '10' + +############################################################################# +validate_string($status_password) +validate_string($mysql_database_password) +validate_string($status_password) + +if $enabled { + + if $custom_setup_class { + file { '/etc/mysql/my.cnf': + ensure => absent, + require => Class['mysql::server'] + } + $config_hash_real = { + 'config_file' => '/etc/my.cnf' + } + } else { + $config_hash_real = { } + } + + if '/var/lib/mysql' in split($::mounts, ',') { + $ignore_db_dirs = ['lost+found'] + } else { + $ignore_db_dirs = [] + } + + class { 'mysql::server': + bind_address => '0.0.0.0', + etc_root_password => true, + root_password => $mysql_database_password, + old_root_password => '', + galera_cluster_name => $galera_cluster_name, + primary_controller => $galera_primary_controller, + galera_node_address => $galera_node_address, + galera_nodes => $galera_nodes, + galera_gcache_factor => $galera_gcache_factor, + enabled => $enabled, + custom_setup_class => $custom_setup_class, + mysql_skip_name_resolve => $mysql_skip_name_resolve, + use_syslog => $use_syslog, + config_hash => $config_hash_real, + ignore_db_dirs => $ignore_db_dirs, + } + + class { 'osnailyfacter::mysql_user': + password => $mysql_database_password, + access_networks => $access_networks, + } + + exec { 'initial_access_config': + command => '/bin/ln -sf /etc/mysql/conf.d/password.cnf /root/.my.cnf', + } + + if ($custom_mysql_setup_class == 'percona_packages' and $::osfamily == 'RedHat') { + # This is a work around to prevent the conflict between the + # MySQL-shared-wsrep package (included as a dependency for MySQL-python) and + # the Percona shared package 
Percona-XtraDB-Cluster-shared-56. They both + # provide the libmysql client libraries. Since we are requiring the + # installation of the Percona package here before mysql::python, the python + # client is happy and the server installation won't fail due to the + # installation of our shared package + package { 'Percona-XtraDB-Cluster-shared-56': + ensure => 'present', + before => Class['mysql::python'], + } + } + + $management_networks = get_routable_networks_for_network_role($network_scheme, 'mgmt/database', ' ') + + class { 'openstack::galera::status': + status_user => $status_user, + status_password => $status_password, + status_allow => $galera_node_address, + backend_host => $galera_node_address, + backend_port => $backend_port, + backend_timeout => $backend_timeout, + only_from => "127.0.0.1 240.0.0.2 ${management_networks}", + } + + haproxy_backend_status { 'mysql': + name => 'mysqld', + url => $haproxy_stats_url, + } + + class { 'osnailyfacter::mysql_access': + db_password => $mysql_database_password, + } + + Class['mysql::server'] -> + Class['osnailyfacter::mysql_user'] -> + Exec['initial_access_config'] -> + Class['openstack::galera::status'] -> + Haproxy_backend_status['mysql'] -> + Class['osnailyfacter::mysql_access'] + +} diff --git a/f2s/resources/database/meta.yaml b/f2s/resources/database/meta.yaml new file mode 100644 index 00000000..4da75f77 --- /dev/null +++ b/f2s/resources/database/meta.yaml @@ -0,0 +1,30 @@ +id: database +handler: puppetv2 +version: '8.0' +inputs: + database_nodes: + value: null + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + mysql: + value: null + mysql_custom_setup_class: + value: null + network_scheme: + value: null + nodes: + value: null + primary_controller: + value: null + primary_database: + value: null + puppet_modules: + value: null + role: + value: null + use_syslog: + value: null diff --git a/f2s/resources/disable_keystone_service_token/actions/run.pp 
b/f2s/resources/disable_keystone_service_token/actions/run.pp new file mode 100644 index 00000000..2708a261 --- /dev/null +++ b/f2s/resources/disable_keystone_service_token/actions/run.pp @@ -0,0 +1,41 @@ +notice('MODULAR: service_token_off.pp') + +#################################################################### +# Used as singular by post-deployment action to disable admin_token +# + +$keystone_params = hiera_hash('keystone_hash', {}) + +if $keystone_params['service_token_off'] { + + include ::keystone::params + include ::tweaks::apache_wrappers + + keystone_config { + 'DEFAULT/admin_token': ensure => absent; + } + + # Get paste.ini source + $keystone_paste_ini = $::keystone::params::paste_config ? { + undef => '/etc/keystone/keystone-paste.ini', + default => $::keystone::params::paste_config, + } + + # Remove admin_token_auth middleware from public/admin/v3 pipelines + exec { 'remove_admin_token_auth_middleware': + path => ['/bin', '/usr/bin'], + command => "sed -i.dist 's/ admin_token_auth//' $keystone_paste_ini", + onlyif => "fgrep -q ' admin_token_auth' $keystone_paste_ini", + } + + service { 'httpd': + ensure => 'running', + name => $::tweaks::apache_wrappers::service_name, + enable => true, + } + + # Restart service that changes to take effect + Keystone_config<||> ~> Service['httpd'] + Exec['remove_admin_token_auth_middleware'] ~> Service['httpd'] + +} diff --git a/f2s/resources/disable_keystone_service_token/meta.yaml b/f2s/resources/disable_keystone_service_token/meta.yaml new file mode 100644 index 00000000..19882fda --- /dev/null +++ b/f2s/resources/disable_keystone_service_token/meta.yaml @@ -0,0 +1,12 @@ +id: disable_keystone_service_token +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + keystone_hash: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/dns-client/actions/run.pp b/f2s/resources/dns-client/actions/run.pp new file mode 100644 index 00000000..f7207b37 --- /dev/null +++ 
b/f2s/resources/dns-client/actions/run.pp @@ -0,0 +1,8 @@ +notice('MODULAR: dns-client.pp') + +$management_vip = hiera('management_vrouter_vip') + +class { 'osnailyfacter::resolvconf': + management_vip => $management_vip, +} + diff --git a/f2s/resources/dns-client/meta.yaml b/f2s/resources/dns-client/meta.yaml new file mode 100644 index 00000000..a53f2682 --- /dev/null +++ b/f2s/resources/dns-client/meta.yaml @@ -0,0 +1,12 @@ +id: dns-client +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vrouter_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/dns-server/actions/run.pp b/f2s/resources/dns-server/actions/run.pp new file mode 100644 index 00000000..54f0ad75 --- /dev/null +++ b/f2s/resources/dns-server/actions/run.pp @@ -0,0 +1,16 @@ +notice('MODULAR: dns-server.pp') + +$dns_servers = hiera('external_dns') +$primary_controller = hiera('primary_controller') +$master_ip = hiera('master_ip') +$management_vrouter_vip = hiera('management_vrouter_vip') + +class { 'osnailyfacter::dnsmasq': + external_dns => strip(split($dns_servers['dns_list'], ',')), + master_ip => $master_ip, + management_vrouter_vip => $management_vrouter_vip, +} -> + +class { 'cluster::dns_ocf': + primary_controller => $primary_controller, +} diff --git a/f2s/resources/dns-server/meta.yaml b/f2s/resources/dns-server/meta.yaml new file mode 100644 index 00000000..7faa8ea2 --- /dev/null +++ b/f2s/resources/dns-server/meta.yaml @@ -0,0 +1,18 @@ +id: dns-server +handler: puppetv2 +version: '8.0' +inputs: + external_dns: + value: null + fqdn: + value: null + management_vrouter_vip: + value: null + master_ip: + value: null + primary_controller: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/dump_rabbitmq_definitions/actions/run.pp b/f2s/resources/dump_rabbitmq_definitions/actions/run.pp new file mode 100644 index 00000000..063d90e1 --- /dev/null +++ 
b/f2s/resources/dump_rabbitmq_definitions/actions/run.pp @@ -0,0 +1,28 @@ +notice('MODULAR: dump_rabbitmq_definitions.pp') + +$definitions_dump_file = '/etc/rabbitmq/definitions' +$rabbit_hash = hiera_hash('rabbit_hash', + { + 'user' => false, + 'password' => false, + } + ) +$rabbit_enabled = pick($rabbit_hash['enabled'], true) + + +if ($rabbit_enabled) { + $rabbit_api_endpoint = 'http://localhost:15672/api/definitions' + $rabbit_credentials = "${rabbit_hash['user']}:${rabbit_hash['password']}" + + exec { 'rabbitmq-dump-definitions': + path => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'], + command => "curl -u ${rabbit_credentials} ${rabbit_api_endpoint} -o ${definitions_dump_file}", + } + + file { $definitions_dump_file: + ensure => file, + owner => 'root', + group => 'root', + mode => '0600', + } +} diff --git a/f2s/resources/dump_rabbitmq_definitions/meta.yaml b/f2s/resources/dump_rabbitmq_definitions/meta.yaml new file mode 100644 index 00000000..44e9109c --- /dev/null +++ b/f2s/resources/dump_rabbitmq_definitions/meta.yaml @@ -0,0 +1,12 @@ +id: dump_rabbitmq_definitions +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + rabbit_hash: + value: null + role: + value: null diff --git a/f2s/resources/enable_cinder_volume_service/actions/run.pp b/f2s/resources/enable_cinder_volume_service/actions/run.pp new file mode 100644 index 00000000..3dbe8e63 --- /dev/null +++ b/f2s/resources/enable_cinder_volume_service/actions/run.pp @@ -0,0 +1,10 @@ +include cinder::params + +$volume_service = $::cinder::params::volume_service + +service { $volume_service: + ensure => running, + enable => true, + hasstatus => true, + hasrestart => true, +} diff --git a/f2s/resources/enable_cinder_volume_service/meta.yaml b/f2s/resources/enable_cinder_volume_service/meta.yaml new file mode 100644 index 00000000..404d0853 --- /dev/null +++ b/f2s/resources/enable_cinder_volume_service/meta.yaml @@ -0,0 +1,10 @@ +id: enable_cinder_volume_service 
+handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/enable_nova_compute_service/actions/run.pp b/f2s/resources/enable_nova_compute_service/actions/run.pp new file mode 100644 index 00000000..8738f70e --- /dev/null +++ b/f2s/resources/enable_nova_compute_service/actions/run.pp @@ -0,0 +1,10 @@ +include nova::params + +$compute_service_name = $::nova::params::compute_service_name + +service { $compute_service_name: + ensure => running, + enable => true, + hasstatus => true, + hasrestart => true, +} diff --git a/f2s/resources/enable_nova_compute_service/meta.yaml b/f2s/resources/enable_nova_compute_service/meta.yaml new file mode 100644 index 00000000..dd1bd484 --- /dev/null +++ b/f2s/resources/enable_nova_compute_service/meta.yaml @@ -0,0 +1,10 @@ +id: enable_nova_compute_service +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/enable_rados/actions/run.pp b/f2s/resources/enable_rados/actions/run.pp new file mode 100644 index 00000000..e9ebbab5 --- /dev/null +++ b/f2s/resources/enable_rados/actions/run.pp @@ -0,0 +1,17 @@ +include ::ceph::params + +$radosgw_service = $::ceph::params::service_radosgw + +# ensure the service is running and will start on boot +service { $radosgw_service: + ensure => running, + enable => true, +} + +# The Ubuntu upstart script is incompatible with the upstart provider +# This will force the service to fall back to the debian init script +if ($::operatingsystem == 'Ubuntu') { + Service['radosgw'] { + provider => 'debian' + } +} diff --git a/f2s/resources/enable_rados/meta.yaml b/f2s/resources/enable_rados/meta.yaml new file mode 100644 index 00000000..b2706e00 --- /dev/null +++ b/f2s/resources/enable_rados/meta.yaml @@ -0,0 +1,10 @@ +id: enable_rados +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + 
role: + value: null diff --git a/f2s/resources/firewall/actions/run.pp b/f2s/resources/firewall/actions/run.pp new file mode 100644 index 00000000..e22fe133 --- /dev/null +++ b/f2s/resources/firewall/actions/run.pp @@ -0,0 +1,132 @@ +notice('MODULAR: firewall.pp') + +$network_scheme = hiera_hash('network_scheme') +$ironic_hash = hiera_hash('ironic', {}) + +# Workaround for fuel bug with firewall +firewall {'003 remote rabbitmq ': + sport => [ 4369, 5672, 41055, 55672, 61613 ], + source => hiera('master_ip'), + proto => 'tcp', + action => 'accept', + require => Class['openstack::firewall'], +} + +firewall {'004 remote puppet ': + sport => [ 8140 ], + source => hiera('master_ip'), + proto => 'tcp', + action => 'accept', + require => Class['openstack::firewall'], +} + +# allow local rabbitmq admin traffic for LP#1383258 +firewall {'005 local rabbitmq admin': + sport => [ 15672 ], + iniface => 'lo', + proto => 'tcp', + action => 'accept', + require => Class['openstack::firewall'], +} + +# reject all non-local rabbitmq admin traffic for LP#1450443 +firewall {'006 reject non-local rabbitmq admin': + sport => [ 15672 ], + proto => 'tcp', + action => 'drop', + require => Class['openstack::firewall'], +} + +# allow connections from haproxy namespace +firewall {'030 allow connections from haproxy namespace': + source => '240.0.0.2', + action => 'accept', + require => Class['openstack::firewall'], +} + +prepare_network_config(hiera_hash('network_scheme')) +class { 'openstack::firewall' : + nova_vnc_ip_range => get_routable_networks_for_network_role($network_scheme, 'nova/api'), + nova_api_ip_range => get_network_role_property('nova/api', 'network'), + libvirt_network => get_network_role_property('management', 'network'), + keystone_network => get_network_role_property('keystone/api', 'network'), +} + +if $ironic_hash['enabled'] { + $nodes_hash = hiera('nodes', {}) + $roles = node_roles($nodes_hash, hiera('uid')) + $network_metadata = hiera_hash('network_metadata', {}) + 
$baremetal_int = get_network_role_property('ironic/baremetal', 'interface') + $baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr'] + $baremetal_ipaddr = get_network_role_property('ironic/baremetal', 'ipaddr') + $baremetal_network = get_network_role_property('ironic/baremetal', 'network') + + firewallchain { 'baremetal:filter:IPv4': + ensure => present, + } -> + firewall { '999 drop all baremetal': + chain => 'baremetal', + action => 'drop', + proto => 'all', + } -> + firewall {'00 baremetal-filter': + proto => 'all', + iniface => $baremetal_int, + jump => 'baremetal', + require => Class['openstack::firewall'], + } + + if member($roles, 'controller') or member($roles, 'primary-controller') { + firewall { '100 allow baremetal ping from VIP': + chain => 'baremetal', + source => $baremetal_vip, + destination => $baremetal_ipaddr, + proto => 'icmp', + icmp => 'echo-request', + action => 'accept', + } + firewall { '207 ironic-api' : + dport => '6385', + proto => 'tcp', + action => 'accept', + } + } + + if member($roles, 'ironic') { + firewall { '101 allow baremetal-related': + chain => 'baremetal', + source => $baremetal_network, + destination => $baremetal_ipaddr, + proto => 'all', + state => ['RELATED', 'ESTABLISHED'], + action => 'accept', + } + + firewall { '102 allow baremetal-rsyslog': + chain => 'baremetal', + source => $baremetal_network, + destination => $baremetal_ipaddr, + proto => 'udp', + dport => '514', + action => 'accept', + } + + firewall { '103 allow baremetal-TFTP': + chain => 'baremetal', + source => $baremetal_network, + destination => $baremetal_ipaddr, + proto => 'udp', + dport => '69', + action => 'accept', + } + + k_mod {'nf_conntrack_tftp': + ensure => 'present' + } + + file_line {'nf_conntrack_tftp_on_boot': + path => '/etc/modules', + line => 'nf_conntrack_tftp', + } + } +} diff --git a/f2s/resources/firewall/meta.yaml b/f2s/resources/firewall/meta.yaml new file mode 100644 index 00000000..927da034 --- /dev/null +++ 
b/f2s/resources/firewall/meta.yaml @@ -0,0 +1,16 @@ +id: firewall +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + ironic: + value: null + master_ip: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/fuel_pkgs/actions/run.pp b/f2s/resources/fuel_pkgs/actions/run.pp new file mode 100644 index 00000000..d425f78a --- /dev/null +++ b/f2s/resources/fuel_pkgs/actions/run.pp @@ -0,0 +1,10 @@ +notice('MODULAR: fuel_pkgs.pp') + +$fuel_packages = [ + 'fuel-ha-utils', + 'fuel-misc', +] + +package { $fuel_packages : + ensure => 'latest', +} diff --git a/f2s/resources/fuel_pkgs/meta.yaml b/f2s/resources/fuel_pkgs/meta.yaml new file mode 100644 index 00000000..364f7324 --- /dev/null +++ b/f2s/resources/fuel_pkgs/meta.yaml @@ -0,0 +1,10 @@ +id: fuel_pkgs +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/generate_vms/actions/run.pp b/f2s/resources/generate_vms/actions/run.pp new file mode 100644 index 00000000..29a8201e --- /dev/null +++ b/f2s/resources/generate_vms/actions/run.pp @@ -0,0 +1,49 @@ +notice('MODULAR: generate_vms.pp') + +$libvirt_dir = '/etc/libvirt/qemu' +$template_dir = '/var/lib/nova' +$packages = ['qemu-utils', 'qemu-kvm', 'libvirt-bin', 'xmlstarlet'] +$libvirt_service_name = 'libvirtd' + +$vms = hiera_array('vms_conf') + +define vm_config { + $details = $name + $id = $details['id'] + + file { "${template_dir}/template_${id}_vm.xml": + owner => 'root', + group => 'root', + content => template('osnailyfacter/vm_libvirt.erb'), + } +} + +package { $packages: + ensure => 'installed', +} + +service { $libvirt_service_name: + ensure => 'running', + require => Package[$packages], + before => Exec['generate_vms'], +} + +file { "${libvirt_dir}/autostart": + ensure => 'directory', + require => Package[$packages], +} + +file { "${template_dir}": + ensure => 'directory', +} + 
+vm_config { $vms: + before => Exec['generate_vms'], + require => File["${template_dir}"], +} + +exec { 'generate_vms': + command => "/usr/bin/generate_vms.sh ${libvirt_dir} ${template_dir}", + path => ['/usr/sbin', '/usr/bin' , '/sbin', '/bin'], + require => [File["${template_dir}"], File["${libvirt_dir}/autostart"]], +} diff --git a/f2s/resources/generate_vms/meta.yaml b/f2s/resources/generate_vms/meta.yaml new file mode 100644 index 00000000..dc6c9553 --- /dev/null +++ b/f2s/resources/generate_vms/meta.yaml @@ -0,0 +1,10 @@ +id: generate_vms +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/glance-db/actions/run.pp b/f2s/resources/glance-db/actions/run.pp new file mode 100644 index 00000000..bdec1277 --- /dev/null +++ b/f2s/resources/glance-db/actions/run.pp @@ -0,0 +1,53 @@ +notice('MODULAR: glance/db.pp') + +$glance_hash = hiera_hash('glance', {}) +$mysql_hash = hiera_hash('mysql', {}) +$management_vip = hiera('management_vip') +$database_vip = hiera('database_vip') + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$db_user = pick($glance_hash['db_user'], 'glance') +$db_name = pick($glance_hash['db_name'], 'glance') +$db_password = pick($glance_hash['db_password'], $mysql_root_password) + +$db_host = pick($glance_hash['db_host'], $database_vip) +$db_create = pick($glance_hash['db_create'], $mysql_db_create) +$db_root_user = pick($glance_hash['root_user'], $mysql_root_user) +$db_root_password = pick($glance_hash['root_password'], $mysql_root_password) + +$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) +validate_string($database_vip) + + +if $db_create { + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'glance::db::mysql': 
+ user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['glance::db::mysql'] +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server diff --git a/f2s/resources/glance-db/meta.yaml b/f2s/resources/glance-db/meta.yaml new file mode 100644 index 00000000..47107d52 --- /dev/null +++ b/f2s/resources/glance-db/meta.yaml @@ -0,0 +1,22 @@ +id: glance-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + glance: + value: null + management_vip: + value: null + mysql: + value: null + mysql_custom_setup_class: + value: null + node_name: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/glance-keystone/actions/run.pp b/f2s/resources/glance-keystone/actions/run.pp new file mode 100644 index 00000000..6a06be29 --- /dev/null +++ b/f2s/resources/glance-keystone/actions/run.pp @@ -0,0 +1,42 @@ +notice('MODULAR: glance/keystone.pp') + +$glance_hash = hiera_hash('glance', {}) +$public_vip = hiera('public_vip') +$public_ssl_hash = hiera('public_ssl') +$admin_address = hiera('management_vip') +$region = pick($glance_hash['region'], hiera('region', 'RegionOne')) +$password = $glance_hash['user_password'] +$auth_name = pick($glance_hash['auth_name'], 'glance') +$configure_endpoint = pick($glance_hash['configure_endpoint'], true) +$configure_user = pick($glance_hash['configure_user'], true) +$configure_user_role = pick($glance_hash['configure_user_role'], true) +$service_name = pick($glance_hash['service_name'], 'glance') +$tenant = pick($glance_hash['tenant'], 'services') + +$public_address = $public_ssl_hash['services'] ? 
{ + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} + +$public_url = "${public_protocol}://${public_address}:9292" +$admin_url = "http://${admin_address}:9292" + +validate_string($public_address) +validate_string($password) + +class { '::glance::keystone::auth': + password => $password, + auth_name => $auth_name, + configure_endpoint => $configure_endpoint, + configure_user => $configure_user, + configure_user_role => $configure_user_role, + service_name => $service_name, + public_url => $public_url, + admin_url => $admin_url, + internal_url => $admin_url, + region => $region, +} diff --git a/f2s/resources/glance-keystone/meta.yaml b/f2s/resources/glance-keystone/meta.yaml new file mode 100644 index 00000000..59a923d2 --- /dev/null +++ b/f2s/resources/glance-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: glance-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + glance: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null diff --git a/f2s/resources/glance/actions/run.pp b/f2s/resources/glance/actions/run.pp new file mode 100644 index 00000000..66d8de90 --- /dev/null +++ b/f2s/resources/glance/actions/run.pp @@ -0,0 +1,128 @@ +notice('MODULAR: glance.pp') + +$network_scheme = hiera_hash('network_scheme', {}) +$network_metadata = hiera_hash('network_metadata', {}) +prepare_network_config($network_scheme) + +$glance_hash = hiera_hash('glance', {}) +$verbose = pick($glance_hash['verbose'], hiera('verbose', true)) +$debug = pick($glance_hash['debug'], hiera('debug', false)) +$management_vip = hiera('management_vip') +$database_vip = hiera('database_vip') +$service_endpoint = hiera('service_endpoint') +$storage_hash = hiera('storage') +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', 
false) +$syslog_log_facility = hiera('syslog_log_facility_glance') +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$max_pool_size = hiera('max_pool_size') +$max_overflow = hiera('max_overflow') +$ceilometer_hash = hiera_hash('ceilometer', {}) +$region = hiera('region','RegionOne') +$glance_endpoint = $management_vip +$service_workers = pick($glance_hash['glance_workers'], min(max($::processorcount, 2), 16)) + +$db_type = 'mysql' +$db_host = pick($glance_hash['db_host'], $database_vip) +$api_bind_address = get_network_role_property('glance/api', 'ipaddr') +$enabled = true +$max_retries = '-1' +$idle_timeout = '3600' +$auth_uri = "http://${service_endpoint}:5000/" + +$rabbit_password = $rabbit_hash['password'] +$rabbit_user = $rabbit_hash['user'] +$rabbit_hosts = split(hiera('amqp_hosts',''), ',') +$rabbit_virtual_host = '/' + +$glance_db_user = pick($glance_hash['db_user'], 'glance') +$glance_db_dbname = pick($glance_hash['db_name'], 'glance') +$glance_db_password = $glance_hash['db_password'] +$glance_user = pick($glance_hash['user'],'glance') +$glance_user_password = $glance_hash['user_password'] +$glance_tenant = pick($glance_hash['tenant'],'services') +$glance_vcenter_host = $glance_hash['vc_host'] +$glance_vcenter_user = $glance_hash['vc_user'] +$glance_vcenter_password = $glance_hash['vc_password'] +$glance_vcenter_datacenter = $glance_hash['vc_datacenter'] +$glance_vcenter_datastore = $glance_hash['vc_datastore'] +$glance_vcenter_image_dir = $glance_hash['vc_image_dir'] +$glance_vcenter_api_retry_count = '20' +$glance_image_cache_max_size = $glance_hash['image_cache_max_size'] +$glance_pipeline = pick($glance_hash['pipeline'], 'keystone') +$glance_large_object_size = pick($glance_hash['large_object_size'], '5120') + +$rados_connect_timeout = '30' + +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' + $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] + $glance_show_image_direct_url = 
pick($glance_hash['show_image_direct_url'], true) +} elsif ($storage_hash['images_vcenter']) { + $glance_backend = 'vmware' + $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] + $glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], true) +} else { + $glance_backend = 'swift' + $glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ] + $swift_store_large_object_size = $glance_large_object_size + $glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], false) +} + +############################################################################### + +class { 'openstack::glance': + verbose => $verbose, + debug => $debug, + db_type => $db_type, + db_host => $db_host, + glance_db_user => $glance_db_user, + glance_db_dbname => $glance_db_dbname, + glance_db_password => $glance_db_password, + glance_user => $glance_user, + glance_user_password => $glance_user_password, + glance_tenant => $glance_tenant, + glance_vcenter_host => $glance_vcenter_host, + glance_vcenter_user => $glance_vcenter_user, + glance_vcenter_password => $glance_vcenter_password, + glance_vcenter_datacenter => $glance_vcenter_datacenter, + glance_vcenter_datastore => $glance_vcenter_datastore, + glance_vcenter_image_dir => $glance_vcenter_image_dir, + glance_vcenter_api_retry_count => $glance_vcenter_api_retry_count, + auth_uri => $auth_uri, + keystone_host => $service_endpoint, + region => $region, + bind_host => $api_bind_address, + enabled => $enabled, + glance_backend => $glance_backend, + registry_host => $glance_endpoint, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + show_image_direct_url => $glance_show_image_direct_url, + swift_store_large_object_size => $swift_store_large_object_size, + pipeline => $glance_pipeline, + syslog_log_facility => $syslog_log_facility, + glance_image_cache_max_size => $glance_image_cache_max_size, + max_retries => $max_retries, + max_pool_size => 
$max_pool_size, + max_overflow => $max_overflow, + idle_timeout => $idle_timeout, + rabbit_password => $rabbit_password, + rabbit_userid => $rabbit_user, + rabbit_hosts => $rabbit_hosts, + rabbit_virtual_host => $rabbit_virtual_host, + known_stores => $glance_known_stores, + ceilometer => $ceilometer_hash[enabled], + service_workers => $service_workers, + rados_connect_timeout => $rados_connect_timeout, +} + +####### Disable upstart startup on install ####### +if($::operatingsystem == 'Ubuntu') { + tweaks::ubuntu_service_override { 'glance-api': + package_name => 'glance-api', + } + tweaks::ubuntu_service_override { 'glance-registry': + package_name => 'glance-registry', + } +} diff --git a/f2s/resources/glance/meta.yaml b/f2s/resources/glance/meta.yaml new file mode 100644 index 00000000..e65d4f01 --- /dev/null +++ b/f2s/resources/glance/meta.yaml @@ -0,0 +1,46 @@ +id: glance +handler: puppetv2 +version: '8.0' +inputs: + amqp_hosts: + value: null + ceilometer: + value: null + database_vip: + value: null + debug: + value: null + fqdn: + value: null + glance: + value: null + management_vip: + value: null + max_overflow: + value: null + max_pool_size: + value: null + network_metadata: + value: null + network_scheme: + value: null + puppet_modules: + value: null + rabbit_hash: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + storage: + value: null + syslog_log_facility_glance: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/globals/actions/run.pp b/f2s/resources/globals/actions/run.pp new file mode 100644 index 00000000..e8d712fd --- /dev/null +++ b/f2s/resources/globals/actions/run.pp @@ -0,0 +1,293 @@ +notice('MODULAR: globals.pp') + +$service_token_off = false +$globals_yaml_file = '/etc/hiera/globals.yaml' + +# remove cached globals values before anything else +remove_file($globals_yaml_file) + +$network_scheme = hiera_hash('network_scheme', {}) 
+if empty($network_scheme) { + fail("Network_scheme not given in the astute.yaml") +} +$network_metadata = hiera_hash('network_metadata', {}) +if empty($network_metadata) { + fail("Network_metadata not given in the astute.yaml") +} + +$node_name = regsubst(hiera('fqdn', $::hostname), '\..*$', '') +$node = $network_metadata['nodes'][$node_name] +if empty($node) { + fail("Node hostname is not defined in the astute.yaml") +} + +prepare_network_config($network_scheme) + +# DEPRICATED +$nodes_hash = hiera('nodes', {}) + +$deployment_mode = hiera('deployment_mode', 'ha_compact') +$roles = $node['node_roles'] +$storage_hash = hiera('storage', {}) +$syslog_hash = hiera('syslog', {}) +$base_syslog_hash = hiera('base_syslog', {}) +$sahara_hash = hiera('sahara', {}) +$murano_hash = hiera('murano', {}) +$heat_hash = hiera_hash('heat', {}) +$vcenter_hash = hiera('vcenter', {}) +$nova_hash = hiera_hash('nova', {}) +$mysql_hash = hiera('mysql', {}) +$rabbit_hash = hiera_hash('rabbit', {}) +$glance_hash = hiera_hash('glance', {}) +$swift_hash = hiera('swift', {}) +$cinder_hash = hiera_hash('cinder', {}) +$ceilometer_hash = hiera('ceilometer',{}) +$access_hash = hiera_hash('access', {}) +$mp_hash = hiera('mp', {}) +$keystone_hash = merge({'service_token_off' => $service_token_off}, + hiera_hash('keystone', {})) + +$node_role = hiera('role') +$dns_nameservers = hiera('dns_nameservers', []) +$use_ceilometer = $ceilometer_hash['enabled'] +$use_neutron = hiera('quantum', false) +$use_ovs = hiera('use_ovs', $use_neutron) +$verbose = true +$debug = hiera('debug', false) +$use_monit = false +$master_ip = hiera('master_ip') +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') +$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3') +$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') +$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') 
+$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') +$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') +$syslog_log_facility_heat = hiera('syslog_log_facility_heat','LOG_LOCAL0') +$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0') +$syslog_log_facility_ceilometer = hiera('syslog_log_facility_ceilometer','LOG_LOCAL0') +$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') + +$nova_report_interval = hiera('nova_report_interval', 60) +$nova_service_down_time = hiera('nova_service_down_time', 180) +$apache_ports = hiera_array('apache_ports', ['80', '8888', '5000', '35357']) + +$openstack_version = hiera('openstack_version', + { + 'keystone' => 'installed', + 'glance' => 'installed', + 'horizon' => 'installed', + 'nova' => 'installed', + 'novncproxy' => 'installed', + 'cinder' => 'installed', + } +) + +$nova_rate_limits = hiera('nova_rate_limits', + { + 'POST' => 100000, + 'POST_SERVERS' => 100000, + 'PUT' => 1000, + 'GET' => 100000, + 'DELETE' => 100000 + } +) + +$cinder_rate_limits = hiera('cinder_rate_limits', + { + 'POST' => 100000, + 'POST_SERVERS' => 100000, + 'PUT' => 100000, + 'GET' => 100000, + 'DELETE' => 100000 + } +) + +$default_gateway = get_default_gateways() +$public_vip = $network_metadata['vips']['public']['ipaddr'] +$management_vip = $network_metadata['vips']['management']['ipaddr'] +$public_vrouter_vip = $network_metadata['vips']['vrouter_pub']['ipaddr'] +$management_vrouter_vip = $network_metadata['vips']['vrouter']['ipaddr'] + +$database_vip = is_hash($network_metadata['vips']['database']) ? { + true => pick($network_metadata['vips']['database']['ipaddr'], $management_vip), + default => $management_vip +} +$service_endpoint = is_hash($network_metadata['vips']['service_endpoint']) ? 
{ + true => pick($network_metadata['vips']['service_endpoint']['ipaddr'], $management_vip), + default => $management_vip +} + +if $use_neutron { + $novanetwork_params = {} + $neutron_config = hiera_hash('quantum_settings') + $network_provider = 'neutron' + $neutron_db_password = $neutron_config['database']['passwd'] + $neutron_user_password = $neutron_config['keystone']['admin_password'] + $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] + $base_mac = $neutron_config['L2']['base_mac'] + $management_network_range = get_network_role_property('mgmt/vip', 'network') +} else { + $neutron_config = {} + $novanetwork_params = hiera('novanetwork_parameters') + $network_size = $novanetwork_params['network_size'] + $num_networks = $novanetwork_params['num_networks'] + $network_provider = 'nova' + if ( $novanetwork_params['network_manager'] == 'FlatDHCPManager') { + $private_int = get_network_role_property('novanetwork/fixed', 'interface') + } else { + $private_int = get_network_role_property('novanetwork/vlan', 'interface') + $vlan_start = $novanetwork_params['vlan_start'] + $network_config = { + 'vlan_start' => $vlan_start, + } + } + $network_manager = "nova.network.manager.${novanetwork_params['network_manager']}" + $management_network_range = hiera('management_network_range') +} + +if $node_role == 'primary-controller' { + $primary_controller = true +} else { + $primary_controller = false +} + +$controllers_hash = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$mountpoints = filter_hash($mp_hash,'point') + +# AMQP configuration +$queue_provider = hiera('queue_provider','rabbitmq') +$rabbit_ha_queues = true + +if !$rabbit_hash['user'] { + $rabbit_hash['user'] = 'nova' +} + +$amqp_port = hiera('amqp_ports', '5673') +if hiera('amqp_hosts', false) { + # using pre-defined in astute.yaml RabbitMQ servers + $amqp_hosts = hiera('amqp_hosts') +} else { + # using RabbitMQ servers on controllers + # 
todo(sv): switch from 'controller' nodes to 'rmq' nodes as soon as it was implemented as additional node-role + $controllers_with_amqp_server = get_node_to_ipaddr_map_by_network_role($controllers_hash, 'mgmt/messaging') + $amqp_nodes = ipsort(values($controllers_with_amqp_server)) + # amqp_hosts() randomize order of RMQ endpoints and put local one first + $amqp_hosts = amqp_hosts($amqp_nodes, $amqp_port, get_network_role_property('mgmt/messaging', 'ipaddr')) +} + +# MySQL and SQLAlchemy backend configuration +$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera') +$max_pool_size = hiera('max_pool_size', min($::processorcount * 5 + 0, 30 + 0)) +$max_overflow = hiera('max_overflow', min($::processorcount * 5 + 0, 60 + 0)) +$max_retries = hiera('max_retries', '-1') +$idle_timeout = hiera('idle_timeout','3600') +$nova_db_password = $nova_hash['db_password'] +$sql_connection = "mysql://nova:${nova_db_password}@${database_vip}/nova?read_timeout = 6 0" +$mirror_type = hiera('mirror_type', 'external') +$multi_host = hiera('multi_host', true) + +# Determine who should get the volume service +if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { + $manage_volumes = 'iscsi' +} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { + $manage_volumes = 'vmdk' +} elsif ($storage_hash['volumes_ceph']) { + $manage_volumes = 'ceph' +} else { + $manage_volumes = false +} + +# Define ceph-related variables +$ceph_primary_monitor_node = get_nodes_hash_by_roles($network_metadata, ['primary-controller']) +$ceph_monitor_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$ceph_rgw_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +#Determine who should be the default backend +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' + $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] +} elsif ($storage_hash['images_vcenter']) { + 
$glance_backend = 'vmware' + $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] +} else { + $glance_backend = 'file' + $glance_known_stores = false +} + +# Define ceilometer-related variables: +# todo: use special node-roles instead controllers in the future +$ceilometer_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define memcached-related variables: +$memcache_roles = hiera('memcache_roles', ['primary-controller', 'controller']) + +# Define node roles, that will carry corosync/pacemaker +$corosync_roles = hiera('corosync_roles', ['primary-controller', 'controller']) + +# Define cinder-related variables +# todo: use special node-roles instead controllers in the future +$cinder_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define horizon-related variables: +# todo: use special node-roles instead controllers in the future +$horizon_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define swift-related variables +# todo(sv): use special node-roles instead controllers in the future +$swift_master_role = 'primary-controller' +$swift_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$swift_proxies = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) +$swift_proxy_caches = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) # memcache for swift +$is_primary_swift_proxy = $primary_controller + +# Define murano-related variables +$murano_roles = ['primary-controller', 'controller'] + +# Define heat-related variables: +$heat_roles = ['primary-controller', 'controller'] + +# Define sahara-related variable +$sahara_roles = ['primary-controller', 'controller'] + +# Define ceilometer-releated parameters +if !$ceilometer_hash['event_time_to_live'] { $ceilometer_hash['event_time_to_live'] = '604800'} +if 
!$ceilometer_hash['metering_time_to_live'] { $ceilometer_hash['metering_time_to_live'] = '604800' } +if !$ceilometer_hash['http_timeout'] { $ceilometer_hash['http_timeout'] = '600' } + +# Define database-related variables: +# todo: use special node-roles instead controllers in the future +$database_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define Nova-API variables: +# todo: use special node-roles instead controllers in the future +$nova_api_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +# Define mongo-related variables +$mongo_roles = ['primary-mongo', 'mongo'] + +# Define neutron-related variables: +# todo: use special node-roles instead controllers in the future +$neutron_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) + +#Define Ironic-related variables: +$ironic_api_nodes = $controllers_hash + +# Change nova_hash to add vnc port to it +# TODO(sbog): change this when we will get rid of global hashes +$public_ssl_hash = hiera('public_ssl') +if $public_ssl_hash['services'] { + $nova_hash['vncproxy_protocol'] = 'https' +} else { + $nova_hash['vncproxy_protocol'] = 'http' +} + +# save all these global variables into hiera yaml file for later use +# by other manifests with hiera function +file { $globals_yaml_file : + ensure => 'present', + mode => '0644', + owner => 'root', + group => 'root', + content => template('osnailyfacter/globals_yaml.erb') +} diff --git a/f2s/resources/globals/meta.yaml b/f2s/resources/globals/meta.yaml new file mode 100644 index 00000000..f2fb6373 --- /dev/null +++ b/f2s/resources/globals/meta.yaml @@ -0,0 +1,124 @@ +id: globals +handler: puppetv2 +version: '8.0' +inputs: + access: + value: null + amqp_hosts: + value: null + amqp_ports: + value: null + apache_ports: + value: null + base_syslog: + value: null + ceilometer: + value: null + cinder: + value: null + cinder_rate_limits: + value: null + 
corosync_roles: + value: null + custom_mysql_setup_class: + value: null + debug: + value: null + deployment_mode: + value: null + dns_nameservers: + value: null + fqdn: + value: null + glance: + value: null + heat: + value: null + idle_timeout: + value: null + keystone: + value: null + master_ip: + value: null + max_overflow: + value: null + max_pool_size: + value: null + max_retries: + value: null + memcache_roles: + value: null + mirror_type: + value: null + mp: + value: null + multi_host: + value: null + murano: + value: null + mysql: + value: null + network_metadata: + value: null + network_scheme: + value: null + nodes: + value: null + nova: + value: null + nova_rate_limits: + value: null + nova_report_interval: + value: null + nova_service_down_time: + value: null + openstack_version: + value: null + public_ssl: + value: null + puppet_modules: + value: null + quantum: + value: null + quantum_settings: + value: null + queue_provider: + value: null + rabbit: + value: null + role: + value: null + sahara: + value: null + storage: + value: null + swift: + value: null + syslog: + value: null + syslog_log_facility_ceilometer: + value: null + syslog_log_facility_ceph: + value: null + syslog_log_facility_cinder: + value: null + syslog_log_facility_glance: + value: null + syslog_log_facility_heat: + value: null + syslog_log_facility_keystone: + value: null + syslog_log_facility_murano: + value: null + syslog_log_facility_neutron: + value: null + syslog_log_facility_nova: + value: null + syslog_log_facility_sahara: + value: null + use_ovs: + value: null + use_syslog: + value: null + vcenter: + value: null diff --git a/f2s/resources/heat-db/actions/run.pp b/f2s/resources/heat-db/actions/run.pp new file mode 100644 index 00000000..ed7c4c9a --- /dev/null +++ b/f2s/resources/heat-db/actions/run.pp @@ -0,0 +1,53 @@ +notice('MODULAR: heat/db.pp') + +$heat_hash = hiera_hash('heat', {}) +$mysql_hash = hiera_hash('mysql', {}) +$management_vip = hiera('management_vip', undef) 
+$database_vip = hiera('database_vip', undef) + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$db_user = pick($heat_hash['db_user'], 'heat') +$db_name = pick($heat_hash['db_name'], 'heat') +$db_password = pick($heat_hash['db_password'], $mysql_root_password) + +$db_host = pick($heat_hash['db_host'], $database_vip) +$db_create = pick($heat_hash['db_create'], $mysql_db_create) +$db_root_user = pick($heat_hash['root_user'], $mysql_root_user) +$db_root_password = pick($heat_hash['root_password'], $mysql_root_password) + +$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) + +if $db_create { + + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'heat::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['heat::db::mysql'] + +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server diff --git a/f2s/resources/heat-db/meta.yaml b/f2s/resources/heat-db/meta.yaml new file mode 100644 index 00000000..5dad6756 --- /dev/null +++ b/f2s/resources/heat-db/meta.yaml @@ -0,0 +1,20 @@ +id: heat-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + heat: + value: null + management_vip: + value: null + mysql: + value: null + mysql_custom_setup_class: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/heat-keystone/actions/run.pp b/f2s/resources/heat-keystone/actions/run.pp new file mode 100644 index 00000000..50034745 --- /dev/null +++ 
b/f2s/resources/heat-keystone/actions/run.pp @@ -0,0 +1,59 @@ +notice('MODULAR: heat/keystone.pp') + +$heat_hash = hiera_hash('heat', {}) +$public_vip = hiera('public_vip') +$admin_address = hiera('management_vip') +$region = pick($heat_hash['region'], hiera('region', 'RegionOne')) +$public_ssl_hash = hiera('public_ssl') +$public_address = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} + +$password = $heat_hash['user_password'] +$auth_name = pick($heat_hash['auth_name'], 'heat') +$configure_endpoint = pick($heat_hash['configure_endpoint'], true) +$configure_user = pick($heat_hash['configure_user'], true) +$configure_user_role = pick($heat_hash['configure_user_role'], true) +$service_name = pick($heat_hash['service_name'], 'heat') +$tenant = pick($heat_hash['tenant'], 'services') + +validate_string($public_address) +validate_string($password) + +$public_url = "${public_protocol}://${public_address}:8004/v1/%(tenant_id)s" +$admin_url = "http://${admin_address}:8004/v1/%(tenant_id)s" +$public_url_cfn = "${public_protocol}://${public_address}:8000/v1" +$admin_url_cfn = "http://${admin_address}:8000/v1" + + + +class { '::heat::keystone::auth' : + password => $password, + auth_name => $auth_name, + region => $region, + tenant => $keystone_tenant, + email => "${auth_name}@localhost", + configure_endpoint => true, + trusts_delegated_roles => $trusts_delegated_roles, + public_url => $public_url, + internal_url => $admin_url, + admin_url => $admin_url, +} + +class { '::heat::keystone::auth_cfn' : + password => $password, + auth_name => "${auth_name}-cfn", + service_type => 'cloudformation', + region => $region, + tenant => $keystone_tenant, + email => "${auth_name}-cfn@localhost", + configure_endpoint => true, + public_url => $public_url_cfn, + internal_url => $admin_url_cfn, + admin_url => $admin_url_cfn, +} diff --git 
a/f2s/resources/heat-keystone/meta.yaml b/f2s/resources/heat-keystone/meta.yaml new file mode 100644 index 00000000..f6376fe6 --- /dev/null +++ b/f2s/resources/heat-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: heat-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + heat: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null diff --git a/f2s/resources/heat/actions/run.pp b/f2s/resources/heat/actions/run.pp new file mode 100644 index 00000000..528e2eb3 --- /dev/null +++ b/f2s/resources/heat/actions/run.pp @@ -0,0 +1,156 @@ +notice('MODULAR: heat.pp') + +prepare_network_config(hiera('network_scheme', {})) +$management_vip = hiera('management_vip') +$heat_hash = hiera_hash('heat', {}) +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$max_retries = hiera('max_retries') +$max_pool_size = hiera('max_pool_size') +$max_overflow = hiera('max_overflow') +$idle_timeout = hiera('idle_timeout') +$service_endpoint = hiera('service_endpoint') +$debug = pick($heat_hash['debug'], hiera('debug', false)) +$verbose = pick($heat_hash['verbose'], hiera('verbose', true)) +$use_stderr = hiera('use_stderr', false) +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_heat = hiera('syslog_log_facility_heat') +$deployment_mode = hiera('deployment_mode') +$bind_address = get_network_role_property('heat/api', 'ipaddr') +$database_password = $heat_hash['db_password'] +$keystone_user = pick($heat_hash['user'], 'heat') +$keystone_tenant = pick($heat_hash['tenant'], 'services') +$db_host = pick($heat_hash['db_host'], hiera('database_vip')) +$database_user = pick($heat_hash['db_user'], 'heat') +$database_name = hiera('heat_db_name', 'heat') +$read_timeout = '60' +$sql_connection = "mysql://${database_user}:${database_password}@${db_host}/${database_name}?read_timeout=${read_timeout}" +$region = hiera('region', 'RegionOne') +$public_ssl_hash = 
hiera('public_ssl') + +####### Disable upstart startup on install ####### +if $::operatingsystem == 'Ubuntu' { + tweaks::ubuntu_service_override { 'heat-api-cloudwatch': + package_name => 'heat-api-cloudwatch', + } + tweaks::ubuntu_service_override { 'heat-api-cfn': + package_name => 'heat-api-cfn', + } + tweaks::ubuntu_service_override { 'heat-api': + package_name => 'heat-api', + } + tweaks::ubuntu_service_override { 'heat-engine': + package_name => 'heat-engine', + } + + Tweaks::Ubuntu_service_override['heat-api'] -> Service['heat-api'] + Tweaks::Ubuntu_service_override['heat-api-cfn'] -> Service['heat-api-cfn'] + Tweaks::Ubuntu_service_override['heat-api-cloudwatch'] -> Service['heat-api-cloudwatch'] + Tweaks::Ubuntu_service_override['heat-engine'] -> Service['heat-engine'] +} + +class { 'openstack::heat' : + external_ip => $management_vip, + keystone_auth => pick($heat_hash['keystone_auth'], true), + api_bind_host => $bind_address, + api_cfn_bind_host => $bind_address, + api_cloudwatch_bind_host => $bind_address, + keystone_host => $service_endpoint, + keystone_user => $keystone_user, + keystone_password => $heat_hash['user_password'], + keystone_tenant => $keystone_tenant, + keystone_ec2_uri => "http://${service_endpoint}:5000/v2.0", + region => $region, + public_ssl => $public_ssl_hash['services'], + rpc_backend => 'rabbit', + amqp_hosts => split(hiera('amqp_hosts',''), ','), + amqp_user => $rabbit_hash['user'], + amqp_password => $rabbit_hash['password'], + sql_connection => $sql_connection, + db_host => $db_host, + db_password => $database_password, + max_retries => $max_retries, + max_pool_size => $max_pool_size, + max_overflow => $max_overflow, + idle_timeout => $idle_timeout, + debug => $debug, + verbose => $verbose, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + syslog_log_facility => $syslog_log_facility_heat, + auth_encryption_key => $heat_hash['auth_encryption_key'], +} + +if hiera('heat_ha_engine', true){ + if ($deployment_mode == 
'ha') or ($deployment_mode == 'ha_compact') { + include ::heat_ha::engine + } +} + +#------------------------------ + +class heat::docker_resource ( + $enabled = true, + $package_name = 'heat-docker', +) { + if $enabled { + package { 'heat-docker': + ensure => installed, + name => $package_name, + } + + Package['heat-docker'] ~> Service<| title == 'heat-engine' |> + } +} + +if $::osfamily == 'RedHat' { + $docker_resource_package_name = 'openstack-heat-docker' +} elsif $::osfamily == 'Debian' { + $docker_resource_package_name = 'heat-docker' +} + +class { 'heat::docker_resource' : + package_name => $docker_resource_package_name, +} + +$haproxy_stats_url = "http://${service_endpoint}:10000/;csv" + +haproxy_backend_status { 'keystone-admin' : + name => 'keystone-2', + count => '200', + step => '6', + url => $haproxy_stats_url, +} + +class { 'heat::keystone::domain' : + auth_url => "http://${service_endpoint}:35357/v2.0", + keystone_admin => $keystone_user, + keystone_password => $heat_hash['user_password'], + keystone_tenant => $keystone_tenant, + domain_name => 'heat', + domain_admin => 'heat_admin', + domain_password => $heat_hash['user_password'], +} + +Class['heat'] -> +Haproxy_backend_status['keystone-admin'] -> +Class['heat::keystone::domain'] ~> +Service<| title == 'heat-engine' |> + +###################### + +exec { 'wait_for_heat_config' : + command => 'sync && sleep 3', + provider => 'shell', +} + +Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api'] +Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cfn'] +Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cloudwatch'] +Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-engine'] + +###################### + +class mysql::server {} +class mysql::config {} +include mysql::server +include mysql::config diff --git a/f2s/resources/heat/meta.yaml b/f2s/resources/heat/meta.yaml new file mode 100644 index 00000000..56e72f52 --- /dev/null +++ 
b/f2s/resources/heat/meta.yaml @@ -0,0 +1,52 @@ +id: heat +handler: puppetv2 +version: '8.0' +inputs: + amqp_hosts: + value: null + database_vip: + value: null + debug: + value: null + deployment_mode: + value: null + fqdn: + value: null + heat: + value: null + heat_db_name: + value: null + heat_ha_engine: + value: null + idle_timeout: + value: null + management_vip: + value: null + max_overflow: + value: null + max_pool_size: + value: null + max_retries: + value: null + network_scheme: + value: null + public_ssl: + value: null + puppet_modules: + value: null + rabbit_hash: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + syslog_log_facility_heat: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/hiera/actions/run.pp b/f2s/resources/hiera/actions/run.pp new file mode 100644 index 00000000..e23a1cb7 --- /dev/null +++ b/f2s/resources/hiera/actions/run.pp @@ -0,0 +1,75 @@ +notice('MODULAR: hiera.pp') + +$deep_merge_package_name = $::osfamily ? 
{ + /RedHat/ => 'rubygem-deep_merge', + /Debian/ => 'ruby-deep-merge', +} + +$data_dir = '/etc/hiera' +$data = [ + 'override/node/%{::fqdn}', + 'override/class/%{calling_class}', + 'override/module/%{calling_module}', + 'override/plugins', + 'override/common', + 'class/%{calling_class}', + 'module/%{calling_module}', + 'nodes', + 'globals', + 'astute' +] +$astute_data_file = '/etc/astute.yaml' +$hiera_main_config = '/etc/hiera.yaml' +$hiera_puppet_config = '/etc/puppet/hiera.yaml' +$hiera_data_file = "${data_dir}/astute.yaml" + +File { + owner => 'root', + group => 'root', + mode => '0644', +} + +$hiera_config_content = inline_template(' +--- +:backends: + - yaml + +:hierarchy: +<% @data.each do |name| -%> + - <%= name %> +<% end -%> + +:yaml: + :datadir: <%= @data_dir %> +:merge_behavior: deeper +:logger: noop +') + +file { 'hiera_data_dir' : + ensure => 'directory', + path => $data_dir, +} + +file { 'hiera_config' : + ensure => 'present', + path => $hiera_main_config, + content => $hiera_config_content, +} + +file { 'hiera_data_astute' : + ensure => 'symlink', + path => $hiera_data_file, + target => $astute_data_file, +} + +file { 'hiera_puppet_config' : + ensure => 'symlink', + path => $hiera_puppet_config, + target => $hiera_main_config, +} + +# needed to support the 'deeper' merge_behavior setting for hiera +package { 'rubygem-deep_merge': + ensure => present, + name => $deep_merge_package_name, +} diff --git a/f2s/resources/hiera/meta.yaml b/f2s/resources/hiera/meta.yaml new file mode 100644 index 00000000..00e83c27 --- /dev/null +++ b/f2s/resources/hiera/meta.yaml @@ -0,0 +1,8 @@ +id: hiera +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null diff --git a/f2s/resources/horizon/actions/run.pp b/f2s/resources/horizon/actions/run.pp new file mode 100644 index 00000000..4368443b --- /dev/null +++ b/f2s/resources/horizon/actions/run.pp @@ -0,0 +1,68 @@ +notice('MODULAR: horizon.pp') + 
+prepare_network_config(hiera('network_scheme', {})) +$horizon_hash = hiera_hash('horizon', {}) +$service_endpoint = hiera('service_endpoint') +$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) +$memcache_address_map = get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache') +$bind_address = get_network_role_property('horizon', 'ipaddr') +$neutron_advanced_config = hiera_hash('neutron_advanced_configuration', {}) +$public_ssl = hiera('public_ssl') +$ssl_no_verify = $public_ssl['horizon'] + +if $horizon_hash['secret_key'] { + $secret_key = $horizon_hash['secret_key'] +} else { + $secret_key = 'dummy_secret_key' +} + +$neutron_dvr = pick($neutron_advanced_config['neutron_dvr'], false) + +$keystone_scheme = 'http' +$keystone_host = $service_endpoint +$keystone_port = '5000' +$keystone_api = 'v2.0' +$keystone_url = "${keystone_scheme}://${keystone_host}:${keystone_port}/${keystone_api}" + +$neutron_options = {'enable_distributed_router' => $neutron_dvr} + +class { 'openstack::horizon': + secret_key => $secret_key, + cache_server_ip => ipsort(values($memcache_address_map)), + package_ensure => hiera('horizon_package_ensure', 'installed'), + bind_address => $bind_address, + cache_server_port => hiera('memcache_server_port', '11211'), + cache_backend => 'django.core.cache.backends.memcached.MemcachedCache', + cache_options => {'SOCKET_TIMEOUT' => 1,'SERVER_RETRIES' => 1,'DEAD_RETRY' => 1}, + neutron => hiera('use_neutron'), + keystone_url => $keystone_url, + use_ssl => hiera('horizon_use_ssl', false), + ssl_no_verify => $ssl_no_verify, + verbose => pick($horizon_hash['verbose'], hiera('verbose', true)), + debug => pick($horizon_hash['debug'], hiera('debug')), + use_syslog => hiera('use_syslog', true), + nova_quota => hiera('nova_quota'), + servername => hiera('public_vip'), + neutron_options => $neutron_options, +} + +$haproxy_stats_url = "http://${service_endpoint}:10000/;csv" + +haproxy_backend_status { 
'keystone-admin' : + name => 'keystone-2', + count => '30', + step => '3', + url => $haproxy_stats_url, +} + +haproxy_backend_status { 'keystone-public' : + name => 'keystone-1', + count => '30', + step => '3', + url => $haproxy_stats_url, +} + +Class['openstack::horizon'] -> Haproxy_backend_status['keystone-admin'] +Class['openstack::horizon'] -> Haproxy_backend_status['keystone-public'] + +include ::tweaks::apache_wrappers diff --git a/f2s/resources/horizon/meta.yaml b/f2s/resources/horizon/meta.yaml new file mode 100644 index 00000000..36337d34 --- /dev/null +++ b/f2s/resources/horizon/meta.yaml @@ -0,0 +1,44 @@ +id: horizon +handler: puppetv2 +version: '8.0' +inputs: + apache_ports: + value: null + debug: + value: null + fqdn: + value: null + horizon: + value: null + horizon_package_ensure: + value: null + horizon_use_ssl: + value: null + memcache_roles: + value: null + memcache_server_port: + value: null + network_metadata: + value: null + network_scheme: + value: null + neutron_advanced_configuration: + value: null + nova_quota: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null + service_endpoint: + value: null + use_neutron: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/hosts/actions/run.pp b/f2s/resources/hosts/actions/run.pp new file mode 100644 index 00000000..e82bddff --- /dev/null +++ b/f2s/resources/hosts/actions/run.pp @@ -0,0 +1,5 @@ +notice('MODULAR: hosts.pp') + +class { "l23network::hosts_file": + nodes => hiera('nodes'), +} diff --git a/f2s/resources/hosts/meta.yaml b/f2s/resources/hosts/meta.yaml new file mode 100644 index 00000000..8d8ece6a --- /dev/null +++ b/f2s/resources/hosts/meta.yaml @@ -0,0 +1,10 @@ +id: hosts +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + nodes: + value: null + puppet_modules: + value: null diff --git a/f2s/resources/ironic-api/actions/run.pp 
b/f2s/resources/ironic-api/actions/run.pp new file mode 100644 index 00000000..b4fc31ef --- /dev/null +++ b/f2s/resources/ironic-api/actions/run.pp @@ -0,0 +1,61 @@ +notice('MODULAR: ironic/ironic.pp') + +$ironic_hash = hiera_hash('ironic', {}) +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') + +$network_metadata = hiera_hash('network_metadata', {}) + +$database_vip = hiera('database_vip') +$keystone_endpoint = hiera('service_endpoint') +$neutron_endpoint = hiera('neutron_endpoint', $management_vip) +$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") +$debug = hiera('debug', false) +$verbose = hiera('verbose', true) +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER') +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$rabbit_ha_queues = hiera('rabbit_ha_queues') +$amqp_hosts = hiera('amqp_hosts') +$amqp_port = hiera('amqp_port', '5673') +$rabbit_hosts = split($amqp_hosts, ',') +$neutron_config = hiera_hash('quantum_settings') + +$db_host = pick($ironic_hash['db_host'], $database_vip) +$db_user = pick($ironic_hash['db_user'], 'ironic') +$db_name = pick($ironic_hash['db_name'], 'ironic') +$db_password = pick($ironic_hash['db_password'], 'ironic') +$database_connection = "mysql://${db_name}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60" + +$ironic_tenant = pick($ironic_hash['tenant'],'services') +$ironic_user = pick($ironic_hash['auth_name'],'ironic') +$ironic_user_password = pick($ironic_hash['user_password'],'ironic') + +prepare_network_config(hiera('network_scheme', {})) + +$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr'] + +class { 'ironic': + verbose => $verbose, + debug => $debug, + rabbit_hosts => $rabbit_hosts, + rabbit_port => $amqp_port, + rabbit_userid => $rabbit_hash['user'], + rabbit_password => $rabbit_hash['password'], + amqp_durable_queues => $rabbit_ha_queues, + use_syslog => $use_syslog, + 
log_facility => $syslog_log_facility_ironic, + database_connection => $database_connection, + glance_api_servers => $glance_api_servers, +} + +class { 'ironic::client': } + +class { 'ironic::api': + host_ip => get_network_role_property('ironic/api', 'ipaddr'), + auth_host => $keystone_endpoint, + admin_tenant_name => $ironic_tenant, + admin_user => $ironic_user, + admin_password => $ironic_user_password, + neutron_url => "http://${neutron_endpoint}:9696", +} diff --git a/f2s/resources/ironic-api/meta.yaml b/f2s/resources/ironic-api/meta.yaml new file mode 100644 index 00000000..caebd7d7 --- /dev/null +++ b/f2s/resources/ironic-api/meta.yaml @@ -0,0 +1,8 @@ +id: ironic-api +handler: puppetv2 +version: '8.0' +inputs: + ironic: + value: null + puppet_modules: + value: null diff --git a/f2s/resources/ironic-compute/actions/run.pp b/f2s/resources/ironic-compute/actions/run.pp new file mode 100644 index 00000000..b637a8e6 --- /dev/null +++ b/f2s/resources/ironic-compute/actions/run.pp @@ -0,0 +1,98 @@ +##################################################################################### +### ironic-compute is additional compute role with compute_driver=ironic. ### +### It can't be assigned with nova-compute to the same node. It doesn't include ### +### openstack::compute class it is configured separately. 
### +##################################################################################### + +notice('MODULAR: ironic/ironic-compute.pp') + +$ironic_hash = hiera_hash('ironic', {}) +$nova_hash = hiera_hash('nova', {}) +$management_vip = hiera('management_vip') +$database_vip = hiera('database_vip') +$service_endpoint = hiera('service_endpoint') +$neutron_endpoint = hiera('neutron_endpoint', $management_vip) +$ironic_endpoint = hiera('ironic_endpoint', $management_vip) +$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") +$debug = hiera('debug', false) +$verbose = hiera('verbose', true) +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_LOCAL0') +$syslog_log_facility_nova = hiera('syslog_log_facility_nova', 'LOG_LOCAL6') +$amqp_hosts = hiera('amqp_hosts') +$rabbit_hash = hiera_hash('rabbit_hash') +$nova_report_interval = hiera('nova_report_interval') +$nova_service_down_time = hiera('nova_service_down_time') +$neutron_config = hiera_hash('quantum_settings') + +$ironic_tenant = pick($ironic_hash['tenant'],'services') +$ironic_user = pick($ironic_hash['auth_name'],'ironic') +$ironic_user_password = pick($ironic_hash['user_password'],'ironic') + +$db_host = pick($nova_hash['db_host'], $database_vip) +$db_user = pick($nova_hash['db_user'], 'nova') +$db_name = pick($nova_hash['db_name'], 'nova') +$db_password = pick($nova_hash['db_password'], 'nova') +$database_connection = "mysql://${db_name}:${db_password}@${db_host}/${db_name}?read_timeout=60" + +$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) +$cache_server_ip = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes,'mgmt/memcache'))) +$memcached_addresses = suffix($cache_server_ip, inline_template(":<%= @cache_server_port %>")) +$notify_on_state_change = 'vm_and_task_state' + + +class { '::nova': + install_utilities => false, + ensure_package => installed, + 
database_connection => $database_connection, + rpc_backend => 'nova.openstack.common.rpc.impl_kombu', + #FIXME(bogdando) we have to split amqp_hosts until all modules synced + rabbit_hosts => split($amqp_hosts, ','), + rabbit_userid => $rabbit_hash['user'], + rabbit_password => $rabbit_hash['password'], + image_service => 'nova.image.glance.GlanceImageService', + glance_api_servers => $glance_api_servers, + verbose => $verbose, + debug => $debug, + use_syslog => $use_syslog, + log_facility => $syslog_log_facility_nova, + state_path => $nova_hash['state_path'], + report_interval => $nova_report_interval, + service_down_time => $nova_service_down_time, + notify_on_state_change => $notify_on_state_change, + memcached_servers => $memcached_addresses, +} + + +class { '::nova::compute': + ensure_package => installed, + enabled => true, + vnc_enabled => false, + force_config_drive => $nova_hash['force_config_drive'], + #NOTE(bogdando) default became true in 4.0.0 puppet-nova (was false) + neutron_enabled => true, + default_availability_zone => $nova_hash['default_availability_zone'], + default_schedule_zone => $nova_hash['default_schedule_zone'], + reserved_host_memory => '0', +} + + +class { 'nova::compute::ironic': + admin_url => "http://${service_endpoint}:35357/v2.0", + admin_user => $ironic_user, + admin_tenant_name => $ironic_tenant, + admin_passwd => $ironic_user_password, + api_endpoint => "http://${ironic_endpoint}:6385/v1", +} + +class { 'nova::network::neutron': + neutron_admin_password => $neutron_config['keystone']['admin_password'], + neutron_url => "http://${neutron_endpoint}:9696", + neutron_admin_auth_url => "http://${service_endpoint}:35357/v2.0", +} + +file { '/etc/nova/nova-compute.conf': + ensure => absent, + require => Package['nova-compute'], +} ~> Service['nova-compute'] + diff --git a/f2s/resources/ironic-compute/meta.yaml b/f2s/resources/ironic-compute/meta.yaml new file mode 100644 index 00000000..8debec41 --- /dev/null +++ 
b/f2s/resources/ironic-compute/meta.yaml @@ -0,0 +1,10 @@ +id: ironic-compute +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ironic-conductor/actions/run.pp b/f2s/resources/ironic-conductor/actions/run.pp new file mode 100644 index 00000000..1dc47155 --- /dev/null +++ b/f2s/resources/ironic-conductor/actions/run.pp @@ -0,0 +1,121 @@ +notice('MODULAR: ironic/ironic-conductor.pp') + +$network_scheme = hiera('network_scheme', {}) +prepare_network_config($network_scheme) +$baremetal_address = get_network_role_property('ironic/baremetal', 'ipaddr') +$ironic_hash = hiera_hash('ironic', {}) +$management_vip = hiera('management_vip') + +$network_metadata = hiera_hash('network_metadata', {}) +$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr'] + +$database_vip = hiera('database_vip') +$service_endpoint = hiera('service_endpoint') +$neutron_endpoint = hiera('neutron_endpoint', $management_vip) +$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") +$amqp_hosts = hiera('amqp_hosts') +$rabbit_hosts = split($amqp_hosts, ',') +$debug = hiera('debug', false) +$verbose = hiera('verbose', true) +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER') +$rabbit_hash = hiera_hash('rabbit_hash') +$rabbit_ha_queues = hiera('rabbit_ha_queues') + +$ironic_tenant = pick($ironic_hash['tenant'],'services') +$ironic_user = pick($ironic_hash['auth_name'],'ironic') +$ironic_user_password = pick($ironic_hash['user_password'],'ironic') +$ironic_swift_tempurl_key = pick($ironic_hash['swift_tempurl_key'],'ironic') + +$db_host = pick($ironic_hash['db_host'], $database_vip) +$db_user = pick($ironic_hash['db_user'], 'ironic') +$db_name = pick($ironic_hash['db_name'], 'ironic') +$db_password = pick($ironic_hash['db_password'], 'ironic') +$database_connection = 
"mysql://${db_name}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60" + +$tftp_root = '/var/lib/ironic/tftpboot' + +package { 'ironic-fa-deploy': + ensure => 'present', +} + +class { '::ironic': + verbose => $verbose, + debug => $debug, + enabled_drivers => ['fuel_ssh', 'fuel_ipmitool', 'fake'], + rabbit_hosts => $rabbit_hosts, + rabbit_userid => $rabbit_hash['user'], + rabbit_password => $rabbit_hash['password'], + amqp_durable_queues => $rabbit_ha_queues, + use_syslog => $use_syslog, + log_facility => $syslog_log_facility_ironic, + database_connection => $database_connection, + glance_api_servers => $glance_api_servers, +} + +class { '::ironic::client': } + +class { '::ironic::conductor': } + +class { '::ironic::drivers::pxe': + tftp_server => $baremetal_address, + tftp_root => $tftp_root, + tftp_master_path => "${tftp_root}/master_images", +} + +ironic_config { + 'neutron/url': value => "http://${neutron_endpoint}:9696"; + 'keystone_authtoken/auth_uri': value => "http://${service_endpoint}:5000/"; + 'keystone_authtoken/auth_host': value => $service_endpoint; + 'keystone_authtoken/admin_tenant_name': value => $ironic_tenant; + 'keystone_authtoken/admin_user': value => $ironic_user; + 'keystone_authtoken/admin_password': value => $ironic_user_password, secret => true; + 'glance/swift_temp_url_key': value => $ironic_swift_tempurl_key; + 'glance/swift_endpoint_url': value => "http://${baremetal_vip}:8080"; + 'conductor/api_url': value => "http://${baremetal_vip}:6385"; +} + +file { $tftp_root: + ensure => directory, + owner => 'ironic', + group => 'ironic', + mode => '0755', + require => Class['ironic'], +} + +file { "${tftp_root}/pxelinux.0": + ensure => present, + source => '/usr/lib/syslinux/pxelinux.0', + require => Package['syslinux'], +} + +file { "${tftp_root}/map-file": + content => "r ^([^/]) ${tftp_root}/\\1", +} + +class { '::tftp': + username => 'ironic', + directory => $tftp_root, + options => "--map-file ${tftp_root}/map-file", + inetd 
=> false, + require => File["${tftp_root}/map-file"], +} + +package { 'syslinux': + ensure => 'present', +} + +package { 'ipmitool': + ensure => 'present', + before => Class['::ironic::conductor'], +} + +file { "/etc/ironic/fuel_key": + ensure => present, + source => '/var/lib/astute/ironic/ironic', + owner => 'ironic', + group => 'ironic', + mode => '0600', + require => Class['ironic'], +} + diff --git a/f2s/resources/ironic-conductor/meta.yaml b/f2s/resources/ironic-conductor/meta.yaml new file mode 100644 index 00000000..a3762264 --- /dev/null +++ b/f2s/resources/ironic-conductor/meta.yaml @@ -0,0 +1,10 @@ +id: ironic-conductor +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ironic-db/actions/run.pp b/f2s/resources/ironic-db/actions/run.pp new file mode 100644 index 00000000..b663b73f --- /dev/null +++ b/f2s/resources/ironic-db/actions/run.pp @@ -0,0 +1,51 @@ +notice('MODULAR: ironic/db.pp') + +$ironic_hash = hiera_hash('ironic', {}) +$mysql_hash = hiera_hash('mysql', {}) +$database_vip = hiera('database_vip') + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$db_user = pick($ironic_hash['db_user'], 'ironic') +$db_name = pick($ironic_hash['db_name'], 'ironic') +$db_password = pick($ironic_hash['db_password'], $mysql_root_password) + +$db_host = pick($ironic_hash['db_host'], $database_vip) +$db_create = pick($ironic_hash['db_create'], $mysql_db_create) +$db_root_user = pick($ironic_hash['root_user'], $mysql_root_user) +$db_root_password = pick($ironic_hash['root_password'], $mysql_root_password) + +$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) +validate_string($database_vip) + +if $db_create { + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 
'galera'), + } + + class { 'ironic::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['ironic::db::mysql'] +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server diff --git a/f2s/resources/ironic-db/meta.yaml b/f2s/resources/ironic-db/meta.yaml new file mode 100644 index 00000000..5f307f46 --- /dev/null +++ b/f2s/resources/ironic-db/meta.yaml @@ -0,0 +1,20 @@ +id: ironic-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + ironic: + value: null + mysql: + value: null + mysql_custom_setup_class: + value: null + node_name: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ironic-keystone/actions/run.pp b/f2s/resources/ironic-keystone/actions/run.pp new file mode 100644 index 00000000..da4f136b --- /dev/null +++ b/f2s/resources/ironic-keystone/actions/run.pp @@ -0,0 +1,39 @@ +notice('MODULAR: ironic/keystone.pp') + +$ironic_hash = hiera_hash('ironic', {}) +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') +$public_ssl_hash = hiera('public_ssl') +$ironic_tenant = pick($ironic_hash['tenant'],'services') +$ironic_user = pick($ironic_hash['auth_name'],'ironic') +$ironic_user_password = pick($ironic_hash['user_password'],'ironic') +$configure_endpoint = pick($ironic_hash['configure_endpoint'], true) +$configure_user = pick($ironic_hash['configure_user'], true) +$configure_user_role = pick($ironic_hash['configure_user_role'], true) +$service_name = pick($ironic_hash['service_name'], 'ironic') + +$public_address = $public_ssl_hash['services'] ? 
{ + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} + +$region = hiera('region', 'RegionOne') +$public_url = "${public_protocol}://${public_address}:6385" +$admin_url = "http://${management_vip}:6385" +$internal_url = "http://${management_vip}:6385" + +class { 'ironic::keystone::auth': + password => $ironic_user_password, + region => $region, + public_url => $public_url, + internal_url => $internal_url, + admin_url => $admin_url, + configure_endpoint => $configure_endpoint, + configure_user => $configure_user, + configure_user_role => $configure_user_role, + service_name => $service_name, +} diff --git a/f2s/resources/ironic-keystone/meta.yaml b/f2s/resources/ironic-keystone/meta.yaml new file mode 100644 index 00000000..ccf4ca9a --- /dev/null +++ b/f2s/resources/ironic-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: ironic-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + ironic: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null diff --git a/f2s/resources/keystone-db/actions/run.pp b/f2s/resources/keystone-db/actions/run.pp new file mode 100644 index 00000000..f6d5947c --- /dev/null +++ b/f2s/resources/keystone-db/actions/run.pp @@ -0,0 +1,54 @@ +notice('MODULAR: keystone/db.pp') + +$node_name = hiera('node_name') +$network_metadata = hiera_hash('network_metadata', {}) + +$keystone_hash = hiera_hash('keystone', {}) +$mysql_hash = hiera_hash('mysql', {}) +$database_vip = hiera('database_vip') + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$db_user = pick($keystone_hash['db_user'], 'keystone') +$db_name = pick($keystone_hash['db_name'], 'keystone') +$db_password = 
pick($keystone_hash['db_password'], $mysql_root_password) + +$db_host = pick($keystone_hash['db_host'], $database_vip) +$db_create = pick($keystone_hash['db_create'], $mysql_db_create) +$db_root_user = pick($keystone_hash['root_user'], $mysql_root_user) +$db_root_password = pick($keystone_hash['root_password'], $mysql_root_password) + +$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] + +if $db_create { + + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'keystone::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['keystone::db::mysql'] + + +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server diff --git a/f2s/resources/keystone-db/meta.yaml b/f2s/resources/keystone-db/meta.yaml new file mode 100644 index 00000000..40db7bb2 --- /dev/null +++ b/f2s/resources/keystone-db/meta.yaml @@ -0,0 +1,22 @@ +id: keystone-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + keystone: + value: null + mysql: + value: null + mysql_custom_setup_class: + value: null + network_metadata: + value: null + node_name: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/keystone/actions/run.pp b/f2s/resources/keystone/actions/run.pp new file mode 100644 index 00000000..d37f3a0e --- /dev/null +++ b/f2s/resources/keystone/actions/run.pp @@ -0,0 +1,236 @@ +notice('MODULAR: keystone.pp') + +$network_scheme = hiera_hash('network_scheme', {}) +$network_metadata = hiera_hash('network_metadata', {}) +prepare_network_config($network_scheme) + +$node_name = hiera('node_name') + +$keystone_hash = 
hiera_hash('keystone', {}) +$verbose = pick($keystone_hash['verbose'], hiera('verbose', true)) +$debug = pick($keystone_hash['debug'], hiera('debug', false)) +$use_neutron = hiera('use_neutron', false) +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', false) +$access_hash = hiera_hash('access',{}) +$management_vip = hiera('management_vip') +$database_vip = hiera('database_vip') +$public_vip = hiera('public_vip') +$service_endpoint = hiera('service_endpoint') +$glance_hash = hiera_hash('glance', {}) +$nova_hash = hiera_hash('nova', {}) +$cinder_hash = hiera_hash('cinder', {}) +$ceilometer_hash = hiera_hash('ceilometer', {}) +$syslog_log_facility = hiera('syslog_log_facility_keystone') +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$neutron_user_password = hiera('neutron_user_password', false) +$service_workers = pick($keystone_hash['workers'], + min(max($::processorcount, 2), 16)) + +$db_type = 'mysql' +$db_host = pick($keystone_hash['db_host'], $database_vip) +$db_password = $keystone_hash['db_password'] +$db_name = pick($keystone_hash['db_name'], 'keystone') +$db_user = pick($keystone_hash['db_user'], 'keystone') + +$admin_token = $keystone_hash['admin_token'] +$admin_tenant = $access_hash['tenant'] +$admin_email = $access_hash['email'] +$admin_user = $access_hash['user'] +$admin_password = $access_hash['password'] +$region = hiera('region', 'RegionOne') + +$public_ssl_hash = hiera('public_ssl') +$public_service_endpoint = hiera('public_service_endpoint', $public_vip) +$public_address = $public_ssl_hash['services'] ? 
{ + true => $public_ssl_hash['hostname'], + default => $public_service_endpoint, +} + +$admin_address = $service_endpoint +$local_address_for_bind = get_network_role_property('keystone/api', 'ipaddr') + +$memcache_server_port = hiera('memcache_server_port', '11211') +$memcache_pool_maxsize = '100' +$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) +$memcache_address_map = get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache') + +$public_port = '5000' +$admin_port = '35357' +$internal_port = '5000' +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} + +$public_url = "${public_protocol}://${public_address}:${public_port}" +$admin_url = "http://${admin_address}:${admin_port}" +$internal_url = "http://${service_endpoint}:${internal_port}" + +$revoke_driver = 'keystone.contrib.revoke.backends.sql.Revoke' + +$enabled = true +$ssl = false + +$vhost_limit_request_field_size = 'LimitRequestFieldSize 81900' + +$rabbit_password = $rabbit_hash['password'] +$rabbit_user = $rabbit_hash['user'] +$rabbit_hosts = split(hiera('amqp_hosts',''), ',') +$rabbit_virtual_host = '/' + +$max_pool_size = hiera('max_pool_size') +$max_overflow = hiera('max_overflow') +$max_retries = '-1' +$database_idle_timeout = '3600' + +$murano_settings_hash = hiera('murano_settings', {}) +if has_key($murano_settings_hash, 'murano_repo_url') { + $murano_repo_url = $murano_settings_hash['murano_repo_url'] +} else { + $murano_repo_url = 'http://storage.apps.openstack.org' +} + +############################################################################### + +####### KEYSTONE ########### +class { 'openstack::keystone': + verbose => $verbose, + debug => $debug, + db_type => $db_type, + db_host => $db_host, + db_password => $db_password, + db_name => $db_name, + db_user => $db_user, + admin_token => $admin_token, + public_address => $public_address, + public_ssl => $public_ssl_hash['services'], + 
public_hostname => $public_ssl_hash['hostname'], + internal_address => $service_endpoint, + admin_address => $admin_address, + public_bind_host => $local_address_for_bind, + admin_bind_host => $local_address_for_bind, + enabled => $enabled, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + syslog_log_facility => $syslog_log_facility, + region => $region, + memcache_servers => values($memcache_address_map), + memcache_server_port => $memcache_server_port, + memcache_pool_maxsize => $memcache_pool_maxsize, + max_retries => $max_retries, + max_pool_size => $max_pool_size, + max_overflow => $max_overflow, + rabbit_password => $rabbit_password, + rabbit_userid => $rabbit_user, + rabbit_hosts => $rabbit_hosts, + rabbit_virtual_host => $rabbit_virtual_host, + database_idle_timeout => $database_idle_timeout, + revoke_driver => $revoke_driver, + public_url => $public_url, + admin_url => $admin_url, + internal_url => $internal_url, + ceilometer => $ceilometer_hash['enabled'], + service_workers => $service_workers, +} + +####### WSGI ########### + +class { 'osnailyfacter::apache': + listen_ports => hiera_array('apache_ports', ['80', '8888', '5000', '35357']), +} + +class { 'keystone::wsgi::apache': + priority => '05', + threads => 3, + workers => min($::processorcount, 6), + ssl => $ssl, + vhost_custom_fragment => $vhost_limit_request_field_size, + access_log_format => '%h %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"', + + wsgi_script_ensure => $::osfamily ? { + 'RedHat' => 'link', + default => 'file', + }, + wsgi_script_source => $::osfamily ? { + # TODO: (adidenko) use file from package for Debian, when + # https://bugs.launchpad.net/fuel/+bug/1476688 is fixed. 
+ # 'Debian' => '/usr/share/keystone/wsgi.py', + 'RedHat' => '/usr/share/keystone/keystone.wsgi', + default => undef, + }, +} + +include ::tweaks::apache_wrappers + +############################################################################### + +class { 'keystone::roles::admin': + admin => $admin_user, + password => $admin_password, + email => $admin_email, + admin_tenant => $admin_tenant, +} + +class { 'openstack::auth_file': + admin_user => $admin_user, + admin_password => $admin_password, + admin_tenant => $admin_tenant, + region_name => $region, + controller_node => $service_endpoint, + murano_repo_url => $murano_repo_url, +} + +# Get paste.ini source +include keystone::params +$keystone_paste_ini = $::keystone::params::paste_config ? { + undef => '/etc/keystone/keystone-paste.ini', + default => $::keystone::params::paste_config, +} + +# Make sure admin token auth middleware is in place +exec { 'add_admin_token_auth_middleware': + path => ['/bin', '/usr/bin'], + command => "sed -i 's/\\( token_auth \\)/\\1admin_token_auth /' $keystone_paste_ini", + unless => "fgrep -q ' admin_token_auth' $keystone_paste_ini", + require => Package['keystone'], +} + +#Can't use openrc to create admin user +exec { 'purge_openrc': + path => '/bin:/usr/bin:/sbin:/usr/sbin', + command => 'rm -f /root/openrc', + onlyif => 'test -f /root/openrc', +} + +Exec <| title == 'keystone-manage db_sync' |> ~> +Exec <| title == 'purge_openrc' |> + +Exec <| title == 'add_admin_token_auth_middleware' |> -> +Exec <| title == 'keystone-manage db_sync' |> -> +Exec <| title == 'purge_openrc' |> -> +Class['keystone::roles::admin'] -> +Class['openstack::auth_file'] + +$haproxy_stats_url = "http://${service_endpoint}:10000/;csv" + +haproxy_backend_status { 'keystone-public' : + name => 'keystone-1', + url => $haproxy_stats_url, +} + +haproxy_backend_status { 'keystone-admin' : + name => 'keystone-2', + url => $haproxy_stats_url, +} + +Service['keystone'] -> Haproxy_backend_status<||> +Service<| title 
== 'httpd' |> -> Haproxy_backend_status<||> +Haproxy_backend_status<||> -> Class['keystone::roles::admin'] + +####### Disable upstart startup on install ####### +if ($::operatingsystem == 'Ubuntu') { + tweaks::ubuntu_service_override { 'keystone': + package_name => 'keystone', + } +} diff --git a/f2s/resources/keystone/meta.yaml b/f2s/resources/keystone/meta.yaml new file mode 100644 index 00000000..fce3075d --- /dev/null +++ b/f2s/resources/keystone/meta.yaml @@ -0,0 +1,74 @@ +id: keystone +handler: puppetv2 +version: '8.0' +inputs: + access: + value: null + amqp_hosts: + value: null + apache_ports: + value: null + ceilometer: + value: null + cinder: + value: null + database_vip: + value: null + debug: + value: null + fqdn: + value: null + glance: + value: null + keystone: + value: null + management_vip: + value: null + max_overflow: + value: null + max_pool_size: + value: null + memcache_roles: + value: null + memcache_server_port: + value: null + murano_settings: + value: null + network_metadata: + value: null + network_scheme: + value: null + neutron_user_password: + value: null + node_name: + value: null + nodes: + value: null + nova: + value: null + public_service_endpoint: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + rabbit_hash: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + syslog_log_facility_keystone: + value: null + use_neutron: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/logging/actions/run.pp b/f2s/resources/logging/actions/run.pp new file mode 100644 index 00000000..44bbab58 --- /dev/null +++ b/f2s/resources/logging/actions/run.pp @@ -0,0 +1,67 @@ +notice('MODULAR: logging.pp') + +$base_syslog_hash = hiera('base_syslog_hash') +$syslog_hash = hiera('syslog_hash') +$use_syslog = hiera('use_syslog', true) +$debug = pick($syslog_hash['debug'], hiera('debug', false)) 
+$nodes_hash = hiera('nodes', {}) +$roles = node_roles($nodes_hash, hiera('uid')) + +################################################## + +$base_syslog_rserver = { + 'remote_type' => 'tcp', + 'server' => $base_syslog_hash['syslog_server'], + 'port' => $base_syslog_hash['syslog_port'] +} + +$syslog_rserver = { + 'remote_type' => $syslog_hash['syslog_transport'], + 'server' => $syslog_hash['syslog_server'], + 'port' => $syslog_hash['syslog_port'], +} + +if $syslog_hash['metadata']['enabled'] { + $rservers = [$base_syslog_rserver, $syslog_rserver] +} else { + $rservers = [$base_syslog_rserver] +} + +if $use_syslog { + if ($::operatingsystem == 'Ubuntu') { + # ensure the var log folder permissions are correct even if it's a mount + # LP#1489347 + file { '/var/log': + owner => 'root', + group => 'syslog', + mode => '0775', + } + } + + if member($roles, 'ironic') { + $ironic_collector = true + } + + class { '::openstack::logging': + role => 'client', + show_timezone => true, + # log both locally include auth, and remote + log_remote => true, + log_local => true, + log_auth_local => true, + # keep four weekly log rotations, + # force rotate if 300M size have exceeded + rotation => 'weekly', + keep => '4', + minsize => '10M', + maxsize => '100M', + # remote servers to send logs to + rservers => $rservers, + # should be true, if client is running at virtual node + virtual => str2bool($::is_virtual), + # Rabbit doesn't support syslog directly + rabbit_log_level => 'NOTICE', + debug => $debug, + ironic_collector => $ironic_collector, + } +} diff --git a/f2s/resources/logging/meta.yaml b/f2s/resources/logging/meta.yaml new file mode 100644 index 00000000..443636ca --- /dev/null +++ b/f2s/resources/logging/meta.yaml @@ -0,0 +1,24 @@ +id: logging +handler: puppetv2 +version: '8.0' +inputs: + base_syslog_hash: + value: null + debug: + value: null + fqdn: + value: null + node_role: + value: null + nodes: + value: null + puppet_modules: + value: null + role: + value: null + 
syslog_hash: + value: null + uid: + value: null + use_syslog: + value: null diff --git a/f2s/resources/memcached/actions/run.pp b/f2s/resources/memcached/actions/run.pp new file mode 100644 index 00000000..877a381f --- /dev/null +++ b/f2s/resources/memcached/actions/run.pp @@ -0,0 +1,8 @@ +notice('MODULAR: memcached.pp') + +prepare_network_config(hiera('network_scheme', {})) + +class { 'memcached': + listen_ip => get_network_role_property('mgmt/memcache', 'ipaddr'), + max_memory => '50%', +} diff --git a/f2s/resources/memcached/meta.yaml b/f2s/resources/memcached/meta.yaml new file mode 100644 index 00000000..a7395ba4 --- /dev/null +++ b/f2s/resources/memcached/meta.yaml @@ -0,0 +1,12 @@ +id: memcached +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/murano-db/actions/run.pp b/f2s/resources/murano-db/actions/run.pp new file mode 100644 index 00000000..ddc326ba --- /dev/null +++ b/f2s/resources/murano-db/actions/run.pp @@ -0,0 +1,57 @@ +notice('MODULAR: murano/db.pp') + +$node_name = hiera('node_name') +$murano_hash = hiera_hash('murano_hash', {}) +$murano_enabled = pick($murano_hash['enabled'], false) +$mysql_hash = hiera_hash('mysql_hash', {}) +$management_vip = hiera('management_vip', undef) +$database_vip = hiera('database_vip') + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$db_user = pick($murano_hash['db_user'], 'murano') +$db_name = pick($murano_hash['db_name'], 'murano') +$db_password = pick($murano_hash['db_password'], $mysql_root_password) + +$db_host = pick($murano_hash['db_host'], $database_vip) +$db_create = pick($murano_hash['db_create'], $mysql_db_create) +$db_root_user = pick($murano_hash['root_user'], $mysql_root_user) +$db_root_password = pick($murano_hash['root_password'], $mysql_root_password) 
+ +$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) + +if $murano_enabled and $db_create { + + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'murano::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['murano::db::mysql'] + +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server +class murano::api {} +include murano::api diff --git a/f2s/resources/murano-db/meta.yaml b/f2s/resources/murano-db/meta.yaml new file mode 100644 index 00000000..a2ebcfdc --- /dev/null +++ b/f2s/resources/murano-db/meta.yaml @@ -0,0 +1,22 @@ +id: murano-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + murano: + value: null + murano_hash: + value: null + mysql_hash: + value: null + node_name: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/murano-keystone/actions/run.pp b/f2s/resources/murano-keystone/actions/run.pp new file mode 100644 index 00000000..4d132d74 --- /dev/null +++ b/f2s/resources/murano-keystone/actions/run.pp @@ -0,0 +1,36 @@ +notice('MODULAR: murano/keystone.pp') + +$murano_hash = hiera_hash('murano_hash', {}) +$public_ip = hiera('public_vip') +$management_ip = hiera('management_vip') +$public_ssl = hiera('public_ssl') +$region = hiera('region', 'RegionOne') + + +$public_protocol = $public_ssl['services'] ? { + true => 'https', + default => 'http', +} + +$public_address = $public_ssl['services'] ? 
{ + true => $public_ssl['hostname'], + default => $public_ip, +} + +$api_bind_port = '8082' + +$tenant = pick($murano_hash['tenant'], 'services') +$public_url = "${public_protocol}://${public_address}:${api_bind_port}" +$admin_url = "http://${management_ip}:${api_bind_port}" + +################################################################# + +class { 'murano::keystone::auth': + password => $murano_hash['user_password'], + service_type => 'application_catalog', + region => $region, + tenant => $tenant, + public_url => $public_url, + admin_url => $admin_url, + internal_url => $admin_url, +} diff --git a/f2s/resources/murano-keystone/meta.yaml b/f2s/resources/murano-keystone/meta.yaml new file mode 100644 index 00000000..1283949d --- /dev/null +++ b/f2s/resources/murano-keystone/meta.yaml @@ -0,0 +1,22 @@ +id: murano-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + murano_hash: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null diff --git a/f2s/resources/murano/actions/run.pp b/f2s/resources/murano/actions/run.pp new file mode 100644 index 00000000..90750555 --- /dev/null +++ b/f2s/resources/murano/actions/run.pp @@ -0,0 +1,163 @@ +notice('MODULAR: murano.pp') + +prepare_network_config(hiera('network_scheme', {})) + +$murano_hash = hiera_hash('murano_hash', {}) +$murano_settings_hash = hiera_hash('murano_settings', {}) +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$heat_hash = hiera_hash('heat_hash', {}) +$neutron_config = hiera_hash('neutron_config', {}) +$node_role = hiera('node_role') +$public_ip = hiera('public_vip') +$database_ip = hiera('database_vip') +$management_ip = hiera('management_vip') +$region = hiera('region', 'RegionOne') +$use_neutron = hiera('use_neutron', false) +$service_endpoint = hiera('service_endpoint') +$syslog_log_facility_murano = 
hiera('syslog_log_facility_murano') +$debug = pick($murano_hash['debug'], hiera('debug', false)) +$verbose = pick($murano_hash['verbose'], hiera('verbose', true)) +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', false) +$rabbit_ha_queues = hiera('rabbit_ha_queues') +$amqp_port = hiera('amqp_port') +$amqp_hosts = hiera('amqp_hosts') +$public_ssl = hiera_hash('public_ssl', {}) + +################################################################# + +if $murano_hash['enabled'] { + $public_protocol = pick($public_ssl['services'], false) ? { + true => 'https', + default => 'http', + } + + $public_address = pick($public_ssl['services'], false) ? { + true => pick($public_ssl['hostname']), + default => $public_ip, + } + + $firewall_rule = '202 murano-api' + + $api_bind_port = '8082' + $api_bind_host = get_network_role_property('murano/api', 'ipaddr') + + $murano_user = pick($murano_hash['user'], 'murano') + $tenant = pick($murano_hash['tenant'], 'services') + $internal_url = "http://${api_bind_host}:${api_bind_port}" + $db_user = pick($murano_hash['db_user'], 'murano') + $db_name = pick($murano_hash['db_name'], 'murano') + $db_password = pick($murano_hash['db_password']) + $db_host = pick($murano_hash['db_host'], $database_ip) + $read_timeout = '60' + $sql_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?read_timeout=${read_timeout}" + + $external_network = $use_neutron ? { + true => get_ext_net_name($neutron_config['predefined_networks']), + default => undef, + } + + $repository_url = has_key($murano_settings_hash, 'murano_repo_url') ? 
{ + true => $murano_settings_hash['murano_repo_url'], + default => 'http://storage.apps.openstack.org', + } + + ####### Disable upstart startup on install ####### + tweaks::ubuntu_service_override { ['murano-api', 'murano-engine']: + package_name => 'murano', + } + + firewall { $firewall_rule : + dport => $api_bind_port, + proto => 'tcp', + action => 'accept', + } + + class { 'murano' : + verbose => $verbose, + debug => $debug, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + log_facility => $syslog_log_facility_murano, + database_connection => $sql_connection, + keystone_uri => "${public_protocol}://${public_address}:5000/v2.0/", + keystone_username => $murano_user, + keystone_password => $murano_hash['user_password'], + keystone_tenant => $tenant, + identity_uri => "http://${service_endpoint}:35357/", + use_neutron => $use_neutron, + rabbit_os_user => $rabbit_hash['user'], + rabbit_os_password => $rabbit_hash['password'], + rabbit_os_port => $amqp_port, + rabbit_os_hosts => split($amqp_hosts, ','), + rabbit_ha_queues => $rabbit_ha_queues, + rabbit_own_host => $public_ip, + rabbit_own_port => '55572', + rabbit_own_user => 'murano', + rabbit_own_password => $heat_hash['rabbit_password'], + service_host => $api_bind_host, + service_port => $api_bind_port, + external_network => $external_network, + } + + class { 'murano::api': + host => $api_bind_host, + port => $api_bind_port, + } + + class { 'murano::engine': } + + class { 'murano::client': } + + class { 'murano::dashboard': + api_url => $internal_url, + repo_url => $repository_url, + } + + class { 'murano::rabbitmq': + rabbit_user => 'murano', + rabbit_password => $heat_hash['rabbit_password'], + rabbit_port => '55572', + } + + $haproxy_stats_url = "http://${management_ip}:10000/;csv" + + haproxy_backend_status { 'murano-api' : + name => 'murano-api', + url => $haproxy_stats_url, + } + + if ($node_role == 'primary-controller') { + haproxy_backend_status { 'keystone-public' : + name => 'keystone-1', + 
url => $haproxy_stats_url, + } + + haproxy_backend_status { 'keystone-admin' : + name => 'keystone-2', + url => $haproxy_stats_url, + } + + murano::application { 'io.murano' : + os_tenant_name => $tenant, + os_username => $murano_user, + os_password => $murano_hash['user_password'], + os_auth_url => "${public_protocol}://${public_address}:5000/v2.0/", + os_region => $region, + mandatory => true, + } + + Haproxy_backend_status['keystone-admin'] -> Haproxy_backend_status['murano-api'] + Haproxy_backend_status['keystone-public'] -> Haproxy_backend_status['murano-api'] + Haproxy_backend_status['murano-api'] -> Murano::Application['io.murano'] + + Service['murano-api'] -> Murano::Application<| mandatory == true |> + } + + Firewall[$firewall_rule] -> Class['murano::api'] + Service['murano-api'] -> Haproxy_backend_status['murano-api'] +} +######################### + +class openstack::firewall {} +include openstack::firewall diff --git a/f2s/resources/murano/meta.yaml b/f2s/resources/murano/meta.yaml new file mode 100644 index 00000000..4482611a --- /dev/null +++ b/f2s/resources/murano/meta.yaml @@ -0,0 +1,56 @@ +id: murano +handler: puppetv2 +version: '8.0' +inputs: + amqp_hosts: + value: null + amqp_port: + value: null + database_vip: + value: null + debug: + value: null + fqdn: + value: null + heat_hash: + value: null + management_vip: + value: null + murano: + value: null + murano_hash: + value: null + murano_settings: + value: null + network_scheme: + value: null + neutron_config: + value: null + node_role: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + rabbit_ha_queues: + value: null + rabbit_hash: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + syslog_log_facility_murano: + value: null + use_neutron: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/netconfig/actions/run.pp 
b/f2s/resources/netconfig/actions/run.pp new file mode 100644 index 00000000..7494a336 --- /dev/null +++ b/f2s/resources/netconfig/actions/run.pp @@ -0,0 +1,106 @@ +notice('MODULAR: netconfig.pp') + +$network_scheme = hiera('network_scheme') + +if ( $::l23_os =~ /(?i:centos6)/ and $::kernelmajversion == '3.10' ) { + $ovs_datapath_package_name = 'kmod-openvswitch-lt' +} + +class { 'l23network' : + use_ovs => hiera('use_ovs', false), + use_ovs_dkms_datapath_module => $::l23_os ? { + /(?i:redhat7|centos7)/ => false, + default => true + }, + ovs_datapath_package_name => $ovs_datapath_package_name, +} +prepare_network_config($network_scheme) +$sdn = generate_network_config() +notify {'SDN': message => $sdn } + +#Set arp_accept to 1 by default #lp1456272 +sysctl::value { 'net.ipv4.conf.all.arp_accept': value => '1' } +sysctl::value { 'net.ipv4.conf.default.arp_accept': value => '1' } + +# setting kernel reserved ports +# defaults are 49000,49001,35357,41055,41056,58882 +class { 'openstack::reserved_ports': } + +### TCP connections keepalives and failover related parameters ### +# configure TCP keepalive for host OS. +# Send 3 probes each 8 seconds, if the connection was idle +# for 30 seconds. Consider it dead, if there were no responses +# during the check time frame, i.e. 30+3*8=54 seconds overall. +# (note: overall check time frame should be lower than +# nova_report_interval).
+class { 'openstack::keepalive' : + tcpka_time => '30', + tcpka_probes => '8', + tcpka_intvl => '3', + tcp_retries2 => '5', +} + +# increase network backlog for performance on fast networks +sysctl::value { 'net.core.netdev_max_backlog': value => '261144' } + +L2_port<||> -> Sysfs_config_value<||> +L3_ifconfig<||> -> Sysfs_config_value<||> +L3_route<||> -> Sysfs_config_value<||> + +class { 'sysfs' :} + +if hiera('set_rps', true) { + sysfs_config_value { 'rps_cpus' : + ensure => 'present', + name => '/etc/sysfs.d/rps_cpus.conf', + value => cpu_affinity_hex($::processorcount), + sysfs => '/sys/class/net/*/queues/rx-*/rps_cpus', + exclude => '/sys/class/net/lo/*', + } +} + +if hiera('set_xps', true) { + sysfs_config_value { 'xps_cpus' : + ensure => 'present', + name => '/etc/sysfs.d/xps_cpus.conf', + value => cpu_affinity_hex($::processorcount), + sysfs => '/sys/class/net/*/queues/tx-*/xps_cpus', + exclude => '/sys/class/net/lo/*', + } +} + +if !defined(Package['irqbalance']) { + package { 'irqbalance': + ensure => installed, + } +} + +if !defined(Service['irqbalance']) { + service { 'irqbalance': + ensure => running, + require => Package['irqbalance'], + } +} + +# We need to wait at least 30 seconds for the bridges and other interfaces to +# come up after being created. This should allow for all interfaces to be up +# and ready for traffic before proceeding with further deploy steps. 
LP#1458954 +exec { 'wait-for-interfaces': + path => '/usr/bin:/bin', + command => 'sleep 32', +} + +# check that network was configured successfully +# and the default gateway is online +$default_gateway = hiera('default_gateway') + +ping_host { $default_gateway : + ensure => 'up', +} +L2_port<||> -> Ping_host[$default_gateway] +L2_bond<||> -> Ping_host[$default_gateway] +L3_ifconfig<||> -> Ping_host[$default_gateway] +L3_route<||> -> Ping_host[$default_gateway] + +Class['l23network'] -> +Exec['wait-for-interfaces'] diff --git a/f2s/resources/netconfig/meta.yaml b/f2s/resources/netconfig/meta.yaml new file mode 100644 index 00000000..695c9e22 --- /dev/null +++ b/f2s/resources/netconfig/meta.yaml @@ -0,0 +1,24 @@ +id: netconfig +handler: puppetv2 +version: '8.0' +inputs: + default_gateway: + value: null + fqdn: + value: null + network_metadata: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null + set_rps: + value: null + set_xps: + value: null + use_neutron: + value: null + use_ovs: + value: null diff --git a/f2s/resources/neutron-db/actions/run.pp b/f2s/resources/neutron-db/actions/run.pp new file mode 100644 index 00000000..8cdbae15 --- /dev/null +++ b/f2s/resources/neutron-db/actions/run.pp @@ -0,0 +1,59 @@ +notice('MODULAR: openstack-network/db.pp') + +$node_name = hiera('node_name') +$use_neutron = hiera('use_neutron', false) +$neutron_hash = hiera_hash('quantum_settings', {}) +$mysql_hash = hiera_hash('mysql', {}) +$management_vip = hiera('management_vip', undef) +$database_vip = hiera('database_vip', undef) + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$neutron_db = merge($neutron_hash['database'], {}) + +$db_user = pick($neutron_db['db_user'], 'neutron') +$db_name = pick($neutron_db['db_name'], 'neutron') +$db_password = pick($neutron_db['passwd'], $mysql_root_password) + +$db_host = 
pick($neutron_db['db_host'], $database_vip) +$db_create = pick($neutron_db['db_create'], $mysql_db_create) +$db_root_user = pick($neutron_db['root_user'], $mysql_root_user) +$db_root_password = pick($neutron_db['root_password'], $mysql_root_password) + +$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) + +if $use_neutron and $db_create { + + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'neutron::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['neutron::db::mysql'] + +} + +# =========================================================================== + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server diff --git a/f2s/resources/neutron-db/meta.yaml b/f2s/resources/neutron-db/meta.yaml new file mode 100644 index 00000000..bf93b0e4 --- /dev/null +++ b/f2s/resources/neutron-db/meta.yaml @@ -0,0 +1,26 @@ +id: neutron-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + mysql: + value: null + mysql_custom_setup_class: + value: null + neutron_db_password: + value: null + node_name: + value: null + puppet_modules: + value: null + quantum_settings: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/neutron-keystone/actions/run.pp b/f2s/resources/neutron-keystone/actions/run.pp new file mode 100644 index 00000000..937b42b7 --- /dev/null +++ b/f2s/resources/neutron-keystone/actions/run.pp @@ -0,0 +1,50 @@ +notice('MODULAR: openstack-network/keystone.pp') + +$use_neutron = hiera('use_neutron', false) +$neutron_hash = 
hiera_hash('quantum_settings', {}) +$public_vip = hiera('public_vip') +$public_ssl_hash = hiera('public_ssl') +$public_address = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} +$admin_address = hiera('management_vip') +$admin_protocol = 'http' +$region = pick($neutron_hash['region'], hiera('region', 'RegionOne')) + +$password = $neutron_hash['keystone']['admin_password'] +$auth_name = pick($neutron_hash['auth_name'], 'neutron') +$configure_endpoint = pick($neutron_hash['configure_endpoint'], true) +$configure_user = pick($neutron_hash['configure_user'], true) +$configure_user_role = pick($neutron_hash['configure_user_role'], true) +$service_name = pick($neutron_hash['service_name'], 'neutron') +$tenant = pick($neutron_hash['tenant'], 'services') + +$port = '9696' + +$public_url = "${public_protocol}://${public_address}:${port}" +$internal_url = "${admin_protocol}://${admin_address}:${port}" +$admin_url = "${admin_protocol}://${admin_address}:${port}" + + +validate_string($public_address) +validate_string($password) + +if $use_neutron { + class { '::neutron::keystone::auth': + password => $password, + auth_name => $auth_name, + configure_endpoint => $configure_endpoint, + configure_user => $configure_user, + configure_user_role => $configure_user_role, + service_name => $service_name, + public_url => $public_url, + internal_url => $internal_url, + admin_url => $admin_url, + region => $region, + } +} diff --git a/f2s/resources/neutron-keystone/meta.yaml b/f2s/resources/neutron-keystone/meta.yaml new file mode 100644 index 00000000..9f3a3479 --- /dev/null +++ b/f2s/resources/neutron-keystone/meta.yaml @@ -0,0 +1,22 @@ +id: neutron-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: 
null + quantum_settings: + value: null + region: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/nova-db/actions/run.pp b/f2s/resources/nova-db/actions/run.pp new file mode 100644 index 00000000..88470175 --- /dev/null +++ b/f2s/resources/nova-db/actions/run.pp @@ -0,0 +1,53 @@ +notice('MODULAR: openstack-controller/db.pp') + +$nova_hash = hiera_hash('nova', {}) +$mysql_hash = hiera_hash('mysql', {}) +$management_vip = hiera('management_vip', undef) +$database_vip = hiera('database_vip', undef) + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = $mysql_hash['root_password'] + +$db_user = pick($nova_hash['db_user'], 'nova') +$db_name = pick($nova_hash['db_name'], 'nova') +$db_password = pick($nova_hash['db_password'], $mysql_root_password) + +$db_host = pick($nova_hash['db_host'], $database_vip) +$db_create = pick($nova_hash['db_create'], $mysql_db_create) +$db_root_user = pick($nova_hash['root_user'], $mysql_root_user) +$db_root_password = pick($nova_hash['root_password'], $mysql_root_password) + +$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) + +if $db_create { + + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'nova::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['nova::db::mysql'] + +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server diff --git a/f2s/resources/nova-db/meta.yaml b/f2s/resources/nova-db/meta.yaml new file mode 100644 index 00000000..7335e140 --- /dev/null +++ 
b/f2s/resources/nova-db/meta.yaml @@ -0,0 +1,20 @@ +id: nova-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + mysql: + value: null + mysql_custom_setup_class: + value: null + nova: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/nova-keystone/actions/run.pp b/f2s/resources/nova-keystone/actions/run.pp new file mode 100644 index 00000000..0f5a4fc8 --- /dev/null +++ b/f2s/resources/nova-keystone/actions/run.pp @@ -0,0 +1,56 @@ +notice('MODULAR: openstack-controller/keystone.pp') + +$nova_hash = hiera_hash('nova', {}) +$public_vip = hiera('public_vip') +$public_ssl_hash = hiera('public_ssl') +$public_address = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} +$admin_protocol = 'http' +$admin_address = hiera('management_vip') +$region = pick($nova_hash['region'], hiera('region', 'RegionOne')) + +$password = $nova_hash['user_password'] +$auth_name = pick($nova_hash['auth_name'], 'nova') +$configure_endpoint = pick($nova_hash['configure_endpoint'], true) +$configure_user = pick($nova_hash['configure_user'], true) +$configure_user_role = pick($nova_hash['configure_user_role'], true) +$service_name = pick($nova_hash['service_name'], 'nova') +$tenant = pick($nova_hash['tenant'], 'services') + +$compute_port = '8774' +$public_base_url = "${public_protocol}://${public_address}:${compute_port}" +$admin_base_url = "${admin_protocol}://${admin_address}:${compute_port}" + +$ec2_port = '8773' +$ec2_public_url = "${public_protocol}://${public_address}:${ec2_port}/services/Cloud" +$ec2_internal_url = "${admin_protocol}://${admin_address}:${ec2_port}/services/Cloud" +$ec2_admin_url = "${admin_protocol}://${admin_address}:${ec2_port}/services/Admin" + +validate_string($public_address) 
+validate_string($password) + +class { '::nova::keystone::auth': + password => $password, + auth_name => $auth_name, + configure_endpoint => $configure_endpoint, + configure_endpoint_v3 => $configure_endpoint, + configure_user => $configure_user, + configure_user_role => $configure_user_role, + service_name => $service_name, + public_url => "${public_base_url}/v2/%(tenant_id)s", + public_url_v3 => "${public_base_url}/v3", + internal_url => "${admin_base_url}/v2/%(tenant_id)s", + internal_url_v3 => "${admin_base_url}/v3", + admin_url => "${admin_base_url}/v2/%(tenant_id)s", + admin_url_v3 => "${admin_base_url}/v3", + region => $region, + ec2_public_url => $ec2_public_url, + ec2_internal_url => $ec2_internal_url, + ec2_admin_url => $ec2_admin_url, +} diff --git a/f2s/resources/nova-keystone/meta.yaml b/f2s/resources/nova-keystone/meta.yaml new file mode 100644 index 00000000..58021f68 --- /dev/null +++ b/f2s/resources/nova-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: nova-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + nova: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null diff --git a/f2s/resources/ntp-check/actions/run.pp b/f2s/resources/ntp-check/actions/run.pp new file mode 100644 index 00000000..193e64f2 --- /dev/null +++ b/f2s/resources/ntp-check/actions/run.pp @@ -0,0 +1,6 @@ +notice('MODULAR: ntp-check.pp') +# get the ntp configuration from hiera +$ntp_servers = hiera('external_ntp') +# take the comma separated list and turn it into an array of servers and then +# pass it to the ntp_available function to check that at least 1 server works +ntp_available(strip(split($ntp_servers['ntp_list'], ','))) diff --git a/f2s/resources/ntp-check/meta.yaml b/f2s/resources/ntp-check/meta.yaml new file mode 100644 index 00000000..a25929c7 --- /dev/null +++ b/f2s/resources/ntp-check/meta.yaml @@ -0,0 +1,12 @@ +id:
ntp-check +handler: puppetv2 +version: '8.0' +inputs: + external_ntp: + value: null + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ntp-client/actions/run.pp b/f2s/resources/ntp-client/actions/run.pp new file mode 100644 index 00000000..38a37f2b --- /dev/null +++ b/f2s/resources/ntp-client/actions/run.pp @@ -0,0 +1,26 @@ +notice('MODULAR: ntp-client.pp') + +$management_vrouter_vip = hiera('management_vrouter_vip') +$ntp_servers = hiera_array('ntp_servers', [$management_vrouter_vip]) +$nodes_hash = hiera('nodes', {}) +$roles = node_roles($nodes_hash, hiera('uid')) + +if !(member($roles, 'controller') or member($roles, 'primary-controller')) { + class { 'ntp': + servers => $ntp_servers, + service_ensure => 'running', + service_enable => true, + disable_monitor => true, + iburst_enable => true, + tinker => true, + panic => '0', + stepout => '5', + minpoll => '3', + } + + include ntp::params + tweaks::ubuntu_service_override { 'ntpd': + package_name => $ntp::params::package_name, + service_name => $ntp::params::service_name, + } +} diff --git a/f2s/resources/ntp-client/meta.yaml b/f2s/resources/ntp-client/meta.yaml new file mode 100644 index 00000000..568e2f2e --- /dev/null +++ b/f2s/resources/ntp-client/meta.yaml @@ -0,0 +1,10 @@ +id: ntp-client +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ntp-server/actions/run.pp b/f2s/resources/ntp-server/actions/run.pp new file mode 100644 index 00000000..6055f681 --- /dev/null +++ b/f2s/resources/ntp-server/actions/run.pp @@ -0,0 +1,31 @@ +notice('MODULAR: ntp-server.pp') + +$ntp_servers = hiera('external_ntp') + +class { 'ntp': + servers => strip(split($ntp_servers['ntp_list'], ',')), + service_enable => true, + service_ensure => 'running', + disable_monitor => true, + iburst_enable => true, + tinker => true, + panic => '0', + stepout => '5', + minpoll => '3', + restrict => 
[ + '-4 default kod nomodify notrap nopeer noquery', + '-6 default kod nomodify notrap nopeer noquery', + '127.0.0.1', + '::1', + ], +} + +class { 'cluster::ntp_ocf': } + +if $::operatingsystem == 'Ubuntu' { + include ntp::params + tweaks::ubuntu_service_override { 'ntpd': + package_name => $ntp::params::package_name, + service_name => $ntp::params::service_name, + } +} diff --git a/f2s/resources/ntp-server/meta.yaml b/f2s/resources/ntp-server/meta.yaml new file mode 100644 index 00000000..78918ad7 --- /dev/null +++ b/f2s/resources/ntp-server/meta.yaml @@ -0,0 +1,12 @@ +id: ntp-server +handler: puppetv2 +version: '8.0' +inputs: + external_ntp: + value: null + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-cinder/actions/run.pp b/f2s/resources/openstack-cinder/actions/run.pp new file mode 100644 index 00000000..7ea72770 --- /dev/null +++ b/f2s/resources/openstack-cinder/actions/run.pp @@ -0,0 +1,107 @@ +notice('MODULAR: openstack-cinder.pp') + +#Network stuff +prepare_network_config(hiera('network_scheme', {})) +$cinder_hash = hiera_hash('cinder_hash', {}) +$management_vip = hiera('management_vip') +$queue_provider = hiera('queue_provider', 'rabbitmq') +$cinder_volume_group = hiera('cinder_volume_group', 'cinder') +$nodes_hash = hiera('nodes', {}) +$storage_hash = hiera_hash('storage', {}) +$ceilometer_hash = hiera_hash('ceilometer_hash',{}) +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$service_endpoint = hiera('service_endpoint') +$service_workers = pick($cinder_hash['workers'], + min(max($::processorcount, 2), 16)) + +$cinder_db_password = $cinder_hash[db_password] +$cinder_user_password = $cinder_hash[user_password] +$keystone_user = pick($cinder_hash['user'], 'cinder') +$keystone_tenant = pick($cinder_hash['tenant'], 'services') +$region = hiera('region', 'RegionOne') +$db_host = pick($cinder_hash['db_host'], hiera('database_vip')) +$cinder_db_user = pick($cinder_hash['db_user'], 'cinder') 
+$cinder_db_name = pick($cinder_hash['db_name'], 'cinder') +$roles = node_roles($nodes_hash, hiera('uid')) +$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") + +# Determine who should get the volume service +if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { + $manage_volumes = 'iscsi' +} elsif ($storage_hash['volumes_ceph']) { + $manage_volumes = 'ceph' +} elsif member($roles, 'cinder-vmware') { + $manage_volumes = 'vmdk' +} else { + $manage_volumes = false +} + +# SQLAlchemy backend configuration +$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) +$max_overflow = min($::processorcount * 5 + 0, 60 + 0) +$max_retries = '-1' +$idle_timeout = '3600' + +$keystone_auth_protocol = 'http' +$keystone_auth_host = $service_endpoint +$service_port = '5000' +$auth_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" +$identity_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" + +$openstack_version = { + 'keystone' => 'installed', + 'glance' => 'installed', + 'horizon' => 'installed', + 'nova' => 'installed', + 'novncproxy' => 'installed', + 'cinder' => 'installed', +} + +######### Cinder Controller Services ######## +class {'openstack::cinder': + sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_name}?charset=utf8&read_timeout=60", + queue_provider => $queue_provider, + amqp_hosts => hiera('amqp_hosts',''), + amqp_user => $rabbit_hash['user'], + amqp_password => $rabbit_hash['password'], + rabbit_ha_queues => true, + volume_group => $cinder_volume_group, + physical_volume => undef, + manage_volumes => $manage_volumes, + enabled => true, + glance_api_servers => $glance_api_servers, + auth_host => $service_endpoint, + bind_host => get_network_role_property('cinder/api', 'ipaddr'), + iscsi_bind_host => get_network_role_property('cinder/iscsi', 'ipaddr'), + keystone_user => $keystone_user, + keystone_tenant => $keystone_tenant, + auth_uri => 
$auth_uri, + region => $region, + identity_uri => $identity_uri, + cinder_user_password => $cinder_user_password, + use_syslog => hiera('use_syslog', true), + use_stderr => hiera('use_stderr', false), + verbose => pick($cinder_hash['verbose'], hiera('verbose', true)), + debug => pick($cinder_hash['debug'], hiera('debug', true)), + syslog_log_facility => hiera('syslog_log_facility_cinder', 'LOG_LOCAL3'), + cinder_rate_limits => hiera('cinder_rate_limits'), + max_retries => $max_retries, + max_pool_size => $max_pool_size, + max_overflow => $max_overflow, + idle_timeout => $idle_timeout, + ceilometer => $ceilometer_hash[enabled], + service_workers => $service_workers, +} # end class + +####### Disable upstart startup on install ####### +if($::operatingsystem == 'Ubuntu') { + tweaks::ubuntu_service_override { 'cinder-api': + package_name => 'cinder-api', + } + tweaks::ubuntu_service_override { 'cinder-backup': + package_name => 'cinder-backup', + } + tweaks::ubuntu_service_override { 'cinder-scheduler': + package_name => 'cinder-scheduler', + } +} diff --git a/f2s/resources/openstack-cinder/meta.yaml b/f2s/resources/openstack-cinder/meta.yaml new file mode 100644 index 00000000..5b0ade7a --- /dev/null +++ b/f2s/resources/openstack-cinder/meta.yaml @@ -0,0 +1,56 @@ +id: openstack-cinder +handler: puppetv2 +version: '8.0' +inputs: + amqp_hosts: + value: null + ceilometer_hash: + value: null + cinder: + value: null + cinder_hash: + value: null + cinder_rate_limits: + value: null + cinder_volume_group: + value: null + database_vip: + value: null + debug: + value: null + fqdn: + value: null + glance_api_servers: + value: null + management_vip: + value: null + network_scheme: + value: null + nodes: + value: null + puppet_modules: + value: null + queue_provider: + value: null + rabbit_ha_queues: + value: null + rabbit_hash: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + storage: + value: null + syslog_log_facility_cinder: + 
value: null + uid: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/openstack-controller/actions/run.pp b/f2s/resources/openstack-controller/actions/run.pp new file mode 100644 index 00000000..9406d362 --- /dev/null +++ b/f2s/resources/openstack-controller/actions/run.pp @@ -0,0 +1,233 @@ +notice('MODULAR: openstack-controller.pp') + +$network_scheme = hiera_hash('network_scheme', {}) +$network_metadata = hiera_hash('network_metadata', {}) +prepare_network_config($network_scheme) + +$nova_rate_limits = hiera('nova_rate_limits') +$primary_controller = hiera('primary_controller') +$use_neutron = hiera('use_neutron', false) +$nova_report_interval = hiera('nova_report_interval') +$nova_service_down_time = hiera('nova_service_down_time') +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', false) +$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') +$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') +$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') +$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') +$management_vip = hiera('management_vip') +$public_vip = hiera('public_vip') +$sahara_hash = hiera_hash('sahara', {}) +$nodes_hash = hiera('nodes', {}) +$mysql_hash = hiera_hash('mysql', {}) +$access_hash = hiera_hash('access', {}) +$keystone_hash = hiera_hash('keystone', {}) +$glance_hash = hiera_hash('glance', {}) +$storage_hash = hiera_hash('storage', {}) +$nova_hash = hiera_hash('nova', {}) +$nova_config_hash = hiera_hash('nova_config', {}) +$api_bind_address = get_network_role_property('nova/api', 'ipaddr') +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$ceilometer_hash = hiera_hash('ceilometer',{}) +$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') +$workloads_hash = hiera_hash('workloads_collector', {}) +$service_endpoint = 
hiera('service_endpoint') +$db_host = pick($nova_hash['db_host'], hiera('database_vip')) +$nova_db_user = pick($nova_hash['db_user'], 'nova') +$keystone_user = pick($nova_hash['user'], 'nova') +$keystone_tenant = pick($nova_hash['tenant'], 'services') +$glance_api_servers = hiera('glance_api_servers', "$management_vip:9292") +$region = hiera('region', 'RegionOne') +$service_workers = pick($nova_hash['workers'], + min(max($::processorcount, 2), 16)) +$ironic_hash = hiera_hash('ironic', {}) + +$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) +$memcache_ipaddrs = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes,'mgmt/memcache'))) +$roles = node_roles($nodes_hash, hiera('uid')) +$openstack_controller_hash = hiera_hash('openstack_controller', {}) + +$floating_hash = {} + +if $use_neutron { + $network_provider = 'neutron' + $novanetwork_params = {} + $neutron_config = hiera_hash('quantum_settings') + $neutron_db_password = $neutron_config['database']['passwd'] + $neutron_user_password = $neutron_config['keystone']['admin_password'] + $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] + $base_mac = $neutron_config['L2']['base_mac'] +} else { + $network_provider = 'nova' + $floating_ips_range = hiera('floating_network_range') + $neutron_config = {} + $novanetwork_params = hiera('novanetwork_parameters') +} + +# SQLAlchemy backend configuration +$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) +$max_overflow = min($::processorcount * 5 + 0, 60 + 0) +$max_retries = '-1' +$idle_timeout = '3600' + +# TODO: openstack_version is confusing, there's such string var in hiera and hardcoded hash +$hiera_openstack_version = hiera('openstack_version') +$openstack_version = { + 'keystone' => 'installed', + 'glance' => 'installed', + 'horizon' => 'installed', + 'nova' => 'installed', + 'novncproxy' => 'installed', + 'cinder' => 'installed', +} + 
+################################################################# +if hiera('use_vcenter', false) or hiera('libvirt_type') == 'vcenter' { + $multi_host = false +} else { + $multi_host = true +} + +class { '::openstack::controller': + private_interface => $use_neutron ? { true=>false, default=>hiera('private_int')}, + public_interface => hiera('public_int', undef), + public_address => $public_vip, # It is feature for HA mode. + internal_address => $management_vip, # All internal traffic goes + admin_address => $management_vip, # through load balancer. + floating_range => $use_neutron ? { true =>$floating_hash, default =>false}, + fixed_range => $use_neutron ? { true =>false, default =>hiera('fixed_network_range')}, + multi_host => $multi_host, + network_config => hiera('network_config', {}), + num_networks => hiera('num_networks', undef), + network_size => hiera('network_size', undef), + network_manager => hiera('network_manager', undef), + network_provider => $network_provider, + verbose => pick($openstack_controller_hash['verbose'], true), + debug => pick($openstack_controller_hash['debug'], hiera('debug', true)), + auto_assign_floating_ip => hiera('auto_assign_floating_ip', false), + glance_api_servers => $glance_api_servers, + primary_controller => $primary_controller, + novnc_address => $api_bind_address, + nova_db_user => $nova_db_user, + nova_db_password => $nova_hash[db_password], + nova_user => $keystone_user, + nova_user_password => $nova_hash[user_password], + nova_user_tenant => $keystone_tenant, + nova_hash => $nova_hash, + queue_provider => 'rabbitmq', + amqp_hosts => hiera('amqp_hosts',''), + amqp_user => $rabbit_hash['user'], + amqp_password => $rabbit_hash['password'], + rabbit_ha_queues => true, + cache_server_ip => $memcache_ipaddrs, + api_bind_address => $api_bind_address, + db_host => $db_host, + service_endpoint => $service_endpoint, + neutron_metadata_proxy_secret => $neutron_metadata_proxy_secret, + cinder => true, + ceilometer => 
$ceilometer_hash[enabled], + service_workers => $service_workers, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + syslog_log_facility_nova => $syslog_log_facility_nova, + nova_rate_limits => $nova_rate_limits, + nova_report_interval => $nova_report_interval, + nova_service_down_time => $nova_service_down_time, + ha_mode => true, + # SQLALchemy backend + max_retries => $max_retries, + max_pool_size => $max_pool_size, + max_overflow => $max_overflow, + idle_timeout => $idle_timeout, +} + +#TODO: PUT this configuration stanza into nova class +nova_config { 'DEFAULT/use_cow_images': value => hiera('use_cow_images')} + +if $primary_controller { + + $haproxy_stats_url = "http://${management_vip}:10000/;csv" + + haproxy_backend_status { 'nova-api' : + name => 'nova-api-2', + url => $haproxy_stats_url, + } + + Openstack::Ha::Haproxy_service <| |> -> Haproxy_backend_status <| |> + + Class['nova::api'] -> Haproxy_backend_status['nova-api'] + + exec { 'create-m1.micro-flavor' : + path => '/sbin:/usr/sbin:/bin:/usr/bin', + environment => [ + "OS_TENANT_NAME=${keystone_tenant}", + "OS_USERNAME=${keystone_user}", + "OS_PASSWORD=${nova_hash['user_password']}", + "OS_AUTH_URL=http://${service_endpoint}:5000/v2.0/", + 'OS_ENDPOINT_TYPE=internalURL', + "OS_REGION_NAME=${region}", + "NOVA_ENDPOINT_TYPE=internalURL", + ], + command => 'bash -c "nova flavor-create --is-public true m1.micro auto 64 0 1"', + #FIXME(mattymo): Upstream bug PUP-2299 for retries in unless/onlyif + unless => 'bash -c "for tries in {1..10}; do nova flavor-list | grep -q m1.micro && exit 0; sleep 2; done"; exit 1', + tries => 10, + try_sleep => 2, + require => Class['nova'], + } + + Haproxy_backend_status <| |> -> Exec<| title == 'create-m1.micro-flavor' |> + + if ! 
$use_neutron { + nova_floating_range { $floating_ips_range: + ensure => 'present', + pool => 'nova', + username => $access_hash[user], + api_key => $access_hash[password], + auth_method => 'password', + auth_url => "http://${service_endpoint}:5000/v2.0/", + authtenant_name => $access_hash[tenant], + api_retries => 10, + } + Haproxy_backend_status['nova-api'] -> Nova_floating_range <| |> + } +} + +nova_config { + 'DEFAULT/teardown_unused_network_gateway': value => 'True' +} + +if $sahara_hash['enabled'] { + $nova_scheduler_default_filters = [ 'DifferentHostFilter' ] + if $storage_hash['volumes_lvm'] { + $cinder_scheduler_filters = [ 'InstanceLocalityFilter' ] + } else { + $cinder_scheduler_filters = [] + } +} else { + $nova_scheduler_default_filters = [] + $cinder_scheduler_filters = [] +} + +if $ironic_hash['enabled'] { + $scheduler_host_manager = 'nova.scheduler.ironic_host_manager.IronicHostManager' +} + +class { '::nova::scheduler::filter': + cpu_allocation_ratio => pick($nova_hash['cpu_allocation_ratio'], '8.0'), + disk_allocation_ratio => pick($nova_hash['disk_allocation_ratio'], '1.0'), + ram_allocation_ratio => pick($nova_hash['ram_allocation_ratio'], '1.0'), + scheduler_host_subset_size => pick($nova_hash['scheduler_host_subset_size'], '30'), + scheduler_default_filters => concat($nova_scheduler_default_filters, pick($nova_config_hash['default_filters'], [ 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'CoreFilter', 'DiskFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter' ])), + scheduler_host_manager => $scheduler_host_manager, +} + +class { 'cinder::scheduler::filter': + scheduler_default_filters => concat($cinder_scheduler_filters, [ 'AvailabilityZoneFilter', 'CapacityFilter', 'CapabilitiesFilter' ]) +} + +# From logasy filter.pp +nova_config { + 'DEFAULT/ram_weight_multiplier': value => '1.0' +} + diff --git a/f2s/resources/openstack-controller/meta.yaml 
b/f2s/resources/openstack-controller/meta.yaml new file mode 100644 index 00000000..05b1cf94 --- /dev/null +++ b/f2s/resources/openstack-controller/meta.yaml @@ -0,0 +1,110 @@ +id: openstack-controller +handler: puppetv2 +version: '8.0' +inputs: + access: + value: null + amqp_hosts: + value: null + auto_assign_floating_ip: + value: null + ceilometer: + value: null + database_vip: + value: null + debug: + value: null + fqdn: + value: null + glance: + value: null + glance_api_servers: + value: null + ironic: + value: null + keystone: + value: null + libvirt_type: + value: null + management_vip: + value: null + memcache_roles: + value: null + mysql: + value: null + network_config: + value: null + network_manager: + value: null + network_metadata: + value: null + network_scheme: + value: null + network_size: + value: null + nodes: + value: null + nova: + value: null + nova_config: + value: null + nova_quota: + value: null + nova_rate_limits: + value: null + nova_report_interval: + value: null + nova_service_down_time: + value: null + num_networks: + value: null + openstack_controller: + value: null + openstack_version: + value: null + primary_controller: + value: null + public_int: + value: null + public_vip: + value: null + puppet_modules: + value: null + quantum_settings: + value: null + rabbit_hash: + value: null + region: + value: null + role: + value: null + sahara: + value: null + service_endpoint: + value: null + storage: + value: null + syslog_log_facility_ceph: + value: null + syslog_log_facility_glance: + value: null + syslog_log_facility_keystone: + value: null + syslog_log_facility_neutron: + value: null + syslog_log_facility_nova: + value: null + uid: + value: null + use_cow_images: + value: null + use_neutron: + value: null + use_stderr: + value: null + use_syslog: + value: null + use_vcenter: + value: null + workloads_collector: + value: null diff --git a/f2s/resources/openstack-haproxy-ceilometer/actions/run.pp 
b/f2s/resources/openstack-haproxy-ceilometer/actions/run.pp new file mode 100644 index 00000000..74edc62e --- /dev/null +++ b/f2s/resources/openstack-haproxy-ceilometer/actions/run.pp @@ -0,0 +1,23 @@ +notice('MODULAR: openstack-haproxy-ceilometer.pp') + +$ceilometer_hash = hiera_hash('ceilometer',{}) +# NOT enabled by default +$use_ceilometer = pick($ceilometer_hash['enabled'], false) +$public_ssl_hash = hiera('public_ssl') +$ceilometer_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceilometer_nodes'), 'ceilometer/api') + +if ($use_ceilometer) { + $server_names = hiera_array('ceilometer_names', keys($ceilometer_address_map)) + $ipaddresses = hiera_array('ceilometer_ipaddresses', values($ceilometer_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + # configure ceilometer ha proxy + class { '::openstack::ha::ceilometer': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-ceilometer/meta.yaml b/f2s/resources/openstack-haproxy-ceilometer/meta.yaml new file mode 100644 index 00000000..f61cb4dc --- /dev/null +++ b/f2s/resources/openstack-haproxy-ceilometer/meta.yaml @@ -0,0 +1,16 @@ +id: openstack-haproxy-ceilometer +handler: puppetv2 +version: '8.0' +inputs: + ceilometer: + value: null + ceilometer_nodes: + value: null + fqdn: + value: null + public_ssl: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-cinder/actions/run.pp b/f2s/resources/openstack-haproxy-cinder/actions/run.pp new file mode 100644 index 00000000..238e0ecd --- /dev/null +++ b/f2s/resources/openstack-haproxy-cinder/actions/run.pp @@ -0,0 +1,24 @@ +notice('MODULAR: openstack-haproxy-cinder.pp') + +$network_metadata = hiera_hash('network_metadata') +$cinder_hash = 
hiera_hash('cinder_hash', {}) +# enabled by default +$use_cinder = pick($cinder_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') + +$cinder_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('cinder_nodes'), 'cinder/api') +if ($use_cinder) { + $server_names = hiera_array('cinder_names', keys($cinder_address_map)) + $ipaddresses = hiera_array('cinder_ipaddresses', values($cinder_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + # configure cinder ha proxy + class { '::openstack::ha::cinder': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-cinder/meta.yaml b/f2s/resources/openstack-haproxy-cinder/meta.yaml new file mode 100644 index 00000000..e831c7d4 --- /dev/null +++ b/f2s/resources/openstack-haproxy-cinder/meta.yaml @@ -0,0 +1,26 @@ +id: openstack-haproxy-cinder +handler: puppetv2 +version: '8.0' +inputs: + cinder_hash: + value: null + cinder_ipaddresses: + value: null + cinder_names: + value: null + cinder_nodes: + value: null + fqdn: + value: null + management_vip: + value: null + network_metadata: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-glance/actions/run.pp b/f2s/resources/openstack-haproxy-glance/actions/run.pp new file mode 100644 index 00000000..14dec201 --- /dev/null +++ b/f2s/resources/openstack-haproxy-glance/actions/run.pp @@ -0,0 +1,26 @@ +notice('MODULAR: openstack-haproxy-glance.pp') + +$network_metadata = hiera_hash('network_metadata') +$glance_hash = hiera_hash('glance', {}) +# enabled by default +$use_glance = pick($glance_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') + + +#todo(sv): change to 'glance' as soon as 
glance as node-role was ready +$glances_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']), 'glance/api') + +if ($use_glance) { + $server_names = hiera_array('glance_names', keys($glances_address_map)) + $ipaddresses = hiera_array('glance_ipaddresses', values($glances_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + class { '::openstack::ha::glance': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-glance/meta.yaml b/f2s/resources/openstack-haproxy-glance/meta.yaml new file mode 100644 index 00000000..0b24818f --- /dev/null +++ b/f2s/resources/openstack-haproxy-glance/meta.yaml @@ -0,0 +1,24 @@ +id: openstack-haproxy-glance +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + glance: + value: null + glance_ipaddresses: + value: null + glance_names: + value: null + management_vip: + value: null + network_metadata: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-heat/actions/run.pp b/f2s/resources/openstack-haproxy-heat/actions/run.pp new file mode 100644 index 00000000..4bbf4336 --- /dev/null +++ b/f2s/resources/openstack-haproxy-heat/actions/run.pp @@ -0,0 +1,24 @@ +notice('MODULAR: openstack-haproxy-heat.pp') + +$heat_hash = hiera_hash('heat', {}) +# enabled by default +$use_heat = pick($heat_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') +$network_metadata = hiera_hash('network_metadata') +$heat_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('heat_roles')), 'heat/api') + +if ($use_heat) { + $server_names = 
hiera_array('heat_names',keys($heat_address_map)) + $ipaddresses = hiera_array('heat_ipaddresses', values($heat_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + +# configure heat ha proxy + class { '::openstack::ha::heat': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-heat/meta.yaml b/f2s/resources/openstack-haproxy-heat/meta.yaml new file mode 100644 index 00000000..eaef0fa4 --- /dev/null +++ b/f2s/resources/openstack-haproxy-heat/meta.yaml @@ -0,0 +1,26 @@ +id: openstack-haproxy-heat +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + heat: + value: null + heat_ipaddresses: + value: null + heat_names: + value: null + heat_roles: + value: null + management_vip: + value: null + network_metadata: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-horizon/actions/run.pp b/f2s/resources/openstack-haproxy-horizon/actions/run.pp new file mode 100644 index 00000000..a491245a --- /dev/null +++ b/f2s/resources/openstack-haproxy-horizon/actions/run.pp @@ -0,0 +1,24 @@ +notice('MODULAR: openstack-haproxy-horizon.pp') + +$network_metadata = hiera_hash('network_metadata') +$horizon_hash = hiera_hash('horizon', {}) +# enabled by default +$use_horizon = pick($horizon_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') + +$horizon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('horizon_nodes'), 'horizon') +if ($use_horizon) { + $server_names = hiera_array('horizon_names', keys($horizon_address_map)) + $ipaddresses = hiera_array('horizon_ipaddresses', values($horizon_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = 
hiera('management_vip') + + # configure horizon ha proxy + class { '::openstack::ha::horizon': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + use_ssl => $public_ssl_hash['horizon'], + } +} diff --git a/f2s/resources/openstack-haproxy-horizon/meta.yaml b/f2s/resources/openstack-haproxy-horizon/meta.yaml new file mode 100644 index 00000000..01b55985 --- /dev/null +++ b/f2s/resources/openstack-haproxy-horizon/meta.yaml @@ -0,0 +1,26 @@ +id: openstack-haproxy-horizon +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + horizon: + value: null + horizon_ipaddresses: + value: null + horizon_names: + value: null + horizon_nodes: + value: null + management_vip: + value: null + network_metadata: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-ironic/actions/run.pp b/f2s/resources/openstack-haproxy-ironic/actions/run.pp new file mode 100644 index 00000000..df256fc6 --- /dev/null +++ b/f2s/resources/openstack-haproxy-ironic/actions/run.pp @@ -0,0 +1,22 @@ +notice('MODULAR: openstack-haproxy-ironic.pp') + +$network_metadata = hiera_hash('network_metadata') +$public_ssl_hash = hiera('public_ssl') +$ironic_hash = hiera_hash('ironic', {}) + +$ironic_address_map = get_node_to_ipaddr_map_by_network_role(hiera('ironic_api_nodes'), 'ironic/api') + +$server_names = hiera_array('ironic_server_names', keys($ironic_address_map)) +$ipaddresses = hiera_array('ironic_ipaddresses', values($ironic_address_map)) +$public_virtual_ip = hiera('public_vip') +$internal_virtual_ip = hiera('management_vip') +$baremetal_virtual_ip = $network_metadata['vips']['baremetal']['ipaddr'] + +class { '::openstack::ha::ironic': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => 
$server_names, + public_ssl => $public_ssl_hash['services'], + baremetal_virtual_ip => $baremetal_virtual_ip, +} diff --git a/f2s/resources/openstack-haproxy-ironic/meta.yaml b/f2s/resources/openstack-haproxy-ironic/meta.yaml new file mode 100644 index 00000000..88ca5135 --- /dev/null +++ b/f2s/resources/openstack-haproxy-ironic/meta.yaml @@ -0,0 +1,8 @@ +id: openstack-haproxy-ironic +handler: puppetv2 +version: '8.0' +inputs: + ironic: + value: null + puppet_modules: + value: null diff --git a/f2s/resources/openstack-haproxy-keystone/actions/run.pp b/f2s/resources/openstack-haproxy-keystone/actions/run.pp new file mode 100644 index 00000000..8772ac4c --- /dev/null +++ b/f2s/resources/openstack-haproxy-keystone/actions/run.pp @@ -0,0 +1,29 @@ +notice('MODULAR: openstack-haproxy-keystone.pp') + +$network_metadata = hiera_hash('network_metadata') +$keystone_hash = hiera_hash('keystone', {}) +# enabled by default +$use_keystone = pick($keystone_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') + +#todo(sv): change to 'keystone' as soon as keystone as node-role was ready +$keystones_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']), 'keystone/api') + +if ($use_keystone) { + $server_names = pick(hiera_array('keystone_names', undef), + keys($keystones_address_map)) + $ipaddresses = pick(hiera_array('keystone_ipaddresses', undef), + values($keystones_address_map)) + $public_virtual_ip = pick(hiera('public_service_endpoint', undef), hiera('public_vip')) + $internal_virtual_ip = pick(hiera('service_endpoint', undef), hiera('management_vip')) + + + # configure keystone ha proxy + class { '::openstack::ha::keystone': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git 
a/f2s/resources/openstack-haproxy-keystone/meta.yaml b/f2s/resources/openstack-haproxy-keystone/meta.yaml new file mode 100644 index 00000000..f55d40d0 --- /dev/null +++ b/f2s/resources/openstack-haproxy-keystone/meta.yaml @@ -0,0 +1,28 @@ +id: openstack-haproxy-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + keystone: + value: null + keystone_ipaddresses: + value: null + keystone_names: + value: null + management_vip: + value: null + network_metadata: + value: null + public_service_endpoint: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null + service_endpoint: + value: null diff --git a/f2s/resources/openstack-haproxy-murano/actions/run.pp b/f2s/resources/openstack-haproxy-murano/actions/run.pp new file mode 100644 index 00000000..5224a2e0 --- /dev/null +++ b/f2s/resources/openstack-haproxy-murano/actions/run.pp @@ -0,0 +1,24 @@ +notice('MODULAR: openstack-haproxy-murano.pp') + +$murano_hash = hiera_hash('murano_hash',{}) +# NOT enabled by default +$use_murano = pick($murano_hash['enabled'], false) +$public_ssl_hash = hiera('public_ssl') +$network_metadata = hiera_hash('network_metadata') +$murano_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('murano_roles')), 'murano/api') + +if ($use_murano) { + $server_names = hiera_array('murano_names',keys($murano_address_map)) + $ipaddresses = hiera_array('murano_ipaddresses', values($murano_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + # configure murano ha proxy + class { '::openstack::ha::murano': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-murano/meta.yaml 
b/f2s/resources/openstack-haproxy-murano/meta.yaml new file mode 100644 index 00000000..98c4a60a --- /dev/null +++ b/f2s/resources/openstack-haproxy-murano/meta.yaml @@ -0,0 +1,18 @@ +id: openstack-haproxy-murano +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + murano_hash: + value: null + murano_roles: + value: null + network_metadata: + value: null + public_ssl: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-mysqld/actions/run.pp b/f2s/resources/openstack-haproxy-mysqld/actions/run.pp new file mode 100644 index 00000000..d2ba97c7 --- /dev/null +++ b/f2s/resources/openstack-haproxy-mysqld/actions/run.pp @@ -0,0 +1,31 @@ +notice('MODULAR: openstack-haproxy-mysqld.pp') + +$network_metadata = hiera_hash('network_metadata') +$mysql_hash = hiera_hash('mysql', {}) +# enabled by default +$use_mysql = pick($mysql_hash['enabled'], true) + +$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera') +$public_ssl_hash = hiera('public_ssl') + +$database_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('database_nodes'), 'mgmt/database') + +# only do this if mysql is enabled and we are using one of the galera/percona classes +if $use_mysql and ($custom_mysql_setup_class in ['galera', 'percona', 'percona_packages']) { + $server_names = hiera_array('mysqld_names', keys($database_address_map)) + $ipaddresses = hiera_array('mysqld_ipaddresses', values($database_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = pick(hiera('database_vip', undef), hiera('management_vip')) + + $primary_controller = hiera('primary_controller') + + + # configure mysql ha proxy + class { '::openstack::ha::mysqld': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + is_primary_controller => $primary_controller, + } +} diff --git 
a/f2s/resources/openstack-haproxy-mysqld/meta.yaml b/f2s/resources/openstack-haproxy-mysqld/meta.yaml new file mode 100644 index 00000000..749d697e --- /dev/null +++ b/f2s/resources/openstack-haproxy-mysqld/meta.yaml @@ -0,0 +1,32 @@ +id: openstack-haproxy-mysqld +handler: puppetv2 +version: '8.0' +inputs: + custom_mysql_setup_class: + value: null + database_nodes: + value: null + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + mysql: + value: null + mysqld_ipaddresses: + value: null + mysqld_names: + value: null + network_metadata: + value: null + primary_controller: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-neutron/actions/run.pp b/f2s/resources/openstack-haproxy-neutron/actions/run.pp new file mode 100644 index 00000000..738ccfde --- /dev/null +++ b/f2s/resources/openstack-haproxy-neutron/actions/run.pp @@ -0,0 +1,22 @@ +notice('MODULAR: openstack-haproxy-neutron.pp') + +# NOT enabled by default +$use_neutron = hiera('use_neutron', false) +$public_ssl_hash = hiera('public_ssl') + +$neutron_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('neutron_nodes'), 'neutron/api') +if ($use_neutron) { + $server_names = hiera_array('neutron_names', keys($neutron_address_map)) + $ipaddresses = hiera_array('neutron_ipaddresses', values($neutron_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + # configure neutron ha proxy + class { '::openstack::ha::neutron': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-neutron/meta.yaml b/f2s/resources/openstack-haproxy-neutron/meta.yaml new file mode 100644 index 00000000..b99d5a34 --- /dev/null +++ 
b/f2s/resources/openstack-haproxy-neutron/meta.yaml @@ -0,0 +1,24 @@ +id: openstack-haproxy-neutron +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + neutron_ipaddresses: + value: null + neutron_names: + value: null + neutron_nodes: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-haproxy-nova/actions/run.pp b/f2s/resources/openstack-haproxy-nova/actions/run.pp new file mode 100644 index 00000000..060d9db7 --- /dev/null +++ b/f2s/resources/openstack-haproxy-nova/actions/run.pp @@ -0,0 +1,25 @@ +notice('MODULAR: openstack-haproxy-nova.pp') + +$nova_hash = hiera_hash('nova', {}) +# enabled by default +$use_nova = pick($nova_hash['enabled'], true) +$public_ssl_hash = hiera('public_ssl') + +$nova_api_address_map = get_node_to_ipaddr_map_by_network_role(hiera('nova_api_nodes'), 'nova/api') + +if ($use_nova) { + $server_names = hiera_array('nova_names', keys($nova_api_address_map)) + $ipaddresses = hiera_array('nova_ipaddresses', values($nova_api_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + + # configure nova ha proxy + class { '::openstack::ha::nova': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-nova/meta.yaml b/f2s/resources/openstack-haproxy-nova/meta.yaml new file mode 100644 index 00000000..a4dfc852 --- /dev/null +++ b/f2s/resources/openstack-haproxy-nova/meta.yaml @@ -0,0 +1,24 @@ +id: openstack-haproxy-nova +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + nova: + value: null + nova_api_nodes: + value: null + nova_ipaddresses: + value: null + nova_names: + 
value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-haproxy-radosgw/actions/run.pp b/f2s/resources/openstack-haproxy-radosgw/actions/run.pp new file mode 100644 index 00000000..f5911423 --- /dev/null +++ b/f2s/resources/openstack-haproxy-radosgw/actions/run.pp @@ -0,0 +1,34 @@ +notice('MODULAR: openstack-haproxy-radosgw.pp') + +$network_metadata = hiera_hash('network_metadata') +$storage_hash = hiera_hash('storage', {}) +$public_ssl_hash = hiera('public_ssl') + + +if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { + $use_swift = true +} else { + $use_swift = false +} +if !($use_swift) and ($storage_hash['objects_ceph']) { + $use_radosgw = true +} else { + $use_radosgw = false +} + +if $use_radosgw { + $rgw_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_rgw_nodes'), 'ceph/radosgw') + $server_names = hiera_array('radosgw_server_names', keys($rgw_address_map)) + $ipaddresses = hiera_array('radosgw_ipaddresses', values($rgw_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + # configure radosgw ha proxy + class { '::openstack::ha::radosgw': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-radosgw/meta.yaml b/f2s/resources/openstack-haproxy-radosgw/meta.yaml new file mode 100644 index 00000000..4fe901ff --- /dev/null +++ b/f2s/resources/openstack-haproxy-radosgw/meta.yaml @@ -0,0 +1,16 @@ +id: openstack-haproxy-radosgw +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + network_metadata: + value: null + public_ssl: + value: null + puppet_modules: + value: null + role: + value: null + storage: + value: null diff 
--git a/f2s/resources/openstack-haproxy-sahara/actions/run.pp b/f2s/resources/openstack-haproxy-sahara/actions/run.pp new file mode 100644 index 00000000..4f76a2f2 --- /dev/null +++ b/f2s/resources/openstack-haproxy-sahara/actions/run.pp @@ -0,0 +1,24 @@ +notice('MODULAR: openstack-haproxy-sahara.pp') + +$sahara_hash = hiera_hash('sahara_hash',{}) +# NOT enabled by default +$use_sahara = pick($sahara_hash['enabled'], false) +$public_ssl_hash = hiera('public_ssl') +$network_metadata = hiera_hash('network_metadata') +$sahara_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('sahara_roles')), 'sahara/api') + +if ($use_sahara) { + $server_names = hiera_array('sahara_names',keys($sahara_address_map)) + $ipaddresses = hiera_array('sahara_ipaddresses', values($sahara_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + # configure sahara ha proxy + class { '::openstack::ha::sahara': + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + } +} diff --git a/f2s/resources/openstack-haproxy-sahara/meta.yaml b/f2s/resources/openstack-haproxy-sahara/meta.yaml new file mode 100644 index 00000000..fdfcbe77 --- /dev/null +++ b/f2s/resources/openstack-haproxy-sahara/meta.yaml @@ -0,0 +1,18 @@ +id: openstack-haproxy-sahara +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + network_metadata: + value: null + public_ssl: + value: null + puppet_modules: + value: null + role: + value: null + sahara_hash: + value: null + sahara_roles: + value: null diff --git a/f2s/resources/openstack-haproxy-stats/actions/run.pp b/f2s/resources/openstack-haproxy-stats/actions/run.pp new file mode 100644 index 00000000..cfcf71d6 --- /dev/null +++ b/f2s/resources/openstack-haproxy-stats/actions/run.pp @@ -0,0 +1,7 @@ +notice('MODULAR: 
openstack-haproxy-stats.pp') + +$internal_virtual_ip = unique([hiera('management_vip'), hiera('database_vip'), hiera('service_endpoint')]) + +class { '::openstack::ha::stats': + internal_virtual_ip => $internal_virtual_ip, +} diff --git a/f2s/resources/openstack-haproxy-stats/meta.yaml b/f2s/resources/openstack-haproxy-stats/meta.yaml new file mode 100644 index 00000000..98072cdc --- /dev/null +++ b/f2s/resources/openstack-haproxy-stats/meta.yaml @@ -0,0 +1,16 @@ +id: openstack-haproxy-stats +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + puppet_modules: + value: null + role: + value: null + service_endpoint: + value: null diff --git a/f2s/resources/openstack-haproxy-swift/actions/run.pp b/f2s/resources/openstack-haproxy-swift/actions/run.pp new file mode 100644 index 00000000..01819d46 --- /dev/null +++ b/f2s/resources/openstack-haproxy-swift/actions/run.pp @@ -0,0 +1,37 @@ +notice('MODULAR: openstack-haproxy-swift.pp') + +$network_metadata = hiera_hash('network_metadata') +$storage_hash = hiera_hash('storage', {}) +$swift_proxies = hiera_hash('swift_proxies', undef) +$public_ssl_hash = hiera('public_ssl') +$ironic_hash = hiera_hash('ironic', {}) + +if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { + $use_swift = true +} else { + $use_swift = false +} + +$swift_proxies_address_map = get_node_to_ipaddr_map_by_network_role($swift_proxies, 'swift/api') + +if ($use_swift) { + + $server_names = hiera_array('swift_server_names', keys($swift_proxies_address_map)) + $ipaddresses = hiera_array('swift_ipaddresses', values($swift_proxies_address_map)) + $public_virtual_ip = hiera('public_vip') + $internal_virtual_ip = hiera('management_vip') + + if $ironic_hash['enabled'] { + $baremetal_virtual_ip = $network_metadata['vips']['baremetal']['ipaddr'] + } + + # configure swift ha proxy + class { '::openstack::ha::swift': + 
internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + baremetal_virtual_ip => $baremetal_virtual_ip, + } +} diff --git a/f2s/resources/openstack-haproxy-swift/meta.yaml b/f2s/resources/openstack-haproxy-swift/meta.yaml new file mode 100644 index 00000000..985d727a --- /dev/null +++ b/f2s/resources/openstack-haproxy-swift/meta.yaml @@ -0,0 +1,28 @@ +id: openstack-haproxy-swift +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + ironic: + value: null + management_vip: + value: null + network_metadata: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null + storage: + value: null + swift_ipaddresses: + value: null + swift_proxies: + value: null + swift_server_names: + value: null diff --git a/f2s/resources/openstack-haproxy/actions/run.pp b/f2s/resources/openstack-haproxy/actions/run.pp new file mode 100644 index 00000000..c38ce7dc --- /dev/null +++ b/f2s/resources/openstack-haproxy/actions/run.pp @@ -0,0 +1,3 @@ +notice('MODULAR: openstack-haproxy.pp') +# This is a placeholder task that is used to tie all the haproxy tasks together. 
+# Any haproxy configurations should go in a openstack-haproxy- task diff --git a/f2s/resources/openstack-haproxy/meta.yaml b/f2s/resources/openstack-haproxy/meta.yaml new file mode 100644 index 00000000..7ce44222 --- /dev/null +++ b/f2s/resources/openstack-haproxy/meta.yaml @@ -0,0 +1,10 @@ +id: openstack-haproxy +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-network-agents-dhcp/actions/run.pp b/f2s/resources/openstack-network-agents-dhcp/actions/run.pp new file mode 100644 index 00000000..2c8a35f8 --- /dev/null +++ b/f2s/resources/openstack-network-agents-dhcp/actions/run.pp @@ -0,0 +1,39 @@ +notice('MODULAR: openstack-network/agents/dhcp.pp') + +$use_neutron = hiera('use_neutron', false) + +class neutron {} +class { 'neutron' :} + +if $use_neutron { + + $debug = hiera('debug', true) + $resync_interval = '30' + $isolated_metadata = try_get_value($neutron_config, 'metadata/isolated_metadata', true) + + $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) + $ha_agent = try_get_value($neutron_advanced_config, 'dhcp_agent_ha', true) + + class { 'neutron::agents::dhcp': + debug => $debug, + resync_interval => $resync_interval, + manage_service => true, + enable_isolated_metadata => $isolated_metadata, + dhcp_delete_namespaces => true, + enabled => true, + } + + if $ha_agent { + $primary_controller = hiera('primary_controller') + class { 'cluster::neutron::dhcp' : + primary => $primary_controller, + } + } + + #======================== + package { 'neutron': + name => 'binutils', + ensure => 'installed', + } + +} diff --git a/f2s/resources/openstack-network-agents-dhcp/meta.yaml b/f2s/resources/openstack-network-agents-dhcp/meta.yaml new file mode 100644 index 00000000..0a9fdba2 --- /dev/null +++ b/f2s/resources/openstack-network-agents-dhcp/meta.yaml @@ -0,0 +1,18 @@ +id: openstack-network-agents-dhcp +handler: puppetv2 +version: '8.0' 
+inputs: + debug: + value: null + fqdn: + value: null + neutron_advanced_configuration: + value: null + primary_controller: + value: null + puppet_modules: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-network-agents-l3/actions/run.pp b/f2s/resources/openstack-network-agents-l3/actions/run.pp new file mode 100644 index 00000000..8a275768 --- /dev/null +++ b/f2s/resources/openstack-network-agents-l3/actions/run.pp @@ -0,0 +1,59 @@ +notice('MODULAR: openstack-network/agents/l3.pp') + +$use_neutron = hiera('use_neutron', false) + +class neutron {} +class { 'neutron' :} + +$neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) +$dvr = pick($neutron_advanced_config['neutron_dvr'], false) + +$role = hiera('role') +$controller = $role in ['controller', 'primary-controller'] +$compute = $role in ['compute'] + +if $use_neutron and ($controller or ($dvr and $compute)) { + $debug = hiera('debug', true) + $metadata_port = '8775' + $network_scheme = hiera('network_scheme', {}) + + if $controller { + if $dvr { + $agent_mode = 'dvr-snat' + } else { + $agent_mode = 'legacy' + } + } else { + # works on copute nodes only if dvr is enabled + $agent_mode = 'dvr' + } + + prepare_network_config($network_scheme) + + $ha_agent = try_get_value($neutron_advanced_config, 'l3_agent_ha', true) + $external_network_bridge = get_network_role_property('neutron/floating', 'interface') + + class { 'neutron::agents::l3': + debug => $debug, + metadata_port => $metadata_port, + external_network_bridge => $external_network_bridge, + manage_service => true, + enabled => true, + router_delete_namespaces => true, + agent_mode => $agent_mode, + } + + if $ha_agent { + $primary_controller = hiera('primary_controller') + cluster::neutron::l3 { 'default-l3' : + primary => $primary_controller, + } + } + + #======================== + package { 'neutron': + name => 'binutils', + ensure => 'installed', + } + +} diff --git 
a/f2s/resources/openstack-network-agents-l3/meta.yaml b/f2s/resources/openstack-network-agents-l3/meta.yaml new file mode 100644 index 00000000..83a470da --- /dev/null +++ b/f2s/resources/openstack-network-agents-l3/meta.yaml @@ -0,0 +1,20 @@ +id: openstack-network-agents-l3 +handler: puppetv2 +version: '8.0' +inputs: + debug: + value: null + fqdn: + value: null + network_scheme: + value: null + neutron_advanced_configuration: + value: null + primary_controller: + value: null + puppet_modules: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-network-agents-metadata/actions/run.pp b/f2s/resources/openstack-network-agents-metadata/actions/run.pp new file mode 100644 index 00000000..56f24e3a --- /dev/null +++ b/f2s/resources/openstack-network-agents-metadata/actions/run.pp @@ -0,0 +1,57 @@ +notice('MODULAR: openstack-network/agents/metadata.pp') + +$use_neutron = hiera('use_neutron', false) + +class neutron {} +class { 'neutron' :} + +if $use_neutron { + $debug = hiera('debug', true) + $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) + $ha_agent = try_get_value($neutron_advanced_config, 'metadata_agent_ha', true) + + $auth_region = hiera('region', 'RegionOne') + $service_endpoint = hiera('service_endpoint') + $auth_api_version = 'v2.0' + $admin_identity_uri = "http://${service_endpoint}:35357" + $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" + + $neutron_config = hiera_hash('neutron_config') + + $keystone_user = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') + $keystone_tenant = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') + $neutron_user_password = try_get_value($neutron_config, 'keystone/admin_password') + + $shared_secret = try_get_value($neutron_config, 'metadata/metadata_proxy_shared_secret') + + $management_vip = hiera('management_vip') + $nova_endpoint = hiera('nova_endpoint', $management_vip) + + class { 
'neutron::agents::metadata': + debug => $debug, + auth_region => $auth_region, + auth_url => $admin_auth_url, + auth_user => $keystone_user, + auth_tenant => $keystone_tenant, + auth_password => $neutron_user_password, + shared_secret => $shared_secret, + metadata_ip => $nova_endpoint, + manage_service => true, + enabled => true, + + } + + if $ha_agent { + $primary_controller = hiera('primary_controller') + class { 'cluster::neutron::metadata' : + primary => $primary_controller, + } + } + + #======================== + package { 'neutron': + name => 'binutils', + ensure => 'installed', + } + +} diff --git a/f2s/resources/openstack-network-agents-metadata/meta.yaml b/f2s/resources/openstack-network-agents-metadata/meta.yaml new file mode 100644 index 00000000..6b288cd4 --- /dev/null +++ b/f2s/resources/openstack-network-agents-metadata/meta.yaml @@ -0,0 +1,28 @@ +id: openstack-network-agents-metadata +handler: puppetv2 +version: '8.0' +inputs: + debug: + value: null + fqdn: + value: null + management_vip: + value: null + neutron_advanced_configuration: + value: null + neutron_config: + value: null + nova_endpoint: + value: null + primary_controller: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-network-common-config/actions/run.pp b/f2s/resources/openstack-network-common-config/actions/run.pp new file mode 100644 index 00000000..66b49530 --- /dev/null +++ b/f2s/resources/openstack-network-common-config/actions/run.pp @@ -0,0 +1,110 @@ +notice('MODULAR: openstack-network/common-config.pp') + +$use_neutron = hiera('use_neutron', false) + +if $use_neutron { + + $openstack_network_hash = hiera_hash('openstack_network', { }) + $neutron_config = hiera_hash('neutron_config') + + $core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' + $service_plugins = [ + 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin', + 
'neutron.services.metering.metering_plugin.MeteringPlugin', + ] + + $rabbit_hash = hiera_hash('rabbit_hash', { }) + $ceilometer_hash = hiera_hash('ceilometer', { }) + $network_scheme = hiera_hash('network_scheme') + + $verbose = pick($openstack_network_hash['verbose'], hiera('verbose', true)) + $debug = pick($openstack_network_hash['debug'], hiera('debug', true)) + $use_syslog = hiera('use_syslog', true) + $use_stderr = hiera('use_stderr', false) + $log_facility = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') + + prepare_network_config($network_scheme) + $bind_host = get_network_role_property('neutron/api', 'ipaddr') + + $base_mac = $neutron_config['L2']['base_mac'] + $use_ceilometer = $ceilometer_hash['enabled'] + $amqp_hosts = split(hiera('amqp_hosts', ''), ',') + $amqp_user = $rabbit_hash['user'] + $amqp_password = $rabbit_hash['password'] + + $segmentation_type = try_get_value($neutron_config, 'L2/segmentation_type') + + $nets = $neutron_config['predefined_networks'] + + if $segmentation_type == 'vlan' { + $net_role_property = 'neutron/private' + $iface = get_network_role_property($net_role_property, 'phys_dev') + $mtu_for_virt_network = pick(get_transformation_property('mtu', $iface[0]), '1500') + $overlay_net_mtu = $mtu_for_virt_network + } else { + $net_role_property = 'neutron/mesh' + $iface = get_network_role_property($net_role_property, 'phys_dev') + $physical_net_mtu = pick(get_transformation_property('mtu', $iface[0]), '1500') + + if $segmentation_type == 'gre' { + $mtu_offset = '42' + } else { + # vxlan is the default segmentation type for non-vlan cases + $mtu_offset = '50' + } + + if $physical_net_mtu { + $overlay_net_mtu = $physical_net_mtu - $mtu_offset + } else { + $overlay_net_mtu = '1500' - $mtu_offset + } + + } + + class { 'neutron' : + verbose => $verbose, + debug => $debug, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + log_facility => $log_facility, + bind_host => $bind_host, + base_mac => $base_mac, + core_plugin => 
$core_plugin, + service_plugins => $service_plugins, + allow_overlapping_ips => true, + mac_generation_retries => '32', + dhcp_lease_duration => '600', + dhcp_agents_per_network => '2', + report_interval => '10', + rabbit_user => $amqp_user, + rabbit_hosts => $amqp_hosts, + rabbit_password => $amqp_password, + kombu_reconnect_delay => '5.0', + network_device_mtu => $overlay_net_mtu, + advertise_mtu => true, + } + + if $use_syslog { + neutron_config { 'DEFAULT/use_syslog_rfc_format': value => true; } + } + + if $use_ceilometer { + neutron_config { 'DEFAULT/notification_driver': value => 'messaging' } + } + +} + +### SYSCTL ### + +# All nodes with network functions should have net forwarding. +# Its a requirement for network namespaces to function. +sysctl::value { 'net.ipv4.ip_forward': value => '1' } + +# All nodes with network functions should have these thresholds +# to avoid "Neighbour table overflow" problem +sysctl::value { 'net.ipv4.neigh.default.gc_thresh1': value => '4096' } +sysctl::value { 'net.ipv4.neigh.default.gc_thresh2': value => '8192' } +sysctl::value { 'net.ipv4.neigh.default.gc_thresh3': value => '16384' } + +Sysctl::Value <| |> -> Nova_config <||> +Sysctl::Value <| |> -> Neutron_config <||> diff --git a/f2s/resources/openstack-network-common-config/meta.yaml b/f2s/resources/openstack-network-common-config/meta.yaml new file mode 100644 index 00000000..b13c01fc --- /dev/null +++ b/f2s/resources/openstack-network-common-config/meta.yaml @@ -0,0 +1,34 @@ +id: openstack-network-common-config +handler: puppetv2 +version: '8.0' +inputs: + amqp_hosts: + value: null + ceilometer: + value: null + debug: + value: null + fqdn: + value: null + network_scheme: + value: null + neutron_config: + value: null + openstack_network: + value: null + puppet_modules: + value: null + rabbit_hash: + value: null + role: + value: null + syslog_log_facility_neutron: + value: null + use_neutron: + value: null + use_stderr: + value: null + use_syslog: + value: null + 
verbose: + value: null diff --git a/f2s/resources/openstack-network-compute-nova/actions/run.pp b/f2s/resources/openstack-network-compute-nova/actions/run.pp new file mode 100644 index 00000000..3fdd4b33 --- /dev/null +++ b/f2s/resources/openstack-network-compute-nova/actions/run.pp @@ -0,0 +1,267 @@ +notice('MODULAR: openstack-network/compute-nova.pp') + +$use_neutron = hiera('use_neutron', false) + +if $use_neutron { + include nova::params + $neutron_config = hiera_hash('neutron_config') + $neutron_integration_bridge = 'br-int' + $nova_hash = hiera_hash('nova') + $libvirt_vif_driver = pick($nova_hash['libvirt_vif_driver'], 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver') + + $management_vip = hiera('management_vip') + $service_endpoint = hiera('service_endpoint', $management_vip) + $neutron_endpoint = hiera('neutron_endpoint', $management_vip) + $admin_password = try_get_value($neutron_config, 'keystone/admin_password') + $admin_tenant_name = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') + $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') + $region_name = hiera('region', 'RegionOne') + $auth_api_version = 'v2.0' + $admin_identity_uri = "http://${service_endpoint}:35357" + $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" + $neutron_url = "http://${neutron_endpoint}:9696" + + service { 'libvirt' : + ensure => 'running', + enable => true, + # Workaround for bug LP #1469308 + # also service name for Ubuntu and Centos is the same. 
+ name => 'libvirtd', + provider => $nova::params::special_service_provider, + } + + exec { 'destroy_libvirt_default_network': + command => 'virsh net-destroy default', + onlyif => 'virsh net-info default | grep -qE "Active:.* yes"', + path => [ '/bin', '/sbin', '/usr/bin', '/usr/sbin' ], + tries => 3, + require => Service['libvirt'], + } + + exec { 'undefine_libvirt_default_network': + command => 'virsh net-undefine default', + onlyif => 'virsh net-info default 2>&1 > /dev/null', + path => [ '/bin', '/sbin', '/usr/bin', '/usr/sbin' ], + tries => 3, + require => Exec['destroy_libvirt_default_network'], + } + + Service['libvirt'] ~> Exec['destroy_libvirt_default_network'] + + # script called by qemu needs to manipulate the tap device + file_line { 'clear_emulator_capabilities': + path => '/etc/libvirt/qemu.conf', + line => 'clear_emulator_capabilities = 0', + notify => Service['libvirt'] + } + + file_line { 'no_qemu_selinux': + path => '/etc/libvirt/qemu.conf', + line => 'security_driver = "none"', + notify => Service['libvirt'] + } + + class { 'nova::compute::neutron': + libvirt_vif_driver => $libvirt_vif_driver, + } + + nova_config { + 'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; + 'DEFAULT/linuxnet_ovs_integration_bridge': value => $neutron_integration_bridge; + 'DEFAULT/network_device_mtu': value => '65000'; + } + + class { 'nova::network::neutron' : + neutron_admin_password => $admin_password, + neutron_admin_tenant_name => $admin_tenant_name, + neutron_region_name => $region_name, + neutron_admin_username => $admin_username, + neutron_admin_auth_url => $admin_auth_url, + neutron_url => $neutron_url, + neutron_ovs_bridge => $neutron_integration_bridge, + } + + augeas { 'sysctl-net.bridge.bridge-nf-call-arptables': + context => '/files/etc/sysctl.conf', + changes => "set net.bridge.bridge-nf-call-arptables '1'", + before => Service['libvirt'], + } + augeas { 'sysctl-net.bridge.bridge-nf-call-iptables': + context 
=> '/files/etc/sysctl.conf', + changes => "set net.bridge.bridge-nf-call-iptables '1'", + before => Service['libvirt'], + } + augeas { 'sysctl-net.bridge.bridge-nf-call-ip6tables': + context => '/files/etc/sysctl.conf', + changes => "set net.bridge.bridge-nf-call-ip6tables '1'", + before => Service['libvirt'], + } + + # We need to restart nova-compute service in orderto apply new settings + # nova-compute must not be restarted until integration bridge is created by + # Neutron L2 agent. + # The reason is described here https://bugs.launchpad.net/fuel/+bug/1477475 + exec { 'wait-for-int-br': + command => "ovs-vsctl br-exists $neutron_integration_bridge", + path => [ '/sbin', '/bin', '/usr/bin', '/usr/sbin' ], + try_sleep => 6, + tries => 10, + } + Exec['wait-for-int-br'] -> Service['nova-compute'] + service { 'nova-compute': + ensure => 'running', + name => $::nova::params::compute_service_name, + } + Nova_config<| |> ~> Service['nova-compute'] + + if($::operatingsystem == 'Ubuntu') { + tweaks::ubuntu_service_override { 'nova-network': + package_name => 'nova-network', + } + } + +} else { + + $network_scheme = hiera('network_scheme', { }) + prepare_network_config($network_scheme) + + $nova_hash = hiera_hash('nova_hash', { }) + $bind_address = get_network_role_property('nova/api', 'ipaddr') + $public_int = get_network_role_property('public/vip', 'interface') + $private_interface = get_network_role_property('nova/private', 'interface') + $public_interface = $public_int ? 
{ undef=>'', default => $public_int } + $auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) + $nova_rate_limits = hiera('nova_rate_limits') + $network_size = hiera('network_size', undef) + $network_manager = hiera('network_manager', undef) + $network_config = hiera('network_config', { }) + $create_networks = true + $num_networks = hiera('num_networks', '1') + $novanetwork_params = hiera('novanetwork_parameters') + $fixed_range = hiera('fixed_network_range') + $use_vcenter = hiera('use_vcenter', false) + $enabled_apis = 'metadata' + $dns_nameservers = hiera_array('dns_nameservers', []) + + if ! $fixed_range { + fail('Must specify the fixed range when using nova-networks') + } + + if $use_vcenter { + $enable_nova_net = false + nova_config { + 'DEFAULT/multi_host': value => 'False'; + 'DEFAULT/send_arp_for_ha': value => 'False'; + } + } else { + include keystone::python + + Nova_config<| |> -> Service['nova-network'] + + case $::osfamily { + 'RedHat': { + $pymemcache_package_name = 'python-memcached' + } + 'Debian': { + $pymemcache_package_name = 'python-memcache' + } + default: { + fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem},\ + module ${module_name} only support osfamily RedHat and Debian") + } + } + + if !defined(Package[$pymemcache_package_name]) { + package { $pymemcache_package_name: + ensure => 'present', + } -> + Nova::Generic_service <| title == 'api' |> + } + + class { 'nova::api': + ensure_package => 'installed', + enabled => true, + admin_tenant_name => $admin_tenant_name, + admin_user => 'nova', + admin_password => $nova_hash['user_password'], + enabled_apis => $enabled_apis, + api_bind_address => $bind_address, + ratelimits => $nova_rate_limits, + # NOTE(bogdando) 1 api worker for compute node is enough + osapi_compute_workers => '1', + } + + if $::operatingsystem == 'Ubuntu' { + tweaks::ubuntu_service_override { 'nova-api': + package_name => 'nova-api', + } + } + + nova_config { + 'DEFAULT/multi_host' : 
value => 'True'; + 'DEFAULT/send_arp_for_ha' : value => 'True'; + 'DEFAULT/metadata_host' : value => $bind_address; + } + + if ! $public_interface { + fail('public_interface must be defined for multi host compute nodes') + } + + $enable_nova_net = true + + if $auto_assign_floating_ip { + nova_config { 'DEFAULT/auto_assign_floating_ip': value => 'True' } + } + } + +# Stub for networking-refresh that is needed by Nova::Network/Nova::Generic_service[network] +# We do not need it due to l23network is doing all stuff +# BTW '/sbin/ifdown -a ; /sbin/ifup -a' does not work on CentOS + exec { 'networking-refresh': + command => '/bin/echo "networking-refresh has been refreshed"', + refreshonly => true, + } + +# Stubs for nova_paste_api_ini + exec { 'post-nova_config': + command => '/bin/echo "Nova config has changed"', + refreshonly => true, + } + +# Stubs for nova_network + file { '/etc/nova/nova.conf': + ensure => 'present', + } + +# Stubs for nova-api + package { 'nova-common': + name => 'binutils', + ensure => 'installed', + } + + if $::operatingsystem == 'Ubuntu' { + tweaks::ubuntu_service_override { 'nova-network': + package_name => 'nova-network', + } + } + + class { 'nova::network': + ensure_package => 'installed', + private_interface => $private_interface, + public_interface => $public_interface, + fixed_range => $fixed_range, + floating_range => false, + network_manager => $network_manager, + config_overrides => $network_config, + create_networks => $create_networks, + num_networks => $num_networks, + network_size => $network_size, + dns1 => $dns_nameservers[0], + dns2 => $dns_nameservers[1], + enabled => $enable_nova_net, + install_service => $enable_nova_net, + } +#NOTE(aglarendil): lp/1381164 + nova_config { 'DEFAULT/force_snat_range': value => '0.0.0.0/0' } + +} diff --git a/f2s/resources/openstack-network-compute-nova/meta.yaml b/f2s/resources/openstack-network-compute-nova/meta.yaml new file mode 100644 index 00000000..fda3312e --- /dev/null +++ 
b/f2s/resources/openstack-network-compute-nova/meta.yaml @@ -0,0 +1,10 @@ +id: openstack-network-compute-nova +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/openstack-network-networks/actions/run.pp b/f2s/resources/openstack-network-networks/actions/run.pp new file mode 100644 index 00000000..a61d775b --- /dev/null +++ b/f2s/resources/openstack-network-networks/actions/run.pp @@ -0,0 +1,106 @@ +notice('MODULAR: openstack-network/networks.pp') + +if hiera('use_neutron', false) { + $access_hash = hiera('access', { }) + $keystone_admin_tenant = $access_hash['tenant'] + $neutron_config = hiera_hash('neutron_config') + $floating_net = try_get_value($neutron_config, 'default_floating_net', 'net04_ext') + $private_net = try_get_value($neutron_config, 'default_private_net', 'net04') + $default_router = try_get_value($neutron_config, 'default_router', 'router04') + $segmentation_type = try_get_value($neutron_config, 'L2/segmentation_type') + $nets = $neutron_config['predefined_networks'] + + if $segmentation_type == 'vlan' { + $network_type = 'vlan' + $segmentation_id_range = split(try_get_value($neutron_config, 'L2/phys_nets/physnet2/vlan_range', ''), ':') + } elsif $segmentation_type == 'gre' { + $network_type = 'gre' + $segmentation_id_range = split(try_get_value($neutron_config, 'L2/tunnel_id_ranges', ''), ':') + } else { + $network_type = 'vxlan' + $segmentation_id_range = split(try_get_value($neutron_config, 'L2/tunnel_id_ranges', ''), ':') + } + + $fallback_segment_id = $segmentation_id_range[0] + $private_net_segment_id = try_get_value($nets, "${private_net}/L2/segment_id", $fallback_segment_id) + $private_net_physnet = try_get_value($nets, "${private_net}/L2/physnet", false) + $private_net_shared = try_get_value($nets, "${private_net}/shared", false) + $private_net_router_external = false + $floating_net_physnet = try_get_value($nets, "${floating_net}/L2/physnet", 
false) + $floating_net_router_external = try_get_value($nets, "${floating_net}/L2/router_ext") + $floating_net_floating_range = try_get_value($nets, "${floating_net}/L3/floating", '') + $floating_net_shared = try_get_value($nets, "${floating_net}/shared", false) + + if !empty($floating_net_floating_range) { + $floating_net_allocation_pool = format_allocation_pools($floating_net_floating_range) + } + + $tenant_name = try_get_value($access_hash, 'tenant', 'admin') + + neutron_network { $floating_net : + ensure => 'present', + provider_physical_network => $floating_net_physnet, + provider_network_type => 'local', + router_external => $floating_net_router_external, + tenant_name => $tenant_name, + shared => $floating_net_shared + } + + neutron_subnet { "${floating_net}__subnet" : + ensure => 'present', + cidr => try_get_value($nets, "${floating_net}/L3/subnet"), + network_name => $floating_net, + tenant_name => $tenant_name, + gateway_ip => try_get_value($nets, "${floating_net}/L3/gateway"), + enable_dhcp => false, + allocation_pools => $floating_net_allocation_pool, + } + + neutron_network { $private_net : + ensure => 'present', + provider_physical_network => $private_net_physnet, + provider_network_type => $network_type, + provider_segmentation_id => $private_net_segment_id, + router_external => $private_net_router_external, + tenant_name => $tenant_name, + shared => $private_net_shared + } + + neutron_subnet { "${private_net}__subnet" : + ensure => 'present', + cidr => try_get_value($nets, "${private_net}/L3/subnet"), + network_name => $private_net, + tenant_name => $tenant_name, + gateway_ip => try_get_value($nets, "${private_net}/L3/gateway"), + enable_dhcp => true, + dns_nameservers => try_get_value($nets, "${private_net}/L3/nameservers"), + } + + if has_key($nets, 'baremetal') { + $baremetal_physnet = try_get_value($nets, 'baremetal/L2/physnet', false) + $baremetal_segment_id = try_get_value($nets, 'baremetal/L2/segment_id') + $baremetal_router_external = 
try_get_value($nets, 'baremetal/L2/router_ext') + $baremetal_shared = try_get_value($nets, 'baremetal/shared', false) + + neutron_network { 'baremetal' : + ensure => 'present', + provider_physical_network => $baremetal_physnet, + provider_network_type => 'flat', + provider_segmentation_id => $baremetal_segment_id, + router_external => $baremetal_router_external, + tenant_name => $tenant_name, + shared => $baremetal_shared + } + + neutron_subnet { 'baremetal__subnet' : + ensure => 'present', + cidr => try_get_value($nets, 'baremetal/L3/subnet'), + network_name => 'baremetal', + tenant_name => $tenant_name, + gateway_ip => try_get_value($nets, 'baremetal/L3/gateway'), + enable_dhcp => true, + dns_nameservers => try_get_value($nets, 'baremetal/L3/nameservers'), + } + } + +} diff --git a/f2s/resources/openstack-network-networks/meta.yaml b/f2s/resources/openstack-network-networks/meta.yaml new file mode 100644 index 00000000..ff50ce0a --- /dev/null +++ b/f2s/resources/openstack-network-networks/meta.yaml @@ -0,0 +1,18 @@ +id: openstack-network-networks +handler: puppetv2 +version: '8.0' +inputs: + access: + value: null + fqdn: + value: null + neutron_config: + value: null + primary_controller: + value: null + puppet_modules: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-network-plugins-l2/actions/run.pp b/f2s/resources/openstack-network-plugins-l2/actions/run.pp new file mode 100644 index 00000000..2c0ecf02 --- /dev/null +++ b/f2s/resources/openstack-network-plugins-l2/actions/run.pp @@ -0,0 +1,171 @@ +notice('MODULAR: openstack-network/plugins/ml2.pp') + +$use_neutron = hiera('use_neutron', false) + +class neutron {} +class { 'neutron' :} + +if $use_neutron { + include ::neutron::params + + $role = hiera('role') + $controller = $role in ['controller', 'primary-controller'] + $primary_controller = $role in ['primary-controller'] + $compute = $role in ['compute'] + + $neutron_config = hiera_hash('neutron_config') + 
$neutron_server_enable = pick($neutron_config['neutron_server_enable'], true) + + $management_vip = hiera('management_vip') + $service_endpoint = hiera('service_endpoint', $management_vip) + $auth_api_version = 'v2.0' + $identity_uri = "http://${service_endpoint}:5000" + $auth_url = "${identity_uri}/${auth_api_version}" + $auth_password = $neutron_config['keystone']['admin_password'] + $auth_user = pick($neutron_config['keystone']['admin_user'], 'neutron') + $auth_tenant = pick($neutron_config['keystone']['admin_tenant'], 'services') + $auth_region = hiera('region', 'RegionOne') + $auth_endpoint_type = 'internalURL' + + $network_scheme = hiera_hash('network_scheme') + prepare_network_config($network_scheme) + + $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) + $l2_population = try_get_value($neutron_advanced_config, 'neutron_l2_pop', false) + $dvr = try_get_value($neutron_advanced_config, 'neutron_dvr', false) + $segmentation_type = try_get_value($neutron_config, 'L2/segmentation_type') + + if $segmentation_type == 'vlan' { + $net_role_property = 'neutron/private' + $iface = get_network_role_property($net_role_property, 'phys_dev') + $physical_net_mtu = pick(get_transformation_property('mtu', $iface[0]), '1500') + $overlay_net_mtu = $physical_net_mtu + $enable_tunneling = false + $network_vlan_ranges_physnet2 = try_get_value($neutron_config, 'L2/phys_nets/physnet2/vlan_range') + $network_vlan_ranges = ["physnet2:${network_vlan_ranges_physnet2}"] + $physnet2_bridge = try_get_value($neutron_config, 'L2/phys_nets/physnet2/bridge') + $physnet2 = "physnet2:${physnet2_bridge}" + $physnet_ironic_bridge = try_get_value($neutron_config, 'L2/phys_nets/physnet-ironic/bridge', false) + + if $physnet_ironic_bridge { + $physnet_ironic = "physnet-ironic:${physnet_ironic_bridge}" + }else { + $physnet_ironic = [] + } + + $physnets_array = [$physnet2, $physnet_ironic] + $bridge_mappings = delete_undef_values($physnets_array) + $physical_network_mtus = 
["physnet2:${physical_net_mtu}"] + $tunnel_id_ranges = [] + $network_type = 'vlan' + } else { + $net_role_property = 'neutron/mesh' + $tunneling_ip = get_network_role_property($net_role_property, 'ipaddr') + $iface = get_network_role_property($net_role_property, 'phys_dev') + $physical_net_mtu = pick(get_transformation_property('mtu', $iface[0]), '1500') + $tunnel_id_ranges = [try_get_value($neutron_config, 'L2/tunnel_id_ranges')] + $network_vlan_ranges = [] + $physical_network_mtus = [] + + if $segmentation_type == 'gre' { + $mtu_offset = '42' + $network_type = 'gre' + } else { + # vxlan is the default segmentation type for non-vlan cases + $mtu_offset = '50' + $network_type = 'vxlan' + } + + if $physical_net_mtu { + $overlay_net_mtu = $physical_net_mtu - $mtu_offset + } else { + $overlay_net_mtu = '1500' - $mtu_offset + } + + $enable_tunneling = true + $tunnel_types = [$network_type] + } + + $type_drivers = ['local', 'flat', 'vlan', 'gre', 'vxlan'] + $tenant_network_types = ['flat', $network_type] + $mechanism_drivers = split(try_get_value($neutron_config, 'L2/mechanism_drivers', 'openvswitch,l2population'), ',') + $flat_networks = ['*'] + $vxlan_group = '224.0.0.1' + + class { 'neutron::plugins::ml2': + type_drivers => $type_drivers, + tenant_network_types => $tenant_network_types, + mechanism_drivers => $mechanism_drivers, + flat_networks => $flat_networks, + network_vlan_ranges => $network_vlan_ranges, + tunnel_id_ranges => $tunnel_id_ranges, + vxlan_group => $vxlan_group, + vni_ranges => $tunnel_id_ranges, + physical_network_mtus => $physical_network_mtus, + path_mtu => $overlay_net_mtu, + } + + class { 'neutron::agents::ml2::ovs': + bridge_mappings => $bridge_mappings, + enable_tunneling => $enable_tunneling, + local_ip => $tunneling_ip, + tunnel_types => $tunnel_types, + enable_distributed_routing => $dvr, + l2_population => $l2_population, + arp_responder => $l2_population, + manage_vswitch => false, + manage_service => true, + enabled => true, + } + + # 
Synchronize database after plugin was configured + if $primary_controller { + include ::neutron::db::sync + } + + if ! $compute { + if $neutron_server_enable { + $service_ensure = 'running' + } else { + $service_ensure = 'stopped' + } + service { 'neutron-server': + name => $::neutron::params::server_service, + enable => $neutron_server_enable, + ensure => $service_ensure, + hasstatus => true, + hasrestart => true, + tag => 'neutron-service', + } -> + exec { 'waiting-for-neutron-api': + environment => [ + "OS_TENANT_NAME=${auth_tenant}", + "OS_USERNAME=${auth_user}", + "OS_PASSWORD=${auth_password}", + "OS_AUTH_URL=${auth_url}", + "OS_REGION_NAME=${auth_region}", + "OS_ENDPOINT_TYPE=${auth_endpoint_type}", + ], + path => '/usr/sbin:/usr/bin:/sbin:/bin', + tries => '30', + try_sleep => '4', + command => 'neutron net-list --http-timeout=4 2>&1 > /dev/null', + provider => 'shell' + } + + $ha_agent = try_get_value($neutron_advanced_config, 'l2_agent_ha', true) + if $ha_agent { + #Exec<| title == 'waiting-for-neutron-api' |> -> + class { 'cluster::neutron::ovs' : + primary => $primary_controller, + } + } + } + + # Stub for upstream neutron manifests + package { 'neutron': + name => 'binutils', + ensure => 'installed', + } + +} diff --git a/f2s/resources/openstack-network-plugins-l2/meta.yaml b/f2s/resources/openstack-network-plugins-l2/meta.yaml new file mode 100644 index 00000000..1e9bfe50 --- /dev/null +++ b/f2s/resources/openstack-network-plugins-l2/meta.yaml @@ -0,0 +1,26 @@ +id: openstack-network-plugins-l2 +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + network_scheme: + value: null + neutron_advanced_configuration: + value: null + neutron_config: + value: null + puppet_modules: + value: null + quantum_settings: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-network-routers/actions/run.pp 
b/f2s/resources/openstack-network-routers/actions/run.pp new file mode 100644 index 00000000..7d2eb7cb --- /dev/null +++ b/f2s/resources/openstack-network-routers/actions/run.pp @@ -0,0 +1,32 @@ +notice('MODULAR: openstack-network/routers.pp') + +$use_neutron = hiera('use_neutron', false) + +if $use_neutron { + + $access_hash = hiera('access', { }) + $keystone_admin_tenant = pick($access_hash['tenant'], 'admin') + $neutron_config = hiera_hash('neutron_config') + $floating_net = try_get_value($neutron_config, 'default_floating_net', 'net04_ext') + $private_net = try_get_value($neutron_config, 'default_private_net', 'net04') + $default_router = try_get_value($neutron_config, 'default_router', 'router04') + $nets = $neutron_config['predefined_networks'] + + neutron_router { $default_router: + ensure => 'present', + gateway_network_name => $floating_net, + name => $default_router, + tenant_name => $keystone_admin_tenant, + } -> + + neutron_router_interface { "${default_router}:${private_net}__subnet": + ensure => 'present', + } + + if has_key($nets, 'baremetal') { + neutron_router_interface { "${default_router}:baremetal__subnet": + ensure => 'present', + require => Neutron_router[$default_router] + } + } +} diff --git a/f2s/resources/openstack-network-routers/meta.yaml b/f2s/resources/openstack-network-routers/meta.yaml new file mode 100644 index 00000000..2902337c --- /dev/null +++ b/f2s/resources/openstack-network-routers/meta.yaml @@ -0,0 +1,18 @@ +id: openstack-network-routers +handler: puppetv2 +version: '8.0' +inputs: + access: + value: null + fqdn: + value: null + neutron_config: + value: null + primary_controller: + value: null + puppet_modules: + value: null + role: + value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-network-server-config/actions/run.pp b/f2s/resources/openstack-network-server-config/actions/run.pp new file mode 100644 index 00000000..ad088e61 --- /dev/null +++ 
b/f2s/resources/openstack-network-server-config/actions/run.pp @@ -0,0 +1,95 @@ +notice('MODULAR: openstack-network/server-config.pp') + +$use_neutron = hiera('use_neutron', false) + +class neutron { } +class { 'neutron' : } + +if $use_neutron { + + $neutron_config = hiera_hash('neutron_config') + $neutron_server_enable = pick($neutron_config['neutron_server_enable'], true) + $database_vip = hiera('database_vip') + $management_vip = hiera('management_vip') + $service_endpoint = hiera('service_endpoint', $management_vip) + $nova_endpoint = hiera('nova_endpoint', $management_vip) + $nova_hash = hiera_hash('nova', { }) + $primary_controller = hiera('primary_controller', false) + + $neutron_db_password = $neutron_config['database']['passwd'] + $neutron_db_user = try_get_value($neutron_config, 'database/user', 'neutron') + $neutron_db_name = try_get_value($neutron_config, 'database/name', 'neutron') + $neutron_db_host = try_get_value($neutron_config, 'database/host', $database_vip) + + $neutron_db_uri = "mysql://${neutron_db_user}:${neutron_db_password}@${neutron_db_host}/${neutron_db_name}?&read_timeout=60" + + $auth_password = $neutron_config['keystone']['admin_password'] + $auth_user = pick($neutron_config['keystone']['admin_user'], 'neutron') + $auth_tenant = pick($neutron_config['keystone']['admin_tenant'], 'services') + $auth_region = hiera('region', 'RegionOne') + $auth_endpoint_type = 'internalURL' + + $auth_api_version = 'v2.0' + $identity_uri = "http://${service_endpoint}:5000/" + #$auth_url = "${identity_uri}${auth_api_version}" + $nova_admin_auth_url = "http://${service_endpoint}:35357/" + $nova_url = "http://${nova_endpoint}:8774/v2" + + $service_workers = pick($neutron_config['workers'], min(max($::processorcount, 2), 16)) + + $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) + $dvr = pick($neutron_advanced_config['neutron_dvr'], false) + + $nova_auth_user = pick($nova_hash['user'], 'nova') + $nova_auth_password = 
$nova_hash['user_password'] + $nova_auth_tenant = pick($nova_hash['tenant'], 'services') + + class { 'neutron::server': + sync_db => false, + + auth_password => $auth_password, + auth_tenant => $auth_tenant, + auth_region => $auth_region, + auth_user => $auth_user, + identity_uri => $identity_uri, + auth_uri => $identity_uri, + + database_retry_interval => '2', + database_connection => $neutron_db_uri, + database_max_retries => '-1', + + agent_down_time => '30', + allow_automatic_l3agent_failover => true, + + api_workers => $service_workers, + rpc_workers => $service_workers, + + router_distributed => $dvr, + enabled => false, #$neutron_server_enable, + manage_service => true, + } + + include neutron::params + tweaks::ubuntu_service_override { "$::neutron::params::server_service": + package_name => $neutron::params::server_package ? { + false => $neutron::params::package_name, + default => $neutron::params::server_package + } + } + + class { 'neutron::server::notifications': + nova_url => $nova_url, + auth_url => $nova_admin_auth_url, + username => $nova_auth_user, + tenant_name => $nova_auth_tenant, + password => $nova_auth_password, + region_name => $auth_region, + } + + # Stub for Neutron package + package { 'neutron': + name => 'binutils', + ensure => 'installed', + } + +} diff --git a/f2s/resources/openstack-network-server-config/meta.yaml b/f2s/resources/openstack-network-server-config/meta.yaml new file mode 100644 index 00000000..a75a3086 --- /dev/null +++ b/f2s/resources/openstack-network-server-config/meta.yaml @@ -0,0 +1,30 @@ +id: openstack-network-server-config +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + neutron_advanced_configuration: + value: null + neutron_config: + value: null + nova: + value: null + nova_endpoint: + value: null + primary_controller: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null + service_endpoint: + 
value: null + use_neutron: + value: null diff --git a/f2s/resources/openstack-network-server-nova/actions/run.pp b/f2s/resources/openstack-network-server-nova/actions/run.pp new file mode 100644 index 00000000..4640a581 --- /dev/null +++ b/f2s/resources/openstack-network-server-nova/actions/run.pp @@ -0,0 +1,81 @@ +notice('MODULAR: openstack-network/server-nova.pp') + +$use_neutron = hiera('use_neutron', false) + +if $use_neutron { + $neutron_config = hiera_hash('neutron_config') + $management_vip = hiera('management_vip') + $service_endpoint = hiera('service_endpoint', $management_vip) + $neutron_endpoint = hiera('neutron_endpoint', $management_vip) + $admin_password = try_get_value($neutron_config, 'keystone/admin_password') + $admin_tenant_name = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') + $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') + $region_name = hiera('region', 'RegionOne') + $auth_api_version = 'v2.0' + $admin_identity_uri = "http://${service_endpoint}:35357" + $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" + $neutron_url = "http://${neutron_endpoint}:9696" + $neutron_ovs_bridge = 'br-int' + $conf_nova = pick($neutron_config['conf_nova'], true) + $floating_net = pick($neutron_config['default_floating_net'], 'net04_ext') + + class { 'nova::network::neutron' : + neutron_admin_password => $admin_password, + neutron_admin_tenant_name => $admin_tenant_name, + neutron_region_name => $region_name, + neutron_admin_username => $admin_username, + neutron_admin_auth_url => $admin_auth_url, + neutron_url => $neutron_url, + neutron_ovs_bridge => $neutron_ovs_bridge, + } + + if $conf_nova { + include nova::params + service { 'nova-api': + ensure => 'running', + name => $nova::params::api_service_name, + } + + nova_config { 'DEFAULT/default_floating_pool': value => $floating_net } + Nova_config<| |> ~> Service['nova-api'] + } + +} else { + + $ensure_package = 'installed' + $private_interface = 
hiera('private_int', undef) + $public_interface = hiera('public_int', undef) + $fixed_range = hiera('fixed_network_range', undef) + $network_manager = hiera('network_manager', undef) + $network_config = hiera('network_config', { }) + $create_networks = true + $num_networks = hiera('num_networks', undef) + $network_size = hiera('network_size', undef) + $nameservers = hiera('dns_nameservers', undef) + $enable_nova_net = false + + class { 'nova::network' : + ensure_package => $ensure_package, + private_interface => $private_interface, + public_interface => $public_interface, + fixed_range => $fixed_range, + floating_range => false, + network_manager => $network_manager, + config_overrides => $network_config, + create_networks => $create_networks, + num_networks => $num_networks, + network_size => $network_size, + dns1 => $nameservers[0], + dns2 => $nameservers[1], + enabled => $enable_nova_net, + install_service => false, # because controller + } + + # NOTE(aglarendil): lp/1381164 + nova_config { 'DEFAULT/force_snat_range' : value => '0.0.0.0/0' } + +# ========================================================================= + + file { '/etc/nova/nova.conf' : ensure => 'present' } + +} diff --git a/f2s/resources/openstack-network-server-nova/meta.yaml b/f2s/resources/openstack-network-server-nova/meta.yaml new file mode 100644 index 00000000..a4fe1708 --- /dev/null +++ b/f2s/resources/openstack-network-server-nova/meta.yaml @@ -0,0 +1,22 @@ +id: openstack-network-server-nova +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + neutron_config: + value: null + neutron_endpoint: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + use_neutron: + value: null diff --git a/f2s/resources/pre_hiera_config/actions/run.pp b/f2s/resources/pre_hiera_config/actions/run.pp new file mode 100644 index 00000000..e23a1cb7 --- /dev/null +++ 
b/f2s/resources/pre_hiera_config/actions/run.pp @@ -0,0 +1,75 @@ +notice('MODULAR: hiera.pp') + +$deep_merge_package_name = $::osfamily ? { + /RedHat/ => 'rubygem-deep_merge', + /Debian/ => 'ruby-deep-merge', +} + +$data_dir = '/etc/hiera' +$data = [ + 'override/node/%{::fqdn}', + 'override/class/%{calling_class}', + 'override/module/%{calling_module}', + 'override/plugins', + 'override/common', + 'class/%{calling_class}', + 'module/%{calling_module}', + 'nodes', + 'globals', + 'astute' +] +$astute_data_file = '/etc/astute.yaml' +$hiera_main_config = '/etc/hiera.yaml' +$hiera_puppet_config = '/etc/puppet/hiera.yaml' +$hiera_data_file = "${data_dir}/astute.yaml" + +File { + owner => 'root', + group => 'root', + mode => '0644', +} + +$hiera_config_content = inline_template(' +--- +:backends: + - yaml + +:hierarchy: +<% @data.each do |name| -%> + - <%= name %> +<% end -%> + +:yaml: + :datadir: <%= @data_dir %> +:merge_behavior: deeper +:logger: noop +') + +file { 'hiera_data_dir' : + ensure => 'directory', + path => $data_dir, +} + +file { 'hiera_config' : + ensure => 'present', + path => $hiera_main_config, + content => $hiera_config_content, +} + +file { 'hiera_data_astute' : + ensure => 'symlink', + path => $hiera_data_file, + target => $astute_data_file, +} + +file { 'hiera_puppet_config' : + ensure => 'symlink', + path => $hiera_puppet_config, + target => $hiera_main_config, +} + +# needed to support the 'deeper' merge_behavior setting for hiera +package { 'rubygem-deep_merge': + ensure => present, + name => $deep_merge_package_name, +} diff --git a/f2s/resources/pre_hiera_config/meta.yaml b/f2s/resources/pre_hiera_config/meta.yaml new file mode 100644 index 00000000..3eada3fc --- /dev/null +++ b/f2s/resources/pre_hiera_config/meta.yaml @@ -0,0 +1,8 @@ +id: pre_hiera_config +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null diff --git a/f2s/resources/public_vip_ping/actions/run.pp 
b/f2s/resources/public_vip_ping/actions/run.pp new file mode 100644 index 00000000..d8f2eaea --- /dev/null +++ b/f2s/resources/public_vip_ping/actions/run.pp @@ -0,0 +1,17 @@ +notice('MODULAR: public_vip_ping.pp') + +prepare_network_config(hiera('network_scheme', {})) +$run_ping_checker = hiera('run_ping_checker', true) +$network_scheme = hiera('network_scheme') +$public_iface = get_network_role_property('public/vip', 'interface') +$ping_host_list = $network_scheme['endpoints'][$public_iface]['gateway'] + +if $run_ping_checker { + $vip = 'vip__public' + + cluster::virtual_ip_ping { $vip : + host_list => $ping_host_list, + } + +} + diff --git a/f2s/resources/public_vip_ping/meta.yaml b/f2s/resources/public_vip_ping/meta.yaml new file mode 100644 index 00000000..e4f7bec4 --- /dev/null +++ b/f2s/resources/public_vip_ping/meta.yaml @@ -0,0 +1,14 @@ +id: public_vip_ping +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null + run_ping_checker: + value: null diff --git a/f2s/resources/rabbitmq/actions/run.pp b/f2s/resources/rabbitmq/actions/run.pp new file mode 100644 index 00000000..6856787c --- /dev/null +++ b/f2s/resources/rabbitmq/actions/run.pp @@ -0,0 +1,165 @@ +notice('MODULAR: rabbitmq.pp') + +$network_scheme = hiera_hash('network_scheme', {}) +prepare_network_config($network_scheme) + +$queue_provider = hiera('queue_provider', 'rabbitmq') + +if $queue_provider == 'rabbitmq' { + $erlang_cookie = hiera('erlang_cookie', 'EOKOWXQREETZSHFNTPEY') + $version = hiera('rabbit_version', '3.3.5') + $deployment_mode = hiera('deployment_mode', 'ha_compact') + $amqp_port = hiera('amqp_port', '5673') + $rabbit_hash = hiera_hash('rabbit_hash', + { + 'user' => false, + 'password' => false, + } + ) + $debug = pick($rabbit_hash['debug'], hiera('debug', false)) + $enabled = pick($rabbit_hash['enabled'], true) + $use_pacemaker = pick($rabbit_hash['pacemaker'], true) + + case $::osfamily 
{ + 'RedHat': { + $command_timeout = "'-s KILL'" + $package_provider = 'yum' + } + 'Debian': { + $command_timeout = "'--signal=KILL'" + $package_provider = 'apt' + } + default: { + fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem},\ + module ${module_name} only support osfamily RedHat and Debian") + } + } + + if ($debug) { + # FIXME(aschultz): debug wasn't introduced until v3.5.0, when we upgrade + # we should change info to debug. Also don't forget to fix tests! + $rabbit_levels = '[{connection,info}]' + } else { + $rabbit_levels = '[{connection,info}]' + } + + $cluster_partition_handling = hiera('rabbit_cluster_partition_handling', 'autoheal') + $mnesia_table_loading_timeout = hiera('mnesia_table_loading_timeout', '10000') + $rabbitmq_bind_ip_address = pick(get_network_role_property('mgmt/messaging', 'ipaddr'), 'UNSET') + + # NOTE(bogdando) not a hash. Keep an indentation as is + $rabbit_tcp_listen_options = hiera('rabbit_tcp_listen_options', + '[ + binary, + {packet, raw}, + {reuseaddr, true}, + {backlog, 128}, + {nodelay, true}, + {exit_on_close, false}, + {keepalive, true} + ]' + ) + $config_kernel_variables = hiera('rabbit_config_kernel_variables', + { + 'inet_dist_listen_min' => '41055', + 'inet_dist_listen_max' => '41055', + 'inet_default_connect_options' => '[{nodelay,true}]', + 'net_ticktime' => '10', + } + ) + $config_variables = hiera('rabbit_config_variables', + { + 'log_levels' => $rabbit_levels, + 'default_vhost' => "<<\"/\">>", + 'default_permissions' => '[<<".*">>, <<".*">>, <<".*">>]', + 'tcp_listen_options' => $rabbit_tcp_listen_options, + 'cluster_partition_handling' => $cluster_partition_handling, + 'mnesia_table_loading_timeout' => $mnesia_table_loading_timeout, + 'collect_statistics_interval' => '30000', + 'disk_free_limit' => '5000000', # Corosync checks for disk space, reduce rabbitmq check to 5M see LP#1493520 comment #15 + } + ) + $config_rabbitmq_management_variables = 
hiera('rabbit_config_management_variables', + { + 'rates_mode' => 'none' + } + ) + + $thread_pool_calc = min(100,max(12*$physicalprocessorcount,30)) + + if $deployment_mode == 'ha_compact' { + $rabbit_pid_file = '/var/run/rabbitmq/p_pid' + } else { + $rabbit_pid_file = '/var/run/rabbitmq/pid' + } + $environment_variables = hiera('rabbit_environment_variables', + { + 'SERVER_ERL_ARGS' => "\"+K true +A${thread_pool_calc} +P 1048576\"", + 'PID_FILE' => $rabbit_pid_file, + } + ) + + if ($enabled) { + class { '::rabbitmq': + admin_enable => true, + repos_ensure => false, + package_provider => $package_provider, + package_source => undef, + service_ensure => 'running', + service_manage => true, + port => $amqp_port, + delete_guest_user => true, + default_user => $rabbit_hash['user'], + default_pass => $rabbit_hash['password'], + # NOTE(bogdando) set to true and uncomment the lines below, if puppet should create a cluster + # We don't want it as far as OCF script creates the cluster + config_cluster => false, + #erlang_cookie => $erlang_cookie, + #wipe_db_on_cookie_change => true, + #cluster_nodes => $rabbitmq_cluster_nodes, + #cluster_node_type => 'disc', + #cluster_partition_handling => $cluster_partition_handling, + version => $version, + node_ip_address => $rabbitmq_bind_ip_address, + config_kernel_variables => $config_kernel_variables, + config_rabbitmq_management_variables => $config_rabbitmq_management_variables, + config_variables => $config_variables, + environment_variables => $environment_variables, + } + + if ($use_pacemaker) { + # Install rabbit-fence daemon + class { 'cluster::rabbitmq_fence': + enabled => $enabled, + require => Class['::rabbitmq'] + } + } + + class { 'nova::rabbitmq': + enabled => $enabled, + # Do not install rabbitmq from nova classes + rabbitmq_class => false, + userid => $rabbit_hash['user'], + password => $rabbit_hash['password'], + require => Class['::rabbitmq'], + } + + if ($use_pacemaker) { + class { 'pacemaker_wrappers::rabbitmq': + 
command_timeout => $command_timeout, + debug => $debug, + erlang_cookie => $erlang_cookie, + admin_user => $rabbit_hash['user'], + admin_pass => $rabbit_hash['password'], + before => Class['nova::rabbitmq'], + } + } + + include rabbitmq::params + tweaks::ubuntu_service_override { 'rabbitmq-server': + package_name => $rabbitmq::params::package_name, + service_name => $rabbitmq::params::service_name, + } + } + +} diff --git a/f2s/resources/rabbitmq/meta.yaml b/f2s/resources/rabbitmq/meta.yaml new file mode 100644 index 00000000..7438c205 --- /dev/null +++ b/f2s/resources/rabbitmq/meta.yaml @@ -0,0 +1,40 @@ +id: rabbitmq +handler: puppetv2 +version: '8.0' +inputs: + amqp_port: + value: null + debug: + value: null + deployment_mode: + value: null + erlang_cookie: + value: null + fqdn: + value: null + mnesia_table_loading_timeout: + value: null + network_scheme: + value: null + puppet_modules: + value: null + queue_provider: + value: null + rabbit_cluster_partition_handling: + value: null + rabbit_config_kernel_variables: + value: null + rabbit_config_management_variables: + value: null + rabbit_config_variables: + value: null + rabbit_environment_variables: + value: null + rabbit_hash: + value: null + rabbit_tcp_listen_options: + value: null + rabbit_version: + value: null + role: + value: null diff --git a/f2s/resources/sahara-db/actions/run.pp b/f2s/resources/sahara-db/actions/run.pp new file mode 100644 index 00000000..68501ffc --- /dev/null +++ b/f2s/resources/sahara-db/actions/run.pp @@ -0,0 +1,57 @@ +notice('MODULAR: sahara/db.pp') + +$node_name = hiera('node_name') +$sahara_hash = hiera_hash('sahara_hash', {}) +$sahara_enabled = pick($sahara_hash['enabled'], false) +$mysql_hash = hiera_hash('mysql_hash', {}) +$management_vip = hiera('management_vip', undef) +$database_vip = hiera('database_vip', undef) + +$mysql_root_user = pick($mysql_hash['root_user'], 'root') +$mysql_db_create = pick($mysql_hash['db_create'], true) +$mysql_root_password = 
$mysql_hash['root_password'] + +$db_user = pick($sahara_hash['db_user'], 'sahara') +$db_name = pick($sahara_hash['db_name'], 'sahara') +$db_password = pick($sahara_hash['db_password'], $mysql_root_password) + +$db_host = pick($sahara_hash['db_host'], $database_vip) +$db_create = pick($sahara_hash['db_create'], $mysql_db_create) +$db_root_user = pick($sahara_hash['root_user'], $mysql_root_user) +$db_root_password = pick($sahara_hash['root_password'], $mysql_root_password) + +$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] + +validate_string($mysql_root_user) + +if $sahara_enabled and $db_create { + + class { 'galera::client': + custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), + } + + class { 'sahara::db::mysql': + user => $db_user, + password => $db_password, + dbname => $db_name, + allowed_hosts => $allowed_hosts, + } + + class { 'osnailyfacter::mysql_access': + db_host => $db_host, + db_user => $db_root_user, + db_password => $db_root_password, + } + + Class['galera::client'] -> + Class['osnailyfacter::mysql_access'] -> + Class['sahara::db::mysql'] + +} + +class mysql::config {} +include mysql::config +class mysql::server {} +include mysql::server +class sahara::api {} +include sahara::api diff --git a/f2s/resources/sahara-db/meta.yaml b/f2s/resources/sahara-db/meta.yaml new file mode 100644 index 00000000..0e3b85d4 --- /dev/null +++ b/f2s/resources/sahara-db/meta.yaml @@ -0,0 +1,22 @@ +id: sahara-db +handler: puppetv2 +version: '8.0' +inputs: + database_vip: + value: null + fqdn: + value: null + management_vip: + value: null + mysql_hash: + value: null + node_name: + value: null + puppet_modules: + value: null + role: + value: null + sahara: + value: null + sahara_hash: + value: null diff --git a/f2s/resources/sahara-keystone/actions/run.pp b/f2s/resources/sahara-keystone/actions/run.pp new file mode 100644 index 00000000..546018a0 --- /dev/null +++ b/f2s/resources/sahara-keystone/actions/run.pp @@ -0,0 +1,34 @@ +notice('MODULAR: 
sahara/keystone.pp') + +$sahara_hash = hiera_hash('sahara_hash', {}) +$public_ssl_hash = hiera('public_ssl') +$public_vip = hiera('public_vip') +$admin_address = hiera('management_vip') +$api_bind_port = '8386' +$sahara_user = pick($sahara_hash['user'], 'sahara') +$sahara_password = pick($sahara_hash['user_password']) +$tenant = pick($sahara_hash['tenant'], 'services') +$region = pick($sahara_hash['region'], hiera('region', 'RegionOne')) +$service_name = pick($sahara_hash['service_name'], 'sahara') +$public_address = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} +$public_url = "${public_protocol}://${public_address}:${api_bind_port}/v1.1/%(tenant_id)s" +$admin_url = "http://${admin_address}:${api_bind_port}/v1.1/%(tenant_id)s" + +class { 'sahara::keystone::auth': + auth_name => $sahara_user, + password => $sahara_password, + service_type => 'data_processing', + service_name => $service_name, + region => $region, + tenant => $tenant, + public_url => $public_url, + admin_url => $admin_url, + internal_url => $admin_url, +} diff --git a/f2s/resources/sahara-keystone/meta.yaml b/f2s/resources/sahara-keystone/meta.yaml new file mode 100644 index 00000000..e5716756 --- /dev/null +++ b/f2s/resources/sahara-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: sahara-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null + sahara_hash: + value: null diff --git a/f2s/resources/sahara/actions/run.pp b/f2s/resources/sahara/actions/run.pp new file mode 100644 index 00000000..82b1c42b --- /dev/null +++ b/f2s/resources/sahara/actions/run.pp @@ -0,0 +1,156 @@ +notice('MODULAR: sahara.pp') + +prepare_network_config(hiera('network_scheme', {})) + +$access_admin = 
hiera_hash('access_hash', {}) +$sahara_hash = hiera_hash('sahara_hash', {}) +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$public_ssl_hash = hiera('public_ssl') +$ceilometer_hash = hiera_hash('ceilometer_hash', {}) +$primary_controller = hiera('primary_controller') +$public_vip = hiera('public_vip') +$database_vip = hiera('database_vip', undef) +$management_vip = hiera('management_vip') +$use_neutron = hiera('use_neutron', false) +$service_endpoint = hiera('service_endpoint') +$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara') +$debug = pick($sahara_hash['debug'], hiera('debug', false)) +$verbose = pick($sahara_hash['verbose'], hiera('verbose', true)) +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', false) +$rabbit_ha_queues = hiera('rabbit_ha_queues') +$amqp_port = hiera('amqp_port') +$amqp_hosts = hiera('amqp_hosts') + +################################################################# + +if $sahara_hash['enabled'] { + $firewall_rule = '201 sahara-api' + $api_bind_port = '8386' + $api_bind_host = get_network_role_property('sahara/api', 'ipaddr') + $public_address = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, + } + $public_protocol = $public_ssl_hash['services'] ? 
{ + true => 'https', + default => 'http', + } + $sahara_user = pick($sahara_hash['user'], 'sahara') + $sahara_password = pick($sahara_hash['user_password']) + $tenant = pick($sahara_hash['tenant'], 'services') + $db_user = pick($sahara_hash['db_user'], 'sahara') + $db_name = pick($sahara_hash['db_name'], 'sahara') + $db_password = pick($sahara_hash['db_password']) + $db_host = pick($sahara_hash['db_host'], $database_vip) + $max_pool_size = min($::processorcount * 5 + 0, 30 + 0) + $max_overflow = min($::processorcount * 5 + 0, 60 + 0) + $max_retries = '-1' + $idle_timeout = '3600' + $read_timeout = '60' + $sql_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?read_timeout=${read_timeout}" + + ####### Disable upstart startup on install ####### + tweaks::ubuntu_service_override { 'sahara-api': + package_name => 'sahara', + } + + firewall { $firewall_rule : + dport => $api_bind_port, + proto => 'tcp', + action => 'accept', + } + + class { 'sahara' : + host => $api_bind_host, + port => $api_bind_port, + verbose => $verbose, + debug => $debug, + use_syslog => $use_syslog, + use_stderr => $use_stderr, + plugins => [ 'ambari', 'cdh', 'mapr', 'spark', 'vanilla' ], + log_facility => $syslog_log_facility_sahara, + database_connection => $sql_connection, + database_max_pool_size => $max_pool_size, + database_max_overflow => $max_overflow, + database_max_retries => $max_retries, + database_idle_timeout => $idle_timeout, + auth_uri => "http://${service_endpoint}:5000/v2.0/", + identity_uri => "http://${service_endpoint}:35357/", + rpc_backend => 'rabbit', + use_neutron => $use_neutron, + admin_user => $sahara_user, + admin_password => $sahara_password, + admin_tenant_name => $tenant, + rabbit_userid => $rabbit_hash['user'], + rabbit_password => $rabbit_hash['password'], + rabbit_ha_queues => $rabbit_ha_queues, + rabbit_port => $amqp_port, + rabbit_hosts => split($amqp_hosts, ',') + } + + if $public_ssl_hash['services'] { + file { '/etc/pki/tls/certs': + mode 
=> 755, + } + + file { '/etc/pki/tls/certs/public_haproxy.pem': + mode => 644, + } + + sahara_config { + 'object_store_access/public_identity_ca_file': value => '/etc/pki/tls/certs/public_haproxy.pem'; + 'object_store_access/public_object_store_ca_file': value => '/etc/pki/tls/certs/public_haproxy.pem'; + } + } + + class { 'sahara::service::api': } + + class { 'sahara::service::engine': } + + class { 'sahara::client': } + + if $ceilometer_hash['enabled'] { + class { '::sahara::notify': + enable_notifications => true, + } + } + + $haproxy_stats_url = "http://${management_vip}:10000/;csv" + + haproxy_backend_status { 'sahara' : + name => 'sahara', + url => $haproxy_stats_url, + } + + if $primary_controller { + haproxy_backend_status { 'keystone-public' : + name => 'keystone-1', + url => $haproxy_stats_url, + } + + haproxy_backend_status { 'keystone-admin' : + name => 'keystone-2', + url => $haproxy_stats_url, + } + + class { 'sahara_templates::create_templates' : + use_neutron => $use_neutron, + auth_user => $access_admin['user'], + auth_password => $access_admin['password'], + auth_tenant => $access_admin['tenant'], + auth_uri => "${public_protocol}://${public_address}:5000/v2.0/", + } + + Haproxy_backend_status['keystone-admin'] -> Haproxy_backend_status['sahara'] + Haproxy_backend_status['keystone-public'] -> Haproxy_backend_status['sahara'] + Haproxy_backend_status['sahara'] -> Class['sahara_templates::create_templates'] + } + + Firewall[$firewall_rule] -> Class['sahara::service::api'] + Service['sahara-api'] -> Haproxy_backend_status['sahara'] +} +######################### + +class openstack::firewall {} +include openstack::firewall diff --git a/f2s/resources/sahara/meta.yaml b/f2s/resources/sahara/meta.yaml new file mode 100644 index 00000000..e74714fb --- /dev/null +++ b/f2s/resources/sahara/meta.yaml @@ -0,0 +1,52 @@ +id: sahara +handler: puppetv2 +version: '8.0' +inputs: + access_hash: + value: null + amqp_hosts: + value: null + amqp_port: + value: null + 
ceilometer_hash: + value: null + database_vip: + value: null + debug: + value: null + fqdn: + value: null + management_vip: + value: null + network_scheme: + value: null + primary_controller: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + rabbit_ha_queues: + value: null + rabbit_hash: + value: null + role: + value: null + sahara: + value: null + sahara_hash: + value: null + service_endpoint: + value: null + syslog_log_facility_sahara: + value: null + use_neutron: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/ssl-add-trust-chain/actions/run.pp b/f2s/resources/ssl-add-trust-chain/actions/run.pp new file mode 100644 index 00000000..231088d0 --- /dev/null +++ b/f2s/resources/ssl-add-trust-chain/actions/run.pp @@ -0,0 +1,42 @@ +notice('MODULAR: ssl_add_trust_chain.pp') + +$public_ssl_hash = hiera('public_ssl') +$ip = hiera('public_vip') + +case $::osfamily { + /(?i)redhat/: { + file { '/etc/pki/ca-trust/source/anchors/public_haproxy.pem': + ensure => 'link', + target => '/etc/pki/tls/certs/public_haproxy.pem', + }-> + + exec { 'enable_trust': + path => '/bin:/usr/bin:/sbin:/usr/sbin', + command => 'update-ca-trust force-enable', + }-> + + exec { 'add_trust': + path => '/bin:/usr/bin:/sbin:/usr/sbin', + command => 'update-ca-trust extract', + } + } + /(?i)debian/: { + file { '/usr/local/share/ca-certificates/public_haproxy.crt': + ensure => 'link', + target => '/etc/pki/tls/certs/public_haproxy.pem', + }-> + + exec { 'add_trust': + path => '/bin:/usr/bin:/sbin:/usr/sbin', + command => 'update-ca-certificates', + } + } + default: { + fail("Unsupported OS: ${::osfamily}/${::operatingsystem}") + } +} + +host { $public_ssl_hash['hostname']: + ensure => present, + ip => $ip, +} diff --git a/f2s/resources/ssl-add-trust-chain/meta.yaml b/f2s/resources/ssl-add-trust-chain/meta.yaml new file mode 100644 index 00000000..40c9ce36 --- /dev/null +++ 
b/f2s/resources/ssl-add-trust-chain/meta.yaml @@ -0,0 +1,14 @@ +id: ssl-add-trust-chain +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/ssl-keys-saving/actions/run.pp b/f2s/resources/ssl-keys-saving/actions/run.pp new file mode 100644 index 00000000..862b1f21 --- /dev/null +++ b/f2s/resources/ssl-keys-saving/actions/run.pp @@ -0,0 +1,22 @@ +notice('MODULAR: ssl_keys_saving.pp') + +$public_ssl_hash = hiera_hash('public_ssl') +$pub_certificate_content = $public_ssl_hash['cert_data']['content'] +$base_path = "/etc/pki/tls/certs" +$pki_path = [ "/etc/pki", "/etc/pki/tls" ] +$astute_base_path = "/var/lib/astute/haproxy" + +File { + owner => 'root', + group => 'root', + mode => '0644', +} + +file { [ $pki_path, $base_path, $astute_base_path ]: + ensure => directory, +} + +file { ["$base_path/public_haproxy.pem", "$astute_base_path/public_haproxy.pem"]: + ensure => present, + content => $pub_certificate_content, +} diff --git a/f2s/resources/ssl-keys-saving/meta.yaml b/f2s/resources/ssl-keys-saving/meta.yaml new file mode 100644 index 00000000..e59e90fe --- /dev/null +++ b/f2s/resources/ssl-keys-saving/meta.yaml @@ -0,0 +1,12 @@ +id: ssl-keys-saving +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + public_ssl: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/swift-keystone/actions/run.pp b/f2s/resources/swift-keystone/actions/run.pp new file mode 100644 index 00000000..6e7e5770 --- /dev/null +++ b/f2s/resources/swift-keystone/actions/run.pp @@ -0,0 +1,45 @@ +notice('MODULAR: swift/keystone.pp') + +$swift_hash = hiera_hash('swift', {}) +$public_vip = hiera('public_vip') +$admin_address = hiera('management_vip') +$region = pick($swift_hash['region'], hiera('region', 'RegionOne')) +$public_ssl_hash = hiera('public_ssl') +$public_address = 
$public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} +$public_protocol = $public_ssl_hash['services'] ? { + true => 'https', + default => 'http', +} + +$password = $swift_hash['user_password'] +$auth_name = pick($swift_hash['auth_name'], 'swift') +$configure_endpoint = pick($swift_hash['configure_endpoint'], true) +$service_name = pick($swift_hash['service_name'], 'swift') +$tenant = pick($swift_hash['tenant'], 'services') + +validate_string($public_address) +validate_string($password) + +$public_url = "${public_protocol}://${public_address}:8080/v1/AUTH_%(tenant_id)s" +$admin_url = "http://${admin_address}:8080/v1/AUTH_%(tenant_id)s" + +# Amazon S3 endpoints +$public_url_s3 = "${public_protocol}://${public_address}:8080" +$admin_url_s3 = "http://${admin_address}:8080" + +class { '::swift::keystone::auth': + password => $password, + auth_name => $auth_name, + configure_endpoint => $configure_endpoint, + service_name => $service_name, + public_url => $public_url, + internal_url => $admin_url, + admin_url => $admin_url, + public_url_s3 => $public_url_s3, + internal_url_s3 => $admin_url_s3, + admin_url_s3 => $admin_url_s3, + region => $region, +} diff --git a/f2s/resources/swift-keystone/meta.yaml b/f2s/resources/swift-keystone/meta.yaml new file mode 100644 index 00000000..10f75482 --- /dev/null +++ b/f2s/resources/swift-keystone/meta.yaml @@ -0,0 +1,20 @@ +id: swift-keystone +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + management_vip: + value: null + public_ssl: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null + swift: + value: null diff --git a/f2s/resources/swift-rebalance-cron/actions/run.pp b/f2s/resources/swift-rebalance-cron/actions/run.pp new file mode 100644 index 00000000..272e8e34 --- /dev/null +++ b/f2s/resources/swift-rebalance-cron/actions/run.pp @@ -0,0 +1,24 @@ +notice('MODULAR: swift/rebalance_cronjob.pp') + 
+$network_metadata = hiera_hash('network_metadata') +# $network_scheme = hiera_hash('network_scheme') +# prepare_network_config($network_scheme) + +$storage_hash = hiera('storage_hash') +$swift_master_role = hiera('swift_master_role', 'primary-controller') +$ring_min_part_hours = hiera('swift_ring_min_part_hours', 1) + +# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects +if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { + $master_swift_replication_nodes = get_nodes_hash_by_roles($network_metadata, [$swift_master_role]) + $master_swift_replication_nodes_list = values($master_swift_replication_nodes) + $master_swift_replication_ip = $master_swift_replication_nodes_list[0]['network_roles']['swift/replication'] + + + # setup a cronjob to rebalance and repush rings periodically + class { 'openstack::swift::rebalance_cronjob': + ring_rebalance_period => min($ring_min_part_hours * 2, 23), + master_swift_replication_ip => $master_swift_replication_ip, + primary_proxy => hiera('is_primary_swift_proxy'), + } +} diff --git a/f2s/resources/swift-rebalance-cron/meta.yaml b/f2s/resources/swift-rebalance-cron/meta.yaml new file mode 100644 index 00000000..380a46b2 --- /dev/null +++ b/f2s/resources/swift-rebalance-cron/meta.yaml @@ -0,0 +1,20 @@ +id: swift-rebalance-cron +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + is_primary_swift_proxy: + value: null + network_metadata: + value: null + puppet_modules: + value: null + role: + value: null + storage_hash: + value: null + swift_master_role: + value: null + swift_ring_min_part_hours: + value: null diff --git a/f2s/resources/swift/actions/run.pp b/f2s/resources/swift/actions/run.pp new file mode 100644 index 00000000..aec24337 --- /dev/null +++ b/f2s/resources/swift/actions/run.pp @@ -0,0 +1,147 @@ +notice('MODULAR: swift.pp') + +$network_scheme = hiera_hash('network_scheme') +$network_metadata = hiera_hash('network_metadata') 
+prepare_network_config($network_scheme) + +$swift_hash = hiera_hash('swift_hash') +$swift_master_role = hiera('swift_master_role', 'primary-controller') +$swift_nodes = hiera_hash('swift_nodes', {}) +$swift_operator_roles = pick($swift_hash['swift_operator_roles'], ['admin', 'SwiftOperator']) +$swift_proxies_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxies', {}), 'swift/api')) +# todo(sv) replace 'management' to mgmt/memcache +$memcaches_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxy_caches', {}), 'management')) +$is_primary_swift_proxy = hiera('is_primary_swift_proxy', false) +$proxy_port = hiera('proxy_port', '8080') +$storage_hash = hiera_hash('storage_hash') +$mp_hash = hiera('mp') +$management_vip = hiera('management_vip') +$public_vip = hiera('public_vip') +$swift_api_ipaddr = get_network_role_property('swift/api', 'ipaddr') +$swift_storage_ipaddr = get_network_role_property('swift/replication', 'ipaddr') +$debug = pick($swift_hash['debug'], hiera('debug', false)) +$verbose = pick($swift_hash['verbose'], hiera('verbose', false)) +# NOTE(mattymo): Changing ring_part_power or part_hours on redeploy leads to data loss +$ring_part_power = pick($swift_hash['ring_part_power'], 10) +$ring_min_part_hours = hiera('swift_ring_min_part_hours', 1) +$deploy_swift_storage = hiera('deploy_swift_storage', true) +$deploy_swift_proxy = hiera('deploy_swift_proxy', true) +$create_keystone_auth = pick($swift_hash['create_keystone_auth'], true) +#Keystone settings +$service_endpoint = hiera('service_endpoint') +$keystone_user = pick($swift_hash['user'], 'swift') +$keystone_password = pick($swift_hash['user_password'], 'passsword') +$keystone_tenant = pick($swift_hash['tenant'], 'services') +$keystone_protocol = pick($swift_hash['auth_protocol'], 'http') +$region = hiera('region', 'RegionOne') +$service_workers = pick($swift_hash['workers'], + min(max($::processorcount, 2), 16)) + +# Use Swift if it isn't 
replaced by vCenter, Ceph for BOTH images and objects +if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { + $master_swift_proxy_nodes = get_nodes_hash_by_roles($network_metadata, [$swift_master_role]) + $master_swift_proxy_nodes_list = values($master_swift_proxy_nodes) + $master_swift_proxy_ip = regsubst($master_swift_proxy_nodes_list[0]['network_roles']['swift/api'], '\/\d+$', '') + $master_swift_replication_ip = regsubst($master_swift_proxy_nodes_list[0]['network_roles']['swift/replication'], '\/\d+$', '') + $swift_partition = hiera('swift_partition', '/var/lib/glance/node') + + if ($deploy_swift_storage){ + if !defined(File['/var/lib/glance']) { + file {'/var/lib/glance': + ensure => 'directory', + group => 'swift', + require => Package['swift'], + } -> Service <| tag == 'swift-service' |> + } else { + File['/var/lib/glance'] { + ensure => 'directory', + group => 'swift', + require +> Package['swift'], + } + File['/var/lib/glance'] -> Service <| tag == 'swift-service' |> + } + + class { 'openstack::swift::storage_node': + storage_type => false, + loopback_size => '5243780', + storage_mnt_base_dir => $swift_partition, + storage_devices => filter_hash($mp_hash,'point'), + swift_zone => $master_swift_proxy_nodes_list[0]['swift_zone'], + swift_local_net_ip => $swift_storage_ipaddr, + master_swift_proxy_ip => $master_swift_proxy_ip, + master_swift_replication_ip => $master_swift_replication_ip, + sync_rings => ! 
$is_primary_swift_proxy, + debug => $debug, + verbose => $verbose, + log_facility => 'LOG_SYSLOG', + } + } + + if $is_primary_swift_proxy { + ring_devices {'all': + storages => $swift_nodes, + require => Class['swift'], + } + } + + if $deploy_swift_proxy { + $sto_nets = get_routable_networks_for_network_role($network_scheme, 'swift/replication', ' ') + $man_nets = get_routable_networks_for_network_role($network_scheme, 'swift/api', ' ') + + class { 'openstack::swift::proxy': + swift_user_password => $swift_hash['user_password'], + swift_operator_roles => $swift_operator_roles, + swift_proxies_cache => $memcaches_addr_list, + ring_part_power => $ring_part_power, + primary_proxy => $is_primary_swift_proxy, + swift_proxy_local_ipaddr => $swift_api_ipaddr, + swift_replication_local_ipaddr => $swift_storage_ipaddr, + master_swift_proxy_ip => $master_swift_proxy_ip, + master_swift_replication_ip => $master_swift_replication_ip, + proxy_port => $proxy_port, + proxy_workers => $service_workers, + debug => $debug, + verbose => $verbose, + log_facility => 'LOG_SYSLOG', + ceilometer => hiera('use_ceilometer',false), + ring_min_part_hours => $ring_min_part_hours, + admin_user => $keystone_user, + admin_tenant_name => $keystone_tenant, + admin_password => $keystone_password, + auth_host => $service_endpoint, + auth_protocol => $keystone_protocol, + } -> + class { 'openstack::swift::status': + endpoint => "http://${swift_api_ipaddr}:${proxy_port}", + vip => $management_vip, + only_from => "127.0.0.1 240.0.0.2 ${sto_nets} ${man_nets}", + con_timeout => 5 + } -> + class { 'swift::dispersion': + auth_url => "http://$service_endpoint:5000/v2.0/", + auth_user => $keystone_user, + auth_tenant => $keystone_tenant, + auth_pass => $keystone_password, + auth_version => '2.0', + } + + Service<| tag == 'swift-service' |> -> Class['swift::dispersion'] + + if defined(Class['openstack::swift::storage_node']) { + Class['openstack::swift::storage_node'] -> Class['swift::dispersion'] + } + } +} + 
+# 'ceilometer' class is being declared inside openstack::ceilometer class +# which is declared inside openstack::controller class in the other task. +# So we need a stub here for dependency from swift::proxy::ceilometer +class ceilometer {} +include ceilometer + +# Class[Swift::Proxy::Cache] requires Class[Memcached] if memcache_servers +# contains 127.0.0.1. But we're deploying memcached in another task. So we +# need to add this stub here. +class memcached {} +include memcached + diff --git a/f2s/resources/swift/meta.yaml b/f2s/resources/swift/meta.yaml new file mode 100644 index 00000000..bcbe1073 --- /dev/null +++ b/f2s/resources/swift/meta.yaml @@ -0,0 +1,60 @@ +id: swift +handler: puppetv2 +version: '8.0' +inputs: + debug: + value: null + deploy_swift_proxy: + value: null + deploy_swift_storage: + value: null + fqdn: + value: null + is_primary_swift_proxy: + value: null + management_vip: + value: null + mp: + value: null + network_metadata: + value: null + network_scheme: + value: null + nodes: + value: null + proxy_port: + value: null + public_vip: + value: null + puppet_modules: + value: null + region: + value: null + role: + value: null + service_endpoint: + value: null + storage: + value: null + storage_hash: + value: null + swift: + value: null + swift_hash: + value: null + swift_master_role: + value: null + swift_nodes: + value: null + swift_partition: + value: null + swift_proxies: + value: null + swift_proxy_caches: + value: null + swift_ring_min_part_hours: + value: null + use_ceilometer: + value: null + verbose: + value: null diff --git a/f2s/resources/tools/actions/run.pp b/f2s/resources/tools/actions/run.pp new file mode 100644 index 00000000..f2b1ef72 --- /dev/null +++ b/f2s/resources/tools/actions/run.pp @@ -0,0 +1,42 @@ +notice('MODULAR: tools.pp') + +class { 'osnailyfacter::atop': } +class { 'osnailyfacter::ssh': } + +if $::virtual != 'physical' { + class { 'osnailyfacter::acpid': } +} + +$tools = [ + 'screen', + 'tmux', + 'man', + 'htop', + 
'tcpdump', + 'strace', + 'fuel-misc' +] + +package { $tools : + ensure => 'present', +} + +package { 'cloud-init': + ensure => 'absent', +} + +if $::osfamily == 'Debian' { + apt::conf { 'notranslations': + ensure => 'present', + content => 'Acquire::Languages "none";', + notify_update => false, + } +} + +$puppet = hiera('puppet') +class { 'osnailyfacter::puppet_pull': + modules_source => $puppet['modules'], + manifests_source => $puppet['manifests'], +} + +$deployment_mode = hiera('deployment_mode') diff --git a/f2s/resources/tools/meta.yaml b/f2s/resources/tools/meta.yaml new file mode 100644 index 00000000..480a3777 --- /dev/null +++ b/f2s/resources/tools/meta.yaml @@ -0,0 +1,14 @@ +id: tools +handler: puppetv2 +version: '8.0' +inputs: + deployment_mode: + value: null + fqdn: + value: null + puppet: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/top-role-ceph-osd/actions/run.pp b/f2s/resources/top-role-ceph-osd/actions/run.pp new file mode 100644 index 00000000..93bb6ee4 --- /dev/null +++ b/f2s/resources/top-role-ceph-osd/actions/run.pp @@ -0,0 +1,57 @@ +notice('MODULAR: ceph-osd.pp') + +# Pulling hiera +$storage_hash = hiera('storage', {}) +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') +$use_neutron = hiera('use_neutron', false) +$mp_hash = hiera('mp') +$verbose = pick($storage_hash['verbose'], true) +$debug = pick($storage_hash['debug'], hiera('debug', true)) +$use_monit = false +$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) +$keystone_hash = hiera('keystone', {}) +$access_hash = hiera('access', {}) +$network_scheme = hiera_hash('network_scheme') +$neutron_mellanox = hiera('neutron_mellanox', false) +$syslog_hash = hiera('syslog', {}) +$use_syslog = hiera('use_syslog', true) +$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') +$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') +$primary_mons = 
keys($ceph_primary_monitor_node) +$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] +prepare_network_config($network_scheme) +$ceph_cluster_network = get_network_role_property('ceph/replication', 'network') +$ceph_public_network = get_network_role_property('ceph/public', 'network') + +class {'ceph': + primary_mon => $primary_mon, + mon_hosts => keys($mon_address_map), + mon_ip_addresses => values($mon_address_map), + cluster_node_address => $public_vip, + osd_pool_default_size => $storage_hash['osd_pool_size'], + osd_pool_default_pg_num => $storage_hash['pg_num'], + osd_pool_default_pgp_num => $storage_hash['pg_num'], + use_rgw => $storage_hash['objects_ceph'], + glance_backend => $glance_backend, + rgw_pub_ip => $public_vip, + rgw_adm_ip => $management_vip, + rgw_int_ip => $management_vip, + cluster_network => $ceph_cluster_network, + public_network => $ceph_public_network, + use_syslog => $use_syslog, + syslog_log_level => hiera('syslog_log_level_ceph', 'info'), + syslog_log_facility => hiera('syslog_log_facility_ceph','LOG_LOCAL0'), + rgw_keystone_admin_token => $keystone_hash['admin_token'], + ephemeral_ceph => $storage_hash['ephemeral_ceph'], +} + +$osd_devices = split($::osd_devices_list, ' ') +#Class Ceph is already defined so it will do it's thing. 
+notify {"ceph_osd: ${osd_devices}": } +notify {"osd_devices: ${::osd_devices_list}": } +# TODO(bogdando) add monit ceph-osd services monitoring, if required + +################################################################# + +# vim: set ts=2 sw=2 et : diff --git a/f2s/resources/top-role-ceph-osd/meta.yaml b/f2s/resources/top-role-ceph-osd/meta.yaml new file mode 100644 index 00000000..17500bcd --- /dev/null +++ b/f2s/resources/top-role-ceph-osd/meta.yaml @@ -0,0 +1,10 @@ +id: top-role-ceph-osd +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/top-role-cinder-vmware/actions/run.pp b/f2s/resources/top-role-cinder-vmware/actions/run.pp new file mode 100644 index 00000000..0f96eebe --- /dev/null +++ b/f2s/resources/top-role-cinder-vmware/actions/run.pp @@ -0,0 +1,11 @@ +notice('MODULAR: cinder-vmware.pp') + +$nodes_hash = hiera('nodes', {}) +$roles = node_roles($nodes_hash, hiera('uid')) +$cinder_hash = hiera_hash('cinder_hash', {}) + +if (member($roles, 'cinder-vmware')) { + $debug = pick($cinder_hash['debug'], hiera('debug', true)) + $volumes = get_cinder_vmware_data($cinder_hash['instances'], $debug) + create_resources(vmware::cinder::vmdk, $volumes) +} diff --git a/f2s/resources/top-role-cinder-vmware/meta.yaml b/f2s/resources/top-role-cinder-vmware/meta.yaml new file mode 100644 index 00000000..b990a06d --- /dev/null +++ b/f2s/resources/top-role-cinder-vmware/meta.yaml @@ -0,0 +1,10 @@ +id: top-role-cinder-vmware +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/top-role-cinder/actions/run.pp b/f2s/resources/top-role-cinder/actions/run.pp new file mode 100644 index 00000000..5cd24839 --- /dev/null +++ b/f2s/resources/top-role-cinder/actions/run.pp @@ -0,0 +1,308 @@ +notice('MODULAR: cinder.pp') + +# Pulling hiera +prepare_network_config(hiera('network_scheme', {})) 
+$cinder_hash = hiera_hash('cinder_hash', {}) +$storage_address = get_network_role_property('cinder/iscsi', 'ipaddr') +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') +$primary_controller = hiera('primary_controller') +$use_neutron = hiera('use_neutron', false) +$mp_hash = hiera('mp') +$verbose = pick($cinder_hash['verbose'], true) +$debug = pick($cinder_hash['debug'], hiera('debug', true)) +$use_monit = false +$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) +$nodes_hash = hiera('nodes', {}) +$storage_hash = hiera_hash('storage_hash', {}) +$vcenter_hash = hiera('vcenter', {}) +$nova_hash = hiera_hash('nova_hash', {}) +$mysql_hash = hiera_hash('mysql_hash', {}) +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$glance_hash = hiera_hash('glance_hash', {}) +$keystone_hash = hiera_hash('keystone_hash', {}) +$ceilometer_hash = hiera_hash('ceilometer_hash',{}) +$access_hash = hiera('access', {}) +$network_scheme = hiera_hash('network_scheme') +$neutron_mellanox = hiera('neutron_mellanox', false) +$syslog_hash = hiera('syslog', {}) +$base_syslog_hash = hiera('base_syslog', {}) +$use_stderr = hiera('use_stderr', false) +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') +$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3') +$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') +$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') +$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') +$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') +$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0') +$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') + +$cinder_db_password = $cinder_hash[db_password] +$keystone_user = pick($cinder_hash['user'], 'cinder') +$keystone_tenant = 
pick($cinder_hash['tenant'], 'services') +$db_host = pick($cinder_hash['db_host'], hiera('database_vip')) +$cinder_db_user = pick($cinder_hash['db_user'], 'cinder') +$cinder_db_name = pick($cinder_hash['db_name'], 'cinder') + +$service_endpoint = hiera('service_endpoint') +$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") + +$keystone_auth_protocol = 'http' +$keystone_auth_host = $service_endpoint +$service_port = '5000' +$auth_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" + +# TODO: openstack_version is confusing, there's such string var in hiera and hardcoded hash +$hiera_openstack_version = hiera('openstack_version') +$openstack_version = { + 'keystone' => 'installed', + 'glance' => 'installed', + 'horizon' => 'installed', + 'nova' => 'installed', + 'novncproxy' => 'installed', + 'cinder' => 'installed', +} + +$queue_provider = hiera('queue_provider', 'rabbitmq') +$custom_mysql_setup_class='galera' + +# Do the stuff +if $neutron_mellanox { + $mellanox_mode = $neutron_mellanox['plugin'] +} else { + $mellanox_mode = 'disabled' +} + +if (!empty(filter_nodes(hiera('nodes'), 'role', 'ceph-osd')) or + $storage_hash['volumes_ceph'] or + $storage_hash['images_ceph'] or + $storage_hash['objects_ceph'] +) { + $use_ceph = true +} else { + $use_ceph = false +} + +if $use_neutron { + $neutron_config = hiera('quantum_settings') +} else { + $neutron_config = {} +} + +if $primary_controller { + if ($mellanox_mode == 'ethernet') { + $test_vm_pkg = 'cirros-testvm-mellanox' + } else { + $test_vm_pkg = 'cirros-testvm' + } + package { 'cirros-testvm' : + ensure => 'installed', + name => $test_vm_pkg, + } +} + +if !$rabbit_hash['user'] { + $rabbit_hash['user'] = 'nova' +} + +if ! 
$use_neutron { + $floating_ips_range = hiera('floating_network_range') +} +$floating_hash = {} + +##CALCULATED PARAMETERS + + +##NO NEED TO CHANGE + +$node = filter_nodes($nodes_hash, 'name', $::hostname) +if empty($node) { + fail("Node $::hostname is not defined in the hash structure") +} + +$roles = node_roles($nodes_hash, hiera('uid')) +$mountpoints = filter_hash($mp_hash,'point') + +# SQLAlchemy backend configuration +$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) +$max_overflow = min($::processorcount * 5 + 0, 60 + 0) +$max_retries = '-1' +$idle_timeout = '3600' + +# Determine who should get the volume service + +if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { + $manage_volumes = 'iscsi' +} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { + $manage_volumes = 'vmdk' +} elsif ($storage_hash['volumes_ceph']) { + $manage_volumes = 'ceph' +} else { + $manage_volumes = false +} + +#Determine who should be the default backend + +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' + $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] +} elsif ($storage_hash['images_vcenter']) { + $glance_backend = 'vmware' + $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] +} else { + $glance_backend = 'swift' + $glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ] +} + +# NOTE(bogdando) for controller nodes running Corosync with Pacemaker +# we delegate all of the monitor functions to RA instead of monit. 
+if member($roles, 'controller') or member($roles, 'primary-controller') { + $use_monit_real = false +} else { + $use_monit_real = $use_monit +} + +if $use_monit_real { + # Configure service names for monit watchdogs and 'service' system path + # FIXME(bogdando) replace service_path to systemd, once supported + include nova::params + include cinder::params + include neutron::params + $nova_compute_name = $::nova::params::compute_service_name + $nova_api_name = $::nova::params::api_service_name + $nova_network_name = $::nova::params::network_service_name + $cinder_volume_name = $::cinder::params::volume_service + $ovs_vswitchd_name = $::l23network::params::ovs_service_name + case $::osfamily { + 'RedHat' : { + $service_path = '/sbin/service' + } + 'Debian' : { + $service_path = '/usr/sbin/service' + } + default : { + fail("Unsupported osfamily: ${osfamily} for os ${operatingsystem}") + } + } +} + +#HARDCODED PARAMETERS + +$multi_host = true +$mirror_type = 'external' +Exec { logoutput => true } + + +################################################################# +# we need to evaluate ceph here, because ceph notifies/requires +# other services that are declared in openstack manifests +if ($use_ceph and !$storage_hash['volumes_lvm']) { + $primary_mons = $controllers + $primary_mon = $controllers[0]['name'] + + if ($use_neutron) { + prepare_network_config(hiera_hash('network_scheme')) + $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') + $ceph_public_network = get_network_role_property('ceph/public', 'network') + } else { + $ceph_cluster_network = hiera('storage_network_range') + $ceph_public_network = hiera('management_network_range') + } + + class {'ceph': + primary_mon => $primary_mon, + mon_hosts => nodes_with_roles($nodes_hash, ['primary-controller', 'controller', 'ceph-mon'], 'name'), + mon_ip_addresses => nodes_with_roles($nodes_hash, ['primary-controller', 'controller', 'ceph-mon'], 'internal_address'), + cluster_node_address 
=> $public_vip, + osd_pool_default_size => $storage_hash['osd_pool_size'], + osd_pool_default_pg_num => $storage_hash['pg_num'], + osd_pool_default_pgp_num => $storage_hash['pg_num'], + use_rgw => $storage_hash['objects_ceph'], + glance_backend => $glance_backend, + rgw_pub_ip => $public_vip, + rgw_adm_ip => $management_vip, + rgw_int_ip => $management_vip, + cluster_network => $ceph_cluster_network, + public_network => $ceph_public_network, + use_syslog => $use_syslog, + syslog_log_facility => $syslog_log_facility_ceph, + rgw_keystone_admin_token => $keystone_hash['admin_token'], + ephemeral_ceph => $storage_hash['ephemeral_ceph'] + } +} + +################################################################# + +include keystone::python +#FIXME(bogdando) notify services on python-amqp update, if needed +package { 'python-amqp': + ensure => present +} +if member($roles, 'controller') or member($roles, 'primary-controller') { + $bind_host = get_network_role_property('cinder/api', 'ipaddr') +} else { + $bind_host = false + # Configure auth_strategy on cinder node, if cinder and controller are + # on the same node this parameter is configured by ::cinder::api + cinder_config { + 'DEFAULT/auth_strategy': value => 'keystone'; + } +} + +# NOTE(bogdando) deploy cinder volume node with disabled cinder-volume +# service #LP1398817. The orchestration will start and enable it back +# after the deployment is done. 
+class { 'openstack::cinder': + enable_volumes => false, + sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_name}?charset=utf8&read_timeout=60", + glance_api_servers => $glance_api_servers, + bind_host => $bind_host, + queue_provider => $queue_provider, + amqp_hosts => hiera('amqp_hosts',''), + amqp_user => $rabbit_hash['user'], + amqp_password => $rabbit_hash['password'], + rabbit_ha_queues => hiera('rabbit_ha_queues', false), + volume_group => 'cinder', + manage_volumes => $manage_volumes, + iser => $storage_hash['iser'], + enabled => true, + auth_host => $service_endpoint, + iscsi_bind_host => $storage_address, + keystone_user => $keystone_user, + keystone_tenant => $keystone_tenant, + cinder_user_password => $cinder_hash[user_password], + syslog_log_facility => $syslog_log_facility_cinder, + debug => $debug, + verbose => $verbose, + use_stderr => $use_stderr, + use_syslog => $use_syslog, + max_retries => $max_retries, + max_pool_size => $max_pool_size, + max_overflow => $max_overflow, + idle_timeout => $idle_timeout, + ceilometer => $ceilometer_hash[enabled], + vmware_host_ip => $vcenter_hash['host_ip'], + vmware_host_username => $vcenter_hash['vc_user'], + vmware_host_password => $vcenter_hash['vc_password'], + auth_uri => $auth_uri, + identity_uri => $auth_uri, +} + +cinder_config { 'keymgr/fixed_key': + value => $cinder_hash[fixed_key]; +} + +# FIXME(bogdando) replace service_path and action to systemd, once supported +if $use_monit_real { + monit::process { $cinder_volume_name : + ensure => running, + matching => '/usr/bin/python /usr/bin/cinder-volume', + start_command => "${service_path} ${cinder_volume_name} restart", + stop_command => "${service_path} ${cinder_volume_name} stop", + pidfile => false, + } +} +################################################################# + +# vim: set ts=2 sw=2 et : diff --git a/f2s/resources/top-role-cinder/meta.yaml b/f2s/resources/top-role-cinder/meta.yaml new file mode 
100644 index 00000000..1e8d09c7 --- /dev/null +++ b/f2s/resources/top-role-cinder/meta.yaml @@ -0,0 +1,10 @@ +id: top-role-cinder +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/top-role-compute-vmware/actions/run.pp b/f2s/resources/top-role-compute-vmware/actions/run.pp new file mode 100644 index 00000000..16bd8e02 --- /dev/null +++ b/f2s/resources/top-role-compute-vmware/actions/run.pp @@ -0,0 +1,18 @@ +notice('MODULAR: vmware/compute-vmware.pp') + +$role = hiera('role') + +$debug = hiera('debug', true) +$ceilometer_hash = hiera('ceilometer',{}) + +$vcenter_hash = hiera('vcenter', {}) +$computes_hash = parse_vcenter_settings($vcenter_hash['computes']) + +$uid = hiera('uid') +$node_name = "node-$uid" +$defaults = { + current_node => $node_name, + vlan_interface => $vcenter_hash['esxi_vlan_interface'] + } + +create_resources(vmware::compute_vmware, $computes_hash, $defaults) diff --git a/f2s/resources/top-role-compute-vmware/meta.yaml b/f2s/resources/top-role-compute-vmware/meta.yaml new file mode 100644 index 00000000..14ecd29c --- /dev/null +++ b/f2s/resources/top-role-compute-vmware/meta.yaml @@ -0,0 +1,10 @@ +id: top-role-compute-vmware +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/top-role-compute/actions/run.pp b/f2s/resources/top-role-compute/actions/run.pp new file mode 100644 index 00000000..f2539a74 --- /dev/null +++ b/f2s/resources/top-role-compute/actions/run.pp @@ -0,0 +1,339 @@ +notice('MODULAR: compute.pp') + +$network_scheme = hiera_hash('network_scheme', {}) +$network_metadata = hiera_hash('network_metadata', {}) +prepare_network_config($network_scheme) + +# Pulling hiera +$compute_hash = hiera_hash('compute', {}) +$node_name = hiera('node_name') +$public_int = hiera('public_int', undef) +$public_vip = hiera('public_vip') +$management_vip = 
hiera('management_vip') +$database_vip = hiera('database_vip') +$service_endpoint = hiera('service_endpoint') +$primary_controller = hiera('primary_controller') +$use_neutron = hiera('use_neutron', false) +$sahara_hash = hiera('sahara', {}) +$murano_hash = hiera('murano', {}) +$mp_hash = hiera('mp') +$verbose = pick($compute_hash['verbose'], true) +$debug = pick($compute_hash['debug'], hiera('debug', true)) +$use_monit = false +$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) +$nodes_hash = hiera('nodes', {}) +$storage_hash = hiera_hash('storage_hash', {}) +$vcenter_hash = hiera('vcenter', {}) +$nova_hash = hiera_hash('nova_hash', {}) +$nova_custom_hash = hiera_hash('nova_custom_hash', {}) +$rabbit_hash = hiera_hash('rabbit_hash', {}) +$glance_hash = hiera_hash('glance_hash', {}) +$keystone_hash = hiera_hash('keystone_hash', {}) +$swift_hash = hiera_hash('swift_hash', {}) +$cinder_hash = hiera_hash('cinder_hash', {}) +$ceilometer_hash = hiera_hash('ceilometer_hash',{}) +$access_hash = hiera('access', {}) +$swift_proxies = hiera('swift_proxies') +$swift_master_role = hiera('swift_master_role', 'primary-controller') +$neutron_mellanox = hiera('neutron_mellanox', false) +$syslog_hash = hiera('syslog', {}) +$base_syslog_hash = hiera('base_syslog', {}) +$use_syslog = hiera('use_syslog', true) +$use_stderr = hiera('use_stderr', false) +$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') +$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3') +$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') +$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') +$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') +$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') +$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0') +$nova_rate_limits = hiera('nova_rate_limits') 
+$nova_report_interval = hiera('nova_report_interval') +$nova_service_down_time = hiera('nova_service_down_time') +$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") +$config_drive_format = 'vfat' + +$public_ssl_hash = hiera('public_ssl') +$vncproxy_host = $public_ssl_hash['services'] ? { + true => $public_ssl_hash['hostname'], + default => $public_vip, +} + +$db_host = pick($nova_hash['db_host'], $database_vip) + +$block_device_allocate_retries = hiera('block_device_allocate_retries', 300) +$block_device_allocate_retries_interval = hiera('block_device_allocate_retries_interval', 3) + +# TODO: openstack_version is confusing, there's such string var in hiera and hardcoded hash +$hiera_openstack_version = hiera('openstack_version') +$openstack_version = { + 'keystone' => 'installed', + 'glance' => 'installed', + 'horizon' => 'installed', + 'nova' => 'installed', + 'novncproxy' => 'installed', + 'cinder' => 'installed', +} + +$queue_provider = hiera('queue_provider', 'rabbitmq') + +# Do the stuff +if $neutron_mellanox { + $mellanox_mode = $neutron_mellanox['plugin'] +} else { + $mellanox_mode = 'disabled' +} + +if $use_neutron { + $novanetwork_params = {} + $network_provider = 'neutron' + $neutron_config = hiera_hash('quantum_settings') + $neutron_db_password = $neutron_config['database']['passwd'] + $neutron_user_password = $neutron_config['keystone']['admin_password'] + $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] + $base_mac = $neutron_config['L2']['base_mac'] +} else { + $network_provider = 'nova' + $floating_ips_range = hiera('floating_network_range') + $neutron_config = {} + $novanetwork_params = hiera('novanetwork_parameters') +} + +if $primary_controller { + if ($mellanox_mode == 'ethernet') { + $test_vm_pkg = 'cirros-testvm-mellanox' + } else { + $test_vm_pkg = 'cirros-testvm' + } + package { 'cirros-testvm' : + ensure => 'installed', + name => $test_vm_pkg, + } +} + +if 
!$rabbit_hash['user'] { + $rabbit_hash['user'] = 'nova' +} + +$floating_hash = {} + +##CALCULATED PARAMETERS + +##TODO: simply parse nodes array +$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) +$memcache_ipaddrs = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes,'mgmt/memcache'))) +$roles = $network_metadata['nodes'][$node_name]['node_roles'] +$mountpoints = filter_hash($mp_hash,'point') + +# SQLAlchemy backend configuration +$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) +$max_overflow = min($::processorcount * 5 + 0, 60 + 0) +$max_retries = '-1' +$idle_timeout = '3600' + +if ($storage_hash['volumes_lvm']) { + nova_config { 'keymgr/fixed_key': + value => $cinder_hash[fixed_key]; + } +} + +# Determine who should get the volume service + +if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { + $manage_volumes = 'iscsi' +} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { + $manage_volumes = 'vmdk' +} elsif ($storage_hash['volumes_ceph']) { + $manage_volumes = 'ceph' +} else { + $manage_volumes = false +} + +#Determine who should be the default backend + +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' + $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] +} elsif ($storage_hash['images_vcenter']) { + $glance_backend = 'vmware' + $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] +} else { + $glance_backend = 'swift' + $glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ] +} + +# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects +if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { + $use_swift = true +} else { + $use_swift = false +} + +# NOTE(bogdando) for controller nodes running Corosync with Pacemaker +# we delegate all of the monitor functions to RA instead of monit. 
+if member($roles, 'controller') or member($roles, 'primary-controller') { + $use_monit_real = false +} else { + $use_monit_real = $use_monit +} + +if $use_monit_real { + # Configure service names for monit watchdogs and 'service' system path + # FIXME(bogdando) replace service_path to systemd, once supported + include nova::params + include cinder::params + include neutron::params + $nova_compute_name = $::nova::params::compute_service_name + $nova_api_name = $::nova::params::api_service_name + $nova_network_name = $::nova::params::network_service_name + $cinder_volume_name = $::cinder::params::volume_service + $ovs_vswitchd_name = $::l23network::params::ovs_service_name + case $::osfamily { + 'RedHat' : { + $service_path = '/sbin/service' + } + 'Debian' : { + $service_path = '/usr/sbin/service' + } + default : { + fail("Unsupported osfamily: ${osfamily} for os ${operatingsystem}") + } + } +} + +#HARDCODED PARAMETERS +if hiera('use_vcenter', false) { + $multi_host = false +} else { + $multi_host = true +} + +$mirror_type = 'external' +Exec { logoutput => true } + +include osnailyfacter::test_compute + +if ($::mellanox_mode == 'ethernet') { + $neutron_private_net = pick($neutron_config['default_private_net'], 'net04') + $physnet = $neutron_config['predefined_networks'][$neutron_private_net]['L2']['physnet'] + class { 'mellanox_openstack::compute': + physnet => $physnet, + physifc => $neutron_mellanox['physical_port'], + } +} + +# NOTE(bogdando) deploy compute node with disabled nova-compute +# service #LP1398817. The orchestration will start and enable it back +# after the deployment is done. +# FIXME(bogdando) This should be changed once the host aggregates implemented, bp disable-new-computes +class { 'openstack::compute': + enabled => false, + public_interface => $public_int ? { undef=>'', default=>$public_int}, + private_interface => $use_neutron ? 
{ true=>false, default=>hiera('private_int', undef)}, + internal_address => get_network_role_property('nova/api', 'ipaddr'), + libvirt_type => hiera('libvirt_type', undef), + fixed_range => $use_neutron ? { true=>false, default=>hiera('fixed_network_range', undef)}, + network_manager => hiera('network_manager', undef), + network_config => hiera('network_config', {}), + multi_host => $multi_host, + queue_provider => $queue_provider, + amqp_hosts => hiera('amqp_hosts',''), + amqp_user => $rabbit_hash['user'], + amqp_password => $rabbit_hash['password'], + rabbit_ha_queues => $rabbit_ha_queues, + auto_assign_floating_ip => $auto_assign_floating_ip, + glance_api_servers => $glance_api_servers, + vncproxy_host => $vncproxy_host, + vncserver_listen => '0.0.0.0', + migration_support => true, + debug => $debug, + verbose => $verbose, + use_stderr => $use_stderr, + cinder_volume_group => 'cinder', + vnc_enabled => true, + manage_volumes => $manage_volumes, + nova_user_password => $nova_hash[user_password], + nova_hash => $nova_hash, + cache_server_ip => $memcache_ipaddrs, + service_endpoint => $service_endpoint, + cinder => true, + cinder_iscsi_bind_addr => get_network_role_property('cinder/iscsi', 'ipaddr'), + cinder_user_password => $cinder_hash[user_password], + cinder_db_password => $cinder_hash[db_password], + ceilometer => $ceilometer_hash[enabled], + ceilometer_metering_secret => $ceilometer_hash[metering_secret], + ceilometer_user_password => $ceilometer_hash[user_password], + db_host => $db_host, + network_provider => $network_provider, + neutron_user_password => $use_neutron ? 
{ true=>$neutron_config['keystone']['admin_password'], default=>undef}, + base_mac => $base_mac, + + use_syslog => $use_syslog, + syslog_log_facility => $syslog_log_facility_nova, + syslog_log_facility_neutron => $syslog_log_facility_neutron, + nova_rate_limits => $nova_rate_limits, + nova_report_interval => $nova_report_interval, + nova_service_down_time => $nova_service_down_time, + state_path => $nova_hash[state_path], + neutron_settings => $neutron_config, + storage_hash => $storage_hash, + config_drive_format => $config_drive_format, +} + +# Required for fping API extension, see LP#1486404 +ensure_packages('fping') + +$nova_config_hash = { + 'DEFAULT/resume_guests_state_on_host_boot' => { value => hiera('resume_guests_state_on_host_boot', 'False') }, + 'DEFAULT/use_cow_images' => { value => hiera('use_cow_images', 'True') }, + 'DEFAULT/block_device_allocate_retries' => { value => $block_device_allocate_retries }, + 'DEFAULT/block_device_allocate_retries_interval' => { value => $block_device_allocate_retries_interval }, + 'libvirt/libvirt_inject_key' => { value => 'true' }, + 'libvirt/libvirt_inject_password' => { value => 'true' }, +} + +$nova_complete_hash = merge($nova_config_hash, $nova_custom_hash) + +class {'nova::config': + nova_config => $nova_complete_hash, +} + +# Configure monit watchdogs +# FIXME(bogdando) replace service_path and action to systemd, once supported +if $use_monit_real { + monit::process { $nova_compute_name : + ensure => running, + matching => '/usr/bin/python /usr/bin/nova-compute', + start_command => "${service_path} ${nova_compute_name} restart", + stop_command => "${service_path} ${nova_compute_name} stop", + pidfile => false, + } + if $use_neutron { + monit::process { $ovs_vswitchd_name : + ensure => running, + start_command => "${service_path} ${ovs_vswitchd_name} restart", + stop_command => "${service_path} ${ovs_vswitchd_name} stop", + pidfile => '/var/run/openvswitch/ovs-vswitchd.pid', + } + } else { + monit::process { 
$nova_network_name : + ensure => running, + matching => '/usr/bin/python /usr/bin/nova-network', + start_command => "${service_path} ${nova_network_name} restart", + stop_command => "${service_path} ${nova_network_name} stop", + pidfile => false, + } + monit::process { $nova_api_name : + ensure => running, + matching => '/usr/bin/python /usr/bin/nova-api', + start_command => "${service_path} ${nova_api_name} restart", + stop_command => "${service_path} ${nova_api_name} stop", + pidfile => false, + } + } +} + +######################################################################## + + +# vim: set ts=2 sw=2 et : diff --git a/f2s/resources/top-role-compute/meta.yaml b/f2s/resources/top-role-compute/meta.yaml new file mode 100644 index 00000000..37de35d3 --- /dev/null +++ b/f2s/resources/top-role-compute/meta.yaml @@ -0,0 +1,10 @@ +id: top-role-compute +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/top-role-mongo/actions/run.pp b/f2s/resources/top-role-mongo/actions/run.pp new file mode 100644 index 00000000..9007e549 --- /dev/null +++ b/f2s/resources/top-role-mongo/actions/run.pp @@ -0,0 +1,32 @@ +notice('MODULAR: mongo.pp') + +prepare_network_config(hiera('network_scheme', {})) +$mongo_hash = hiera_hash('mongo', {}) +$mongo_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles')) +$mongo_address_map = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db') +$bind_address = get_network_role_property('mongo/db', 'ipaddr') +$use_syslog = hiera('use_syslog', true) +$debug = pick($mongo_hash['debug'], hiera('debug', false)) +$ceilometer_hash = hiera_hash('ceilometer_hash') +$roles = hiera('roles') +$replset_name = 'ceilometer' +$mongodb_port = hiera('mongodb_port', '27017') + +#################################################################### +class { 'openstack::mongo': + mongodb_bind_address => [ '127.0.0.1', $bind_address ], + 
mongodb_port => $mongodb_port, + ceilometer_metering_secret => $ceilometer_hash['metering_secret'], + ceilometer_db_password => $ceilometer_hash['db_password'], + ceilometer_replset_members => values($mongo_address_map), + replset_name => $replset_name, + mongo_version => '2.6.10', + use_syslog => $use_syslog, + debug => $debug, +} + +if !(member($roles, 'controller') or member($roles, 'primary-controller')) { + sysctl::value { 'net.ipv4.tcp_keepalive_time': + value => '300', + } +} diff --git a/f2s/resources/top-role-mongo/meta.yaml b/f2s/resources/top-role-mongo/meta.yaml new file mode 100644 index 00000000..3503a37f --- /dev/null +++ b/f2s/resources/top-role-mongo/meta.yaml @@ -0,0 +1,10 @@ +id: top-role-mongo +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/top-role-primary-mongo/actions/run.pp b/f2s/resources/top-role-primary-mongo/actions/run.pp new file mode 100644 index 00000000..9007e549 --- /dev/null +++ b/f2s/resources/top-role-primary-mongo/actions/run.pp @@ -0,0 +1,32 @@ +notice('MODULAR: mongo.pp') + +prepare_network_config(hiera('network_scheme', {})) +$mongo_hash = hiera_hash('mongo', {}) +$mongo_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles')) +$mongo_address_map = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db') +$bind_address = get_network_role_property('mongo/db', 'ipaddr') +$use_syslog = hiera('use_syslog', true) +$debug = pick($mongo_hash['debug'], hiera('debug', false)) +$ceilometer_hash = hiera_hash('ceilometer_hash') +$roles = hiera('roles') +$replset_name = 'ceilometer' +$mongodb_port = hiera('mongodb_port', '27017') + +#################################################################### +class { 'openstack::mongo': + mongodb_bind_address => [ '127.0.0.1', $bind_address ], + mongodb_port => $mongodb_port, + ceilometer_metering_secret => $ceilometer_hash['metering_secret'], + 
ceilometer_db_password => $ceilometer_hash['db_password'], + ceilometer_replset_members => values($mongo_address_map), + replset_name => $replset_name, + mongo_version => '2.6.10', + use_syslog => $use_syslog, + debug => $debug, +} + +if !(member($roles, 'controller') or member($roles, 'primary-controller')) { + sysctl::value { 'net.ipv4.tcp_keepalive_time': + value => '300', + } +} diff --git a/f2s/resources/top-role-primary-mongo/meta.yaml b/f2s/resources/top-role-primary-mongo/meta.yaml new file mode 100644 index 00000000..949bd9cf --- /dev/null +++ b/f2s/resources/top-role-primary-mongo/meta.yaml @@ -0,0 +1,10 @@ +id: top-role-primary-mongo +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/umm/actions/run.pp b/f2s/resources/umm/actions/run.pp new file mode 100644 index 00000000..5b6b58a7 --- /dev/null +++ b/f2s/resources/umm/actions/run.pp @@ -0,0 +1,3 @@ +notice('MODULAR: umm.pp') + +class {'umm': } diff --git a/f2s/resources/umm/meta.yaml b/f2s/resources/umm/meta.yaml new file mode 100644 index 00000000..5ac77d1b --- /dev/null +++ b/f2s/resources/umm/meta.yaml @@ -0,0 +1,10 @@ +id: umm +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/update_hosts/actions/run.pp b/f2s/resources/update_hosts/actions/run.pp new file mode 100644 index 00000000..e82bddff --- /dev/null +++ b/f2s/resources/update_hosts/actions/run.pp @@ -0,0 +1,5 @@ +notice('MODULAR: hosts.pp') + +class { "l23network::hosts_file": + nodes => hiera('nodes'), +} diff --git a/f2s/resources/update_hosts/meta.yaml b/f2s/resources/update_hosts/meta.yaml new file mode 100644 index 00000000..1bd6c7d3 --- /dev/null +++ b/f2s/resources/update_hosts/meta.yaml @@ -0,0 +1,10 @@ +id: update_hosts +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + nodes: + value: null + puppet_modules: + value: null diff 
--git a/f2s/resources/updatedb/actions/run.pp b/f2s/resources/updatedb/actions/run.pp new file mode 100644 index 00000000..ec929499 --- /dev/null +++ b/f2s/resources/updatedb/actions/run.pp @@ -0,0 +1,21 @@ +notice('MODULAR: ceph/updatedb.pp') + +$storage_hash = hiera('storage', {}) + +if ($storage_hash['volumes_ceph'] or + $storage_hash['images_ceph'] or + $storage_hash['objects_ceph'] +) { + $use_ceph = true +} else { + $use_ceph = false +} + +if $use_ceph { + + exec {"Ensure /var/lib/ceph in the updatedb PRUNEPATH": + path => [ '/usr/bin', '/bin' ], + command => "sed -i -Ee 's|(PRUNEPATHS *= *\"[^\"]*)|\\1 /var/lib/ceph|' /etc/updatedb.conf", + unless => "test ! -f /etc/updatedb.conf || grep 'PRUNEPATHS *= *.*/var/lib/ceph.*' /etc/updatedb.conf", + } +} diff --git a/f2s/resources/updatedb/meta.yaml b/f2s/resources/updatedb/meta.yaml new file mode 100644 index 00000000..f05727d0 --- /dev/null +++ b/f2s/resources/updatedb/meta.yaml @@ -0,0 +1,12 @@ +id: updatedb +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null + storage: + value: null diff --git a/f2s/resources/virtual_ips/actions/run.pp b/f2s/resources/virtual_ips/actions/run.pp new file mode 100644 index 00000000..b3f0fd1f --- /dev/null +++ b/f2s/resources/virtual_ips/actions/run.pp @@ -0,0 +1,3 @@ +notice('MODULAR: virtual_ips.pp') + +generate_vips() diff --git a/f2s/resources/virtual_ips/meta.yaml b/f2s/resources/virtual_ips/meta.yaml new file mode 100644 index 00000000..80e6f487 --- /dev/null +++ b/f2s/resources/virtual_ips/meta.yaml @@ -0,0 +1,14 @@ +id: virtual_ips +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + network_metadata: + value: null + network_scheme: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/vmware-vcenter/actions/run.pp b/f2s/resources/vmware-vcenter/actions/run.pp new file mode 100644 index 00000000..aabbf3d8 --- /dev/null +++ 
b/f2s/resources/vmware-vcenter/actions/run.pp @@ -0,0 +1,19 @@ +notice('MODULAR: vmware/vcenter.pp') + +$use_vcenter = hiera('use_vcenter', false) +$vcenter_hash = hiera('vcenter_hash') +$public_vip = hiera('public_vip') +$use_neutron = hiera('use_neutron', false) +$ceilometer_hash = hiera('ceilometer',{}) +$debug = pick($vcenter_hash['debug'], hiera('debug', false)) + +if $use_vcenter { + class { 'vmware': + vcenter_settings => $vcenter_hash['computes'], + vlan_interface => $vcenter_hash['esxi_vlan_interface'], + use_quantum => $use_neutron, + vnc_address => $public_vip, + ceilometer => $ceilometer_hash['enabled'], + debug => $debug, + } +} diff --git a/f2s/resources/vmware-vcenter/meta.yaml b/f2s/resources/vmware-vcenter/meta.yaml new file mode 100644 index 00000000..ccead16c --- /dev/null +++ b/f2s/resources/vmware-vcenter/meta.yaml @@ -0,0 +1,24 @@ +id: vmware-vcenter +handler: puppetv2 +version: '8.0' +inputs: + ceilometer: + value: null + debug: + value: null + fqdn: + value: null + novanetwork_parameters: + value: null + public_vip: + value: null + puppet_modules: + value: null + role: + value: null + use_neutron: + value: null + use_vcenter: + value: null + vcenter_hash: + value: null diff --git a/f2s/resources/workloads_collector_add/actions/run.pp b/f2s/resources/workloads_collector_add/actions/run.pp new file mode 100644 index 00000000..c9a8dbc6 --- /dev/null +++ b/f2s/resources/workloads_collector_add/actions/run.pp @@ -0,0 +1,21 @@ +notice('MODULAR: keystone/workloads_collector_add.pp') + +$workloads_hash = hiera('workloads_collector', {}) +$service_endpoint = hiera('service_endpoint') + +$haproxy_stats_url = "http://${service_endpoint}:10000/;csv" + +haproxy_backend_status { 'keystone-admin' : + name => 'keystone-2', + count => '200', + step => '6', + url => $haproxy_stats_url, +} -> + +class { 'openstack::workloads_collector': + enabled => $workloads_hash['enabled'], + workloads_username => $workloads_hash['username'], + workloads_password => 
$workloads_hash['password'], + workloads_tenant => $workloads_hash['tenant'], + workloads_create_user => true, +} diff --git a/f2s/resources/workloads_collector_add/meta.yaml b/f2s/resources/workloads_collector_add/meta.yaml new file mode 100644 index 00000000..19d04b77 --- /dev/null +++ b/f2s/resources/workloads_collector_add/meta.yaml @@ -0,0 +1,14 @@ +id: workloads_collector_add +handler: puppetv2 +version: '8.0' +inputs: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null + service_endpoint: + value: null + workloads_collector: + value: null diff --git a/requirements.txt b/requirements.txt index b4a24369..2ef88140 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,9 +3,9 @@ ply click==4.0 jinja2==2.7.3 networkx>=1.10 -PyYAML>=3.1.0 +PyYAML jsonschema==2.4.0 -requests==2.7.0 +requests dictdiffer==0.4.0 enum34==1.0.4 redis==2.10.3 From e2d61fd55cf9e92625ae55a2b342cdd13b3c8a88 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 15:45:12 +0200 Subject: [PATCH 25/51] Use nailgun manager and solar-celery with f2s tag --- docker-compose.yml | 2 +- f2s/resources/role_data/meta.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 0114e194..23135752 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,5 @@ solar: - image: solarproject/solar-celery + image: solarproject/solar-celery:f2s # path inside of the container should be exactly the same as outside # because solar uses absolute path to find resoruce actions files volumes: diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index 992dfa7b..b2f94dff 100644 --- a/f2s/resources/role_data/meta.yaml +++ b/f2s/resources/role_data/meta.yaml @@ -3,7 +3,7 @@ id: role_data handler: none version: 0.0.1 managers: - - managers/from_file.py + - managers/from_nailgun.py - managers/globals.py input: # should be auto-generated based on outputs from globals.pp From 
fa03f073802bbf0cfaaef4d0477adc4480f40c9f Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 19:01:38 +0200 Subject: [PATCH 26/51] Add proper client for nailgun in fsclient.py --- docker-compose.yml | 6 ++++++ f2s/fsclient.py | 10 ++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 23135752..61f5b467 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -18,6 +18,12 @@ solar: links: - riak - redis + +# docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/vagrant/templates \ +# -v /root/solar/resources:/vagrant/resources -v /root/solar/f2s:/vagrant/f2s \ +# -v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/root/.config/fuel/fuel_client.yaml \ +# --link=riak:riak --link=redis:redis --name solar solarproject/solar-celery:f2s + riak: image: tutum/riak ports: diff --git a/f2s/fsclient.py b/f2s/fsclient.py index a59658be..63fe8b2f 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -15,13 +15,15 @@ class NailgunSource(object): def nodes(self, uids): from fuelclient.objects.node import Node nodes_obj = map(Node, nodes) - return [] + return [str(n.data['id']), str(n.data['ip']), str(n.data['cluster'])] - def roles(self, roles): - return [] + def roles(self, uid): + from fuelclient.objects.node import Node + node = Node(uid) + return node.data['roles'] + node.data['pending_roles'] def master(self): - return 'master', '' + return 'master', '10.20.0.2' class DumbSource(object): From 5de8818efb9e929cf804099309c31a33ba85a30a Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 11 Nov 2015 20:37:29 +0200 Subject: [PATCH 27/51] Add additional packages and config for container --- Dockerfile | 3 +++ docker-compose.yml | 3 ++- f2s/resources/role_data/managers/from_nailgun.py | 2 +- f2s/resources/role_data/managers/globals.pp | 4 ++-- f2s/resources/role_data/managers/globals.py | 1 + f2s/vrs/fuel_node.yaml | 2 
++ run.sh | 2 +- 7 files changed, 12 insertions(+), 5 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1202cd57..cc0c548e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,5 +23,8 @@ RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --tags install RUN pip install riak peewee RUN pip install -U setuptools>=17.1 RUN pip install -U python-fuelclient +RUN apt-get install -y puppet +RUN gem install hiera +RUN mkdir -p /etc/puppet/hieradata/ CMD ["/run.sh"] diff --git a/docker-compose.yml b/docker-compose.yml index 61f5b467..fa471b3f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -21,7 +21,8 @@ solar: # docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/vagrant/templates \ # -v /root/solar/resources:/vagrant/resources -v /root/solar/f2s:/vagrant/f2s \ -# -v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/root/.config/fuel/fuel_client.yaml \ +# -v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ +# -v /root/.ssh:/root/.ssh \ # --link=riak:riak --link=redis:redis --name solar solarproject/solar-celery:f2s riak: diff --git a/f2s/resources/role_data/managers/from_nailgun.py b/f2s/resources/role_data/managers/from_nailgun.py index be2eb526..507ec0d1 100755 --- a/f2s/resources/role_data/managers/from_nailgun.py +++ b/f2s/resources/role_data/managers/from_nailgun.py @@ -10,4 +10,4 @@ ARGS = json.loads(sys.stdin.read()) env = Environment(ARGS['env']) facts = env.get_default_facts('deployment', [ARGS['uid']]) -sys.stdout.write(json.dumps(facts)) +sys.stdout.write(json.dumps(facts[0])) diff --git a/f2s/resources/role_data/managers/globals.pp b/f2s/resources/role_data/managers/globals.pp index c9bdf906..8d1de5a3 100644 --- a/f2s/resources/role_data/managers/globals.pp +++ b/f2s/resources/role_data/managers/globals.pp @@ -1,8 +1,8 @@ notice('MODULAR: globals.pp') $service_token_off = 
false - -$globals_yaml_file = "/etc/puppet/${uid}globals.yaml" +$identity = hiera('uid') +$globals_yaml_file = "/etc/puppet/${identity}globals.yaml" # remove cached globals values before anything else remove_file($globals_yaml_file) diff --git a/f2s/resources/role_data/managers/globals.py b/f2s/resources/role_data/managers/globals.py index b9c34dee..7e7da6f5 100755 --- a/f2s/resources/role_data/managers/globals.py +++ b/f2s/resources/role_data/managers/globals.py @@ -47,6 +47,7 @@ def collect_results(): def main(): prepare_hiera() + run_command() rst = collect_results() sys.stdout.write(json.dumps(rst)) diff --git a/f2s/vrs/fuel_node.yaml b/f2s/vrs/fuel_node.yaml index 1f54c360..1ea6185a 100644 --- a/f2s/vrs/fuel_node.yaml +++ b/f2s/vrs/fuel_node.yaml @@ -4,9 +4,11 @@ resources: from: resources/transport_ssh values: ssh_user: 'root' + ssh_key: '/root/.ssh/id_rsa' - id: transports{{index}} from: resources/transports values: + transports:key: ssh_transport{{index}}::ssh_key transports:user: ssh_transport{{index}}::ssh_user transports:port: ssh_transport{{index}}::ssh_port transports:name: ssh_transport{{index}}::name diff --git a/run.sh b/run.sh index e5def136..1d1f3cf7 100755 --- a/run.sh +++ b/run.sh @@ -6,6 +6,6 @@ if [ -d /solar ]; then fi #used only to start celery on docker -ansible-playbook -v -i "localhost," -c local /celery.yaml --skip-tags slave +ansible-playbook -v -i "localhost," -c local /celery.yaml --skip-tags slave,stop tail -f /var/run/celery/*.log From 3306bf83ed1c95e860fdbb18c87dbcc0daec4036 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 12:40:35 +0200 Subject: [PATCH 28/51] Fixes in client and solar inputs --- Dockerfile | 11 ++++++----- f2s/fsclient.py | 25 ++++++++++++++----------- f2s/resources/genkeys/meta.yaml | 2 +- f2s/vrs/fuel_node.yaml | 2 ++ solar/core/resource/resource.py | 2 +- solar/core/resource/virtual_resource.py | 1 + 6 files changed, 25 insertions(+), 18 deletions(-) diff --git a/Dockerfile b/Dockerfile 
index cc0c548e..b02f277e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,17 +2,17 @@ FROM ubuntu:14.04 WORKDIR / -RUN apt-get update -# Install pip's dependency: setuptools: -RUN apt-get install -y python python-dev python-distribute python-pip -RUN pip install ansible - ADD bootstrap/playbooks/celery.yaml /celery.yaml ADD resources /resources ADD templates /templates ADD run.sh /run.sh ADD f2s /f2s +RUN apt-get update +# Install pip's dependency: setuptools: +RUN apt-get install -y python python-dev python-distribute python-pip \ + libyaml-dev vim libffi-dev libssl-dev +RUN pip install ansible RUN apt-get install -y libffi-dev libssl-dev @@ -20,6 +20,7 @@ RUN pip install https://github.com/Mirantis/solar/archive/master.zip RUN pip install https://github.com/Mirantis/solar-agent/archive/master.zip RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --tags install + RUN pip install riak peewee RUN pip install -U setuptools>=17.1 RUN pip install -U python-fuelclient diff --git a/f2s/fsclient.py b/f2s/fsclient.py index 63fe8b2f..e20b661b 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -14,8 +14,9 @@ class NailgunSource(object): def nodes(self, uids): from fuelclient.objects.node import Node - nodes_obj = map(Node, nodes) - return [str(n.data['id']), str(n.data['ip']), str(n.data['cluster'])] + nodes_obj = map(Node, uids) + return [(str(n.data['id']), str(n.data['ip']), str(n.data['cluster'])) + for n in nodes_obj] def roles(self, uid): from fuelclient.objects.node import Node @@ -37,26 +38,28 @@ class DumbSource(object): def master(self): return 'master', '0.0.0.0' -source = DumbSource() +source = NailgunSource() @main.command() @click.argument('uids', nargs=-1) def nodes(uids): - master = source.master() - vr.create('master', 'f2s/vrs/fuel_node.yaml', - {'index': master[0], 'ip': master[1]}) for uid, ip, env in source.nodes(uids): vr.create('fuel_node', 'f2s/vrs/fuel_node.yaml', {'index': uid, 'ip': ip}) @main.command() -@click.argument('uids', 
nargs=-1) -def basic(uids): - master_index = source.master()[0] +def master(): + master = source.master() + vr.create('master', 'f2s/vrs/fuel_node.yaml', + {'index': master[0], 'ip': master[1]}) vr.create('genkeys', 'f2s/vrs/genkeys.yaml', { - 'node': 'node'+master_index, - 'index': master_index}) + 'node': 'node'+master[0], + 'index': master[0]}) + +@main.command() +@click.argument('uids', nargs=-1) +def prep(uids): for uid, ip, env in source.nodes(uids): vr.create('prep', 'f2s/vrs/prep.yaml', {'index': uid, 'env': env, 'node': 'node'+uid}) diff --git a/f2s/resources/genkeys/meta.yaml b/f2s/resources/genkeys/meta.yaml index a0250344..30bc5b1c 100644 --- a/f2s/resources/genkeys/meta.yaml +++ b/f2s/resources/genkeys/meta.yaml @@ -1,5 +1,5 @@ id: genkeys -handler: bash +handler: shell version: 0.0.1 inputs: uid: diff --git a/f2s/vrs/fuel_node.yaml b/f2s/vrs/fuel_node.yaml index 1ea6185a..9d04b803 100644 --- a/f2s/vrs/fuel_node.yaml +++ b/f2s/vrs/fuel_node.yaml @@ -1,5 +1,6 @@ id: fuel_node resources: +{% for i in range(1|int) %} - id: ssh_transport{{index}} from: resources/transport_ssh values: @@ -18,3 +19,4 @@ resources: name: node{{index}} ip: {{ip}} transports_id: transports{{index}}::transports_id +{% endfor %} diff --git a/solar/core/resource/resource.py b/solar/core/resource/resource.py index 9804d2ac..737483a8 100644 --- a/solar/core/resource/resource.py +++ b/solar/core/resource/resource.py @@ -234,7 +234,7 @@ class Resource(object): def resource_inputs(self): return self.db_obj.inputs - def to_dict(self, inputs=False): + def to_dict(self, inputs=True): ret = self.db_obj.to_dict() if inputs: ret['inputs'] = self.db_obj.inputs.as_dict() diff --git a/solar/core/resource/virtual_resource.py b/solar/core/resource/virtual_resource.py index 22bbc974..803bcef5 100644 --- a/solar/core/resource/virtual_resource.py +++ b/solar/core/resource/virtual_resource.py @@ -142,6 +142,7 @@ def create_resources(resources, tags=None): r = new_resources[0] node.connect(r, 
mapping={}) r.add_tags('location={}'.format(node.name)) + update_inputs(resource_name, args) if values_from: From cf5b0edafa6ee09b20442a6101d123cd37152359 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 14:08:46 +0200 Subject: [PATCH 29/51] Fix inputs in resources --- f2s/resources/apache/meta.yaml | 5 ++- f2s/resources/api-proxy/meta.yaml | 5 ++- f2s/resources/ceilometer-compute/meta.yaml | 5 ++- f2s/resources/ceilometer-controller/meta.yaml | 5 ++- f2s/resources/ceilometer-keystone/meta.yaml | 5 ++- .../ceilometer-radosgw-user/meta.yaml | 5 ++- f2s/resources/ceph-compute/meta.yaml | 5 ++- f2s/resources/ceph-mon/meta.yaml | 5 ++- f2s/resources/ceph-radosgw/meta.yaml | 5 ++- f2s/resources/ceph_create_pools/meta.yaml | 5 ++- f2s/resources/cinder-db/meta.yaml | 5 ++- f2s/resources/cinder-keystone/meta.yaml | 5 ++- f2s/resources/cluster-haproxy/meta.yaml | 5 ++- f2s/resources/cluster-vrouter/meta.yaml | 5 ++- f2s/resources/cluster/meta.yaml | 5 ++- f2s/resources/cluster_health/meta.yaml | 5 ++- .../configure_default_route/meta.yaml | 5 ++- f2s/resources/connectivity_tests/meta.yaml | 5 ++- f2s/resources/conntrackd/meta.yaml | 5 ++- .../controller_remaining_tasks/meta.yaml | 5 ++- f2s/resources/database/meta.yaml | 5 ++- .../disable_keystone_service_token/meta.yaml | 5 ++- f2s/resources/dns-client/meta.yaml | 5 ++- f2s/resources/dns-server/meta.yaml | 5 ++- .../dump_rabbitmq_definitions/actions/run.pp | 9 +++-- .../dump_rabbitmq_definitions/meta.yaml | 5 ++- .../enable_cinder_volume_service/meta.yaml | 5 ++- .../enable_nova_compute_service/meta.yaml | 5 ++- f2s/resources/enable_rados/meta.yaml | 5 ++- f2s/resources/firewall/actions/run.pp | 1 + f2s/resources/firewall/meta.yaml | 5 ++- f2s/resources/fuel_pkgs/meta.yaml | 5 ++- f2s/resources/generate_vms/meta.yaml | 5 ++- f2s/resources/genkeys/meta.yaml | 2 +- f2s/resources/glance-db/meta.yaml | 5 ++- f2s/resources/glance-keystone/meta.yaml | 5 ++- f2s/resources/glance/meta.yaml | 5 ++- 
f2s/resources/globals/meta.yaml | 5 ++- f2s/resources/heat-db/meta.yaml | 5 ++- f2s/resources/heat-keystone/meta.yaml | 5 ++- f2s/resources/heat/meta.yaml | 5 ++- f2s/resources/hiera/meta.yaml | 5 ++- f2s/resources/horizon/meta.yaml | 5 ++- f2s/resources/hosts/meta.yaml | 5 ++- f2s/resources/ironic-api/meta.yaml | 5 ++- f2s/resources/ironic-compute/meta.yaml | 5 ++- f2s/resources/ironic-conductor/meta.yaml | 5 ++- f2s/resources/ironic-db/meta.yaml | 5 ++- f2s/resources/ironic-keystone/meta.yaml | 5 ++- f2s/resources/keystone-db/meta.yaml | 5 ++- f2s/resources/keystone/actions/run.pp | 6 ++++ f2s/resources/keystone/meta.yaml | 5 ++- f2s/resources/logging/meta.yaml | 5 ++- f2s/resources/memcached/meta.yaml | 5 ++- f2s/resources/murano-db/meta.yaml | 5 ++- f2s/resources/murano-keystone/meta.yaml | 5 ++- f2s/resources/murano-rabbitmq/actions/run.pp | 13 +++++++ f2s/resources/murano-rabbitmq/meta.yaml | 7 ++++ f2s/resources/murano/actions/run.pp | 36 +++++++------------ f2s/resources/murano/meta.yaml | 5 ++- f2s/resources/netconfig/meta.yaml | 5 ++- f2s/resources/neutron-db/meta.yaml | 5 ++- f2s/resources/neutron-keystone/meta.yaml | 5 ++- f2s/resources/nova-db/meta.yaml | 5 ++- f2s/resources/nova-keystone/meta.yaml | 5 ++- f2s/resources/ntp-check/meta.yaml | 5 ++- f2s/resources/ntp-client/meta.yaml | 5 ++- f2s/resources/ntp-server/meta.yaml | 5 ++- f2s/resources/openstack-cinder/meta.yaml | 5 ++- f2s/resources/openstack-controller/meta.yaml | 5 ++- .../openstack-haproxy-ceilometer/meta.yaml | 5 ++- .../openstack-haproxy-cinder/meta.yaml | 5 ++- .../openstack-haproxy-glance/meta.yaml | 5 ++- .../openstack-haproxy-heat/meta.yaml | 5 ++- .../openstack-haproxy-horizon/meta.yaml | 5 ++- .../openstack-haproxy-ironic/meta.yaml | 5 ++- .../openstack-haproxy-keystone/meta.yaml | 5 ++- .../openstack-haproxy-murano/meta.yaml | 5 ++- .../openstack-haproxy-mysqld/meta.yaml | 5 ++- .../openstack-haproxy-neutron/meta.yaml | 5 ++- .../openstack-haproxy-nova/meta.yaml | 5 ++- 
.../openstack-haproxy-radosgw/actions/run.pp | 17 +++++---- .../openstack-haproxy-radosgw/meta.yaml | 5 ++- .../openstack-haproxy-sahara/meta.yaml | 5 ++- .../openstack-haproxy-stats/meta.yaml | 5 ++- .../openstack-haproxy-swift/meta.yaml | 5 ++- f2s/resources/openstack-haproxy/meta.yaml | 5 ++- .../actions/run.pp | 2 +- .../openstack-network-agents-dhcp/meta.yaml | 5 ++- .../actions/run.pp | 2 +- .../openstack-network-agents-l3/meta.yaml | 5 ++- .../actions/run.pp | 3 +- .../meta.yaml | 5 ++- .../openstack-network-common-config/meta.yaml | 5 ++- .../openstack-network-compute-nova/meta.yaml | 5 ++- .../openstack-network-networks/meta.yaml | 5 ++- .../actions/run.pp | 11 +++--- .../openstack-network-plugins-l2/meta.yaml | 5 ++- .../openstack-network-routers/meta.yaml | 5 ++- .../openstack-network-server-config/meta.yaml | 5 ++- .../actions/run.pp | 27 +++++++++++--- .../openstack-network-server-nova/meta.yaml | 5 ++- f2s/resources/pre_hiera_config/meta.yaml | 5 ++- f2s/resources/public_vip_ping/meta.yaml | 5 ++- f2s/resources/rabbitmq/actions/run.pp | 2 -- f2s/resources/rabbitmq/meta.yaml | 5 ++- f2s/resources/sahara-db/meta.yaml | 5 ++- f2s/resources/sahara-keystone/meta.yaml | 5 ++- f2s/resources/sahara/meta.yaml | 5 ++- f2s/resources/ssl-add-trust-chain/meta.yaml | 5 ++- f2s/resources/ssl-keys-saving/meta.yaml | 5 ++- f2s/resources/swift-keystone/meta.yaml | 5 ++- f2s/resources/swift-rebalance-cron/meta.yaml | 5 ++- f2s/resources/swift/meta.yaml | 5 ++- f2s/resources/tools/meta.yaml | 5 ++- f2s/resources/top-role-ceph-osd/meta.yaml | 5 ++- .../top-role-cinder-vmware/meta.yaml | 5 ++- f2s/resources/top-role-cinder/meta.yaml | 5 ++- .../top-role-compute-vmware/meta.yaml | 5 ++- f2s/resources/top-role-compute/meta.yaml | 5 ++- f2s/resources/top-role-mongo/meta.yaml | 5 ++- .../top-role-primary-mongo/meta.yaml | 5 ++- f2s/resources/umm/meta.yaml | 5 ++- f2s/resources/update_hosts/meta.yaml | 5 ++- f2s/resources/updatedb/meta.yaml | 5 ++- 
f2s/resources/virtual_ips/meta.yaml | 5 ++- f2s/resources/vmware-vcenter/meta.yaml | 5 ++- .../workloads_collector_add/meta.yaml | 5 ++- f2s/vrs/genkeys.yaml | 5 +++ 129 files changed, 549 insertions(+), 164 deletions(-) create mode 100644 f2s/resources/murano-rabbitmq/actions/run.pp create mode 100644 f2s/resources/murano-rabbitmq/meta.yaml diff --git a/f2s/resources/apache/meta.yaml b/f2s/resources/apache/meta.yaml index 8a642df7..a1fc1be8 100644 --- a/f2s/resources/apache/meta.yaml +++ b/f2s/resources/apache/meta.yaml @@ -1,7 +1,10 @@ id: apache handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: apache_ports: value: null fqdn: diff --git a/f2s/resources/api-proxy/meta.yaml b/f2s/resources/api-proxy/meta.yaml index 786ec7f9..4c152bc5 100644 --- a/f2s/resources/api-proxy/meta.yaml +++ b/f2s/resources/api-proxy/meta.yaml @@ -1,7 +1,10 @@ id: api-proxy handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: apache_ports: value: null fqdn: diff --git a/f2s/resources/ceilometer-compute/meta.yaml b/f2s/resources/ceilometer-compute/meta.yaml index a96b8ea0..82e53cf4 100644 --- a/f2s/resources/ceilometer-compute/meta.yaml +++ b/f2s/resources/ceilometer-compute/meta.yaml @@ -1,7 +1,10 @@ id: ceilometer-compute handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/ceilometer-controller/meta.yaml b/f2s/resources/ceilometer-controller/meta.yaml index 871b296d..8dcd033b 100644 --- a/f2s/resources/ceilometer-controller/meta.yaml +++ b/f2s/resources/ceilometer-controller/meta.yaml @@ -1,7 +1,10 @@ id: ceilometer-controller handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ceilometer: value: null debug: diff --git a/f2s/resources/ceilometer-keystone/meta.yaml b/f2s/resources/ceilometer-keystone/meta.yaml 
index 5163496e..fd8e2a4b 100644 --- a/f2s/resources/ceilometer-keystone/meta.yaml +++ b/f2s/resources/ceilometer-keystone/meta.yaml @@ -1,7 +1,10 @@ id: ceilometer-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ceilometer: value: null fqdn: diff --git a/f2s/resources/ceilometer-radosgw-user/meta.yaml b/f2s/resources/ceilometer-radosgw-user/meta.yaml index 145ea0a7..e6484dd3 100644 --- a/f2s/resources/ceilometer-radosgw-user/meta.yaml +++ b/f2s/resources/ceilometer-radosgw-user/meta.yaml @@ -1,7 +1,10 @@ id: ceilometer-radosgw-user handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ceilometer: value: null fqdn: diff --git a/f2s/resources/ceph-compute/meta.yaml b/f2s/resources/ceph-compute/meta.yaml index 8690c5f0..5a1ec76d 100644 --- a/f2s/resources/ceph-compute/meta.yaml +++ b/f2s/resources/ceph-compute/meta.yaml @@ -1,7 +1,10 @@ id: ceph-compute handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/ceph-mon/meta.yaml b/f2s/resources/ceph-mon/meta.yaml index 19d61849..e1f332b4 100644 --- a/f2s/resources/ceph-mon/meta.yaml +++ b/f2s/resources/ceph-mon/meta.yaml @@ -1,7 +1,10 @@ id: ceph-mon handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ceph_monitor_nodes: value: null ceph_primary_monitor_node: diff --git a/f2s/resources/ceph-radosgw/meta.yaml b/f2s/resources/ceph-radosgw/meta.yaml index c577ef39..e60e9dfd 100644 --- a/f2s/resources/ceph-radosgw/meta.yaml +++ b/f2s/resources/ceph-radosgw/meta.yaml @@ -1,7 +1,10 @@ id: ceph-radosgw handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ceph_monitor_nodes: value: null fqdn: diff --git a/f2s/resources/ceph_create_pools/meta.yaml b/f2s/resources/ceph_create_pools/meta.yaml 
index 97de7472..92b66ee5 100644 --- a/f2s/resources/ceph_create_pools/meta.yaml +++ b/f2s/resources/ceph_create_pools/meta.yaml @@ -1,7 +1,10 @@ id: ceph_create_pools handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/cinder-db/meta.yaml b/f2s/resources/cinder-db/meta.yaml index 6865e031..039be257 100644 --- a/f2s/resources/cinder-db/meta.yaml +++ b/f2s/resources/cinder-db/meta.yaml @@ -1,7 +1,10 @@ id: cinder-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: cinder: value: null database_vip: diff --git a/f2s/resources/cinder-keystone/meta.yaml b/f2s/resources/cinder-keystone/meta.yaml index c2ec3ec4..a715dad7 100644 --- a/f2s/resources/cinder-keystone/meta.yaml +++ b/f2s/resources/cinder-keystone/meta.yaml @@ -1,7 +1,10 @@ id: cinder-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: cinder: value: null fqdn: diff --git a/f2s/resources/cluster-haproxy/meta.yaml b/f2s/resources/cluster-haproxy/meta.yaml index c1e45fb8..d89e791b 100644 --- a/f2s/resources/cluster-haproxy/meta.yaml +++ b/f2s/resources/cluster-haproxy/meta.yaml @@ -1,7 +1,10 @@ id: cluster-haproxy handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null debug: diff --git a/f2s/resources/cluster-vrouter/meta.yaml b/f2s/resources/cluster-vrouter/meta.yaml index 6f6fd6b5..87e61697 100644 --- a/f2s/resources/cluster-vrouter/meta.yaml +++ b/f2s/resources/cluster-vrouter/meta.yaml @@ -1,7 +1,10 @@ id: cluster-vrouter handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null network_scheme: diff --git a/f2s/resources/cluster/meta.yaml b/f2s/resources/cluster/meta.yaml index a01566c4..594d2060 100644 --- 
a/f2s/resources/cluster/meta.yaml +++ b/f2s/resources/cluster/meta.yaml @@ -1,7 +1,10 @@ id: cluster handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: corosync_roles: value: null fqdn: diff --git a/f2s/resources/cluster_health/meta.yaml b/f2s/resources/cluster_health/meta.yaml index 9ffaf7b6..f68396fe 100644 --- a/f2s/resources/cluster_health/meta.yaml +++ b/f2s/resources/cluster_health/meta.yaml @@ -1,7 +1,10 @@ id: cluster_health handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: corosync_disk_monitor: value: null corosync_disk_monitor_interval: diff --git a/f2s/resources/configure_default_route/meta.yaml b/f2s/resources/configure_default_route/meta.yaml index bc69b391..2f075982 100644 --- a/f2s/resources/configure_default_route/meta.yaml +++ b/f2s/resources/configure_default_route/meta.yaml @@ -1,7 +1,10 @@ id: configure_default_route handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/connectivity_tests/meta.yaml b/f2s/resources/connectivity_tests/meta.yaml index a1408eec..7cb4d8d8 100644 --- a/f2s/resources/connectivity_tests/meta.yaml +++ b/f2s/resources/connectivity_tests/meta.yaml @@ -1,7 +1,10 @@ id: connectivity_tests handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/conntrackd/meta.yaml b/f2s/resources/conntrackd/meta.yaml index 45f8f638..315a5ca8 100644 --- a/f2s/resources/conntrackd/meta.yaml +++ b/f2s/resources/conntrackd/meta.yaml @@ -1,7 +1,10 @@ id: conntrackd handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null network_scheme: diff --git a/f2s/resources/controller_remaining_tasks/meta.yaml 
b/f2s/resources/controller_remaining_tasks/meta.yaml index e5d4d401..d90480ca 100644 --- a/f2s/resources/controller_remaining_tasks/meta.yaml +++ b/f2s/resources/controller_remaining_tasks/meta.yaml @@ -1,7 +1,10 @@ id: controller_remaining_tasks handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null neutron_mellanox: diff --git a/f2s/resources/database/meta.yaml b/f2s/resources/database/meta.yaml index 4da75f77..be3499ba 100644 --- a/f2s/resources/database/meta.yaml +++ b/f2s/resources/database/meta.yaml @@ -1,7 +1,10 @@ id: database handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_nodes: value: null database_vip: diff --git a/f2s/resources/disable_keystone_service_token/meta.yaml b/f2s/resources/disable_keystone_service_token/meta.yaml index 19882fda..5e77e649 100644 --- a/f2s/resources/disable_keystone_service_token/meta.yaml +++ b/f2s/resources/disable_keystone_service_token/meta.yaml @@ -1,7 +1,10 @@ id: disable_keystone_service_token handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null keystone_hash: diff --git a/f2s/resources/dns-client/meta.yaml b/f2s/resources/dns-client/meta.yaml index a53f2682..e2a8ef6c 100644 --- a/f2s/resources/dns-client/meta.yaml +++ b/f2s/resources/dns-client/meta.yaml @@ -1,7 +1,10 @@ id: dns-client handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vrouter_vip: diff --git a/f2s/resources/dns-server/meta.yaml b/f2s/resources/dns-server/meta.yaml index 7faa8ea2..12174361 100644 --- a/f2s/resources/dns-server/meta.yaml +++ b/f2s/resources/dns-server/meta.yaml @@ -1,7 +1,10 @@ id: dns-server handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: external_dns: value: null fqdn: diff --git 
a/f2s/resources/dump_rabbitmq_definitions/actions/run.pp b/f2s/resources/dump_rabbitmq_definitions/actions/run.pp index 063d90e1..1ddca51b 100644 --- a/f2s/resources/dump_rabbitmq_definitions/actions/run.pp +++ b/f2s/resources/dump_rabbitmq_definitions/actions/run.pp @@ -1,6 +1,7 @@ notice('MODULAR: dump_rabbitmq_definitions.pp') $definitions_dump_file = '/etc/rabbitmq/definitions' +$original_definitions_dump_file = '/etc/rabbitmq/definitions.full' $rabbit_hash = hiera_hash('rabbit_hash', { 'user' => false, @@ -16,10 +17,14 @@ if ($rabbit_enabled) { exec { 'rabbitmq-dump-definitions': path => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'], - command => "curl -u ${rabbit_credentials} ${rabbit_api_endpoint} -o ${definitions_dump_file}", + command => "curl -u ${rabbit_credentials} ${rabbit_api_endpoint} -o ${original_definitions_dump_file}", + }-> + exec { 'rabbitmq-dump-clean': + path => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'], + command => "rabbitmq-dump-clean.py < ${original_definitions_dump_file} > ${definitions_dump_file}", } - file { $definitions_dump_file: + file { [$definitions_dump_file, $original_definitions_dump_file]: ensure => file, owner => 'root', group => 'root', diff --git a/f2s/resources/dump_rabbitmq_definitions/meta.yaml b/f2s/resources/dump_rabbitmq_definitions/meta.yaml index 44e9109c..454d12e3 100644 --- a/f2s/resources/dump_rabbitmq_definitions/meta.yaml +++ b/f2s/resources/dump_rabbitmq_definitions/meta.yaml @@ -1,7 +1,10 @@ id: dump_rabbitmq_definitions handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/enable_cinder_volume_service/meta.yaml b/f2s/resources/enable_cinder_volume_service/meta.yaml index 404d0853..57957c6e 100644 --- a/f2s/resources/enable_cinder_volume_service/meta.yaml +++ b/f2s/resources/enable_cinder_volume_service/meta.yaml @@ -1,7 +1,10 @@ id: enable_cinder_volume_service handler: puppetv2 version: '8.0' 
-inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/enable_nova_compute_service/meta.yaml b/f2s/resources/enable_nova_compute_service/meta.yaml index dd1bd484..cdad50de 100644 --- a/f2s/resources/enable_nova_compute_service/meta.yaml +++ b/f2s/resources/enable_nova_compute_service/meta.yaml @@ -1,7 +1,10 @@ id: enable_nova_compute_service handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/enable_rados/meta.yaml b/f2s/resources/enable_rados/meta.yaml index b2706e00..189f1313 100644 --- a/f2s/resources/enable_rados/meta.yaml +++ b/f2s/resources/enable_rados/meta.yaml @@ -1,7 +1,10 @@ id: enable_rados handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/firewall/actions/run.pp b/f2s/resources/firewall/actions/run.pp index e22fe133..ffac1cd8 100644 --- a/f2s/resources/firewall/actions/run.pp +++ b/f2s/resources/firewall/actions/run.pp @@ -50,6 +50,7 @@ class { 'openstack::firewall' : nova_api_ip_range => get_network_role_property('nova/api', 'network'), libvirt_network => get_network_role_property('management', 'network'), keystone_network => get_network_role_property('keystone/api', 'network'), + iscsi_ip => get_network_role_property('cinder/iscsi', 'ipaddr'), } if $ironic_hash['enabled'] { diff --git a/f2s/resources/firewall/meta.yaml b/f2s/resources/firewall/meta.yaml index 927da034..eea19f9a 100644 --- a/f2s/resources/firewall/meta.yaml +++ b/f2s/resources/firewall/meta.yaml @@ -1,7 +1,10 @@ id: firewall handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null ironic: diff --git a/f2s/resources/fuel_pkgs/meta.yaml b/f2s/resources/fuel_pkgs/meta.yaml index 364f7324..788e7dc5 100644 --- 
a/f2s/resources/fuel_pkgs/meta.yaml +++ b/f2s/resources/fuel_pkgs/meta.yaml @@ -1,7 +1,10 @@ id: fuel_pkgs handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/generate_vms/meta.yaml b/f2s/resources/generate_vms/meta.yaml index dc6c9553..e84e320a 100644 --- a/f2s/resources/generate_vms/meta.yaml +++ b/f2s/resources/generate_vms/meta.yaml @@ -1,7 +1,10 @@ id: generate_vms handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/genkeys/meta.yaml b/f2s/resources/genkeys/meta.yaml index 30bc5b1c..cbd450c6 100644 --- a/f2s/resources/genkeys/meta.yaml +++ b/f2s/resources/genkeys/meta.yaml @@ -1,7 +1,7 @@ id: genkeys handler: shell version: 0.0.1 -inputs: +input: uid: schema: str! value: diff --git a/f2s/resources/glance-db/meta.yaml b/f2s/resources/glance-db/meta.yaml index 47107d52..cc0676a7 100644 --- a/f2s/resources/glance-db/meta.yaml +++ b/f2s/resources/glance-db/meta.yaml @@ -1,7 +1,10 @@ id: glance-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/glance-keystone/meta.yaml b/f2s/resources/glance-keystone/meta.yaml index 59a923d2..a53edfb5 100644 --- a/f2s/resources/glance-keystone/meta.yaml +++ b/f2s/resources/glance-keystone/meta.yaml @@ -1,7 +1,10 @@ id: glance-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null glance: diff --git a/f2s/resources/glance/meta.yaml b/f2s/resources/glance/meta.yaml index e65d4f01..ac98a933 100644 --- a/f2s/resources/glance/meta.yaml +++ b/f2s/resources/glance/meta.yaml @@ -1,7 +1,10 @@ id: glance handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: 
amqp_hosts: value: null ceilometer: diff --git a/f2s/resources/globals/meta.yaml b/f2s/resources/globals/meta.yaml index f2fb6373..d2638d91 100644 --- a/f2s/resources/globals/meta.yaml +++ b/f2s/resources/globals/meta.yaml @@ -1,7 +1,10 @@ id: globals handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: access: value: null amqp_hosts: diff --git a/f2s/resources/heat-db/meta.yaml b/f2s/resources/heat-db/meta.yaml index 5dad6756..dc3d384d 100644 --- a/f2s/resources/heat-db/meta.yaml +++ b/f2s/resources/heat-db/meta.yaml @@ -1,7 +1,10 @@ id: heat-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/heat-keystone/meta.yaml b/f2s/resources/heat-keystone/meta.yaml index f6376fe6..f06820eb 100644 --- a/f2s/resources/heat-keystone/meta.yaml +++ b/f2s/resources/heat-keystone/meta.yaml @@ -1,7 +1,10 @@ id: heat-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null heat: diff --git a/f2s/resources/heat/meta.yaml b/f2s/resources/heat/meta.yaml index 56e72f52..a088ea0e 100644 --- a/f2s/resources/heat/meta.yaml +++ b/f2s/resources/heat/meta.yaml @@ -1,7 +1,10 @@ id: heat handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: amqp_hosts: value: null database_vip: diff --git a/f2s/resources/hiera/meta.yaml b/f2s/resources/hiera/meta.yaml index 00e83c27..4affe804 100644 --- a/f2s/resources/hiera/meta.yaml +++ b/f2s/resources/hiera/meta.yaml @@ -1,7 +1,10 @@ id: hiera handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/horizon/meta.yaml b/f2s/resources/horizon/meta.yaml index 36337d34..bad3bce9 100644 --- a/f2s/resources/horizon/meta.yaml +++ 
b/f2s/resources/horizon/meta.yaml @@ -1,7 +1,10 @@ id: horizon handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: apache_ports: value: null debug: diff --git a/f2s/resources/hosts/meta.yaml b/f2s/resources/hosts/meta.yaml index 8d8ece6a..20f8d190 100644 --- a/f2s/resources/hosts/meta.yaml +++ b/f2s/resources/hosts/meta.yaml @@ -1,7 +1,10 @@ id: hosts handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null nodes: diff --git a/f2s/resources/ironic-api/meta.yaml b/f2s/resources/ironic-api/meta.yaml index caebd7d7..5e52fdc2 100644 --- a/f2s/resources/ironic-api/meta.yaml +++ b/f2s/resources/ironic-api/meta.yaml @@ -1,7 +1,10 @@ id: ironic-api handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ironic: value: null puppet_modules: diff --git a/f2s/resources/ironic-compute/meta.yaml b/f2s/resources/ironic-compute/meta.yaml index 8debec41..4227a04e 100644 --- a/f2s/resources/ironic-compute/meta.yaml +++ b/f2s/resources/ironic-compute/meta.yaml @@ -1,7 +1,10 @@ id: ironic-compute handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/ironic-conductor/meta.yaml b/f2s/resources/ironic-conductor/meta.yaml index a3762264..f6bf0ff2 100644 --- a/f2s/resources/ironic-conductor/meta.yaml +++ b/f2s/resources/ironic-conductor/meta.yaml @@ -1,7 +1,10 @@ id: ironic-conductor handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/ironic-db/meta.yaml b/f2s/resources/ironic-db/meta.yaml index 5f307f46..796de371 100644 --- a/f2s/resources/ironic-db/meta.yaml +++ b/f2s/resources/ironic-db/meta.yaml @@ -1,7 +1,10 @@ id: ironic-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: 
actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/ironic-keystone/meta.yaml b/f2s/resources/ironic-keystone/meta.yaml index ccf4ca9a..cb3221dc 100644 --- a/f2s/resources/ironic-keystone/meta.yaml +++ b/f2s/resources/ironic-keystone/meta.yaml @@ -1,7 +1,10 @@ id: ironic-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null ironic: diff --git a/f2s/resources/keystone-db/meta.yaml b/f2s/resources/keystone-db/meta.yaml index 40db7bb2..b5dbc5b7 100644 --- a/f2s/resources/keystone-db/meta.yaml +++ b/f2s/resources/keystone-db/meta.yaml @@ -1,7 +1,10 @@ id: keystone-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/keystone/actions/run.pp b/f2s/resources/keystone/actions/run.pp index d37f3a0e..839e5c6a 100644 --- a/f2s/resources/keystone/actions/run.pp +++ b/f2s/resources/keystone/actions/run.pp @@ -234,3 +234,9 @@ if ($::operatingsystem == 'Ubuntu') { package_name => 'keystone', } } + +# Override configuration options +$override_configuration = hiera_hash('configuration', {}) +override_resources { 'keystone_config': + data => $override_configuration['keystone_config'] +} diff --git a/f2s/resources/keystone/meta.yaml b/f2s/resources/keystone/meta.yaml index fce3075d..61803602 100644 --- a/f2s/resources/keystone/meta.yaml +++ b/f2s/resources/keystone/meta.yaml @@ -1,7 +1,10 @@ id: keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: access: value: null amqp_hosts: diff --git a/f2s/resources/logging/meta.yaml b/f2s/resources/logging/meta.yaml index 443636ca..e6becfd2 100644 --- a/f2s/resources/logging/meta.yaml +++ b/f2s/resources/logging/meta.yaml @@ -1,7 +1,10 @@ id: logging handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update:
actions/run.pp +input: base_syslog_hash: value: null debug: diff --git a/f2s/resources/memcached/meta.yaml b/f2s/resources/memcached/meta.yaml index a7395ba4..07f75733 100644 --- a/f2s/resources/memcached/meta.yaml +++ b/f2s/resources/memcached/meta.yaml @@ -1,7 +1,10 @@ id: memcached handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null network_scheme: diff --git a/f2s/resources/murano-db/meta.yaml b/f2s/resources/murano-db/meta.yaml index a2ebcfdc..f7a86eff 100644 --- a/f2s/resources/murano-db/meta.yaml +++ b/f2s/resources/murano-db/meta.yaml @@ -1,7 +1,10 @@ id: murano-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/murano-keystone/meta.yaml b/f2s/resources/murano-keystone/meta.yaml index 1283949d..27c270b4 100644 --- a/f2s/resources/murano-keystone/meta.yaml +++ b/f2s/resources/murano-keystone/meta.yaml @@ -1,7 +1,10 @@ id: murano-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/murano-rabbitmq/actions/run.pp b/f2s/resources/murano-rabbitmq/actions/run.pp new file mode 100644 index 00000000..fa766e01 --- /dev/null +++ b/f2s/resources/murano-rabbitmq/actions/run.pp @@ -0,0 +1,13 @@ +notice('MODULAR: murano/rabbitmq.pp') + +$rabbit_hash = hiera_hash('rabbit_hash', {}) + +################################################################# + +rabbitmq_vhost { '/murano': } + +rabbitmq_user_permissions { "${rabbit_hash['user']}@/murano": + configure_permission => '.*', + read_permission => '.*', + write_permission => '.*', +} diff --git a/f2s/resources/murano-rabbitmq/meta.yaml b/f2s/resources/murano-rabbitmq/meta.yaml new file mode 100644 index 00000000..e306d66b --- /dev/null +++ b/f2s/resources/murano-rabbitmq/meta.yaml @@ -0,0 +1,7 @@ +id: 
murano-rabbitmq +handler: puppetv2 +version: '8.0' +actions: + run: actions/run.pp + update: actions/run.pp +input: {} diff --git a/f2s/resources/murano/actions/run.pp b/f2s/resources/murano/actions/run.pp index 90750555..cb163e90 100644 --- a/f2s/resources/murano/actions/run.pp +++ b/f2s/resources/murano/actions/run.pp @@ -5,7 +5,6 @@ prepare_network_config(hiera('network_scheme', {})) $murano_hash = hiera_hash('murano_hash', {}) $murano_settings_hash = hiera_hash('murano_settings', {}) $rabbit_hash = hiera_hash('rabbit_hash', {}) -$heat_hash = hiera_hash('heat_hash', {}) $neutron_config = hiera_hash('neutron_config', {}) $node_role = hiera('node_role') $public_ip = hiera('public_vip') @@ -80,24 +79,26 @@ if $murano_hash['enabled'] { use_stderr => $use_stderr, log_facility => $syslog_log_facility_murano, database_connection => $sql_connection, - keystone_uri => "${public_protocol}://${public_address}:5000/v2.0/", - keystone_username => $murano_user, - keystone_password => $murano_hash['user_password'], - keystone_tenant => $tenant, + auth_uri => "${public_protocol}://${public_address}:5000/v2.0/", + admin_user => $murano_user, + admin_password => $murano_hash['user_password'], + admin_tenant_name => $tenant, identity_uri => "http://${service_endpoint}:35357/", use_neutron => $use_neutron, rabbit_os_user => $rabbit_hash['user'], rabbit_os_password => $rabbit_hash['password'], rabbit_os_port => $amqp_port, - rabbit_os_hosts => split($amqp_hosts, ','), + rabbit_os_host => split($amqp_hosts, ','), rabbit_ha_queues => $rabbit_ha_queues, rabbit_own_host => $public_ip, - rabbit_own_port => '55572', - rabbit_own_user => 'murano', - rabbit_own_password => $heat_hash['rabbit_password'], + rabbit_own_port => $amqp_port, + rabbit_own_vhost => 'murano', + rabbit_own_user => $rabbit_hash['user'], + rabbit_own_password => $rabbit_hash['password'], service_host => $api_bind_host, service_port => $api_bind_port, external_network => $external_network, + use_trusts => true, } class 
{ 'murano::api': @@ -114,12 +115,6 @@ if $murano_hash['enabled'] { repo_url => $repository_url, } - class { 'murano::rabbitmq': - rabbit_user => 'murano', - rabbit_password => $heat_hash['rabbit_password'], - rabbit_port => '55572', - } - $haproxy_stats_url = "http://${management_ip}:10000/;csv" haproxy_backend_status { 'murano-api' : @@ -138,20 +133,13 @@ if $murano_hash['enabled'] { url => $haproxy_stats_url, } - murano::application { 'io.murano' : - os_tenant_name => $tenant, - os_username => $murano_user, - os_password => $murano_hash['user_password'], - os_auth_url => "${public_protocol}://${public_address}:5000/v2.0/", - os_region => $region, - mandatory => true, - } + murano::application { 'io.murano' : } Haproxy_backend_status['keystone-admin'] -> Haproxy_backend_status['murano-api'] Haproxy_backend_status['keystone-public'] -> Haproxy_backend_status['murano-api'] Haproxy_backend_status['murano-api'] -> Murano::Application['io.murano'] - Service['murano-api'] -> Murano::Application<| mandatory == true |> + Service['murano-api'] -> Murano::Application['io.murano'] } Firewall[$firewall_rule] -> Class['murano::api'] diff --git a/f2s/resources/murano/meta.yaml b/f2s/resources/murano/meta.yaml index 4482611a..05d8120e 100644 --- a/f2s/resources/murano/meta.yaml +++ b/f2s/resources/murano/meta.yaml @@ -1,7 +1,10 @@ id: murano handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: amqp_hosts: value: null amqp_port: diff --git a/f2s/resources/netconfig/meta.yaml b/f2s/resources/netconfig/meta.yaml index 695c9e22..e577d6e7 100644 --- a/f2s/resources/netconfig/meta.yaml +++ b/f2s/resources/netconfig/meta.yaml @@ -1,7 +1,10 @@ id: netconfig handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: default_gateway: value: null fqdn: diff --git a/f2s/resources/neutron-db/meta.yaml b/f2s/resources/neutron-db/meta.yaml index bf93b0e4..1675f853 100644 --- 
a/f2s/resources/neutron-db/meta.yaml +++ b/f2s/resources/neutron-db/meta.yaml @@ -1,7 +1,10 @@ id: neutron-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/neutron-keystone/meta.yaml b/f2s/resources/neutron-keystone/meta.yaml index 9f3a3479..b80ae155 100644 --- a/f2s/resources/neutron-keystone/meta.yaml +++ b/f2s/resources/neutron-keystone/meta.yaml @@ -1,7 +1,10 @@ id: neutron-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/nova-db/meta.yaml b/f2s/resources/nova-db/meta.yaml index 7335e140..bcb80469 100644 --- a/f2s/resources/nova-db/meta.yaml +++ b/f2s/resources/nova-db/meta.yaml @@ -1,7 +1,10 @@ id: nova-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/nova-keystone/meta.yaml b/f2s/resources/nova-keystone/meta.yaml index 58021f68..51629daa 100644 --- a/f2s/resources/nova-keystone/meta.yaml +++ b/f2s/resources/nova-keystone/meta.yaml @@ -1,7 +1,10 @@ id: nova-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/ntp-check/meta.yaml b/f2s/resources/ntp-check/meta.yaml index a25929c7..320cee96 100644 --- a/f2s/resources/ntp-check/meta.yaml +++ b/f2s/resources/ntp-check/meta.yaml @@ -1,7 +1,10 @@ id: ntp-check handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: external_ntp: value: null fqdn: diff --git a/f2s/resources/ntp-client/meta.yaml b/f2s/resources/ntp-client/meta.yaml index 568e2f2e..c2a57442 100644 --- a/f2s/resources/ntp-client/meta.yaml +++ b/f2s/resources/ntp-client/meta.yaml @@ -1,7 +1,10 @@ id: ntp-client handler: 
puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/ntp-server/meta.yaml b/f2s/resources/ntp-server/meta.yaml index 78918ad7..83125c74 100644 --- a/f2s/resources/ntp-server/meta.yaml +++ b/f2s/resources/ntp-server/meta.yaml @@ -1,7 +1,10 @@ id: ntp-server handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: external_ntp: value: null fqdn: diff --git a/f2s/resources/openstack-cinder/meta.yaml b/f2s/resources/openstack-cinder/meta.yaml index 5b0ade7a..36155c50 100644 --- a/f2s/resources/openstack-cinder/meta.yaml +++ b/f2s/resources/openstack-cinder/meta.yaml @@ -1,7 +1,10 @@ id: openstack-cinder handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: amqp_hosts: value: null ceilometer_hash: diff --git a/f2s/resources/openstack-controller/meta.yaml b/f2s/resources/openstack-controller/meta.yaml index 05b1cf94..28f7381d 100644 --- a/f2s/resources/openstack-controller/meta.yaml +++ b/f2s/resources/openstack-controller/meta.yaml @@ -1,7 +1,10 @@ id: openstack-controller handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: access: value: null amqp_hosts: diff --git a/f2s/resources/openstack-haproxy-ceilometer/meta.yaml b/f2s/resources/openstack-haproxy-ceilometer/meta.yaml index f61cb4dc..d5cf609a 100644 --- a/f2s/resources/openstack-haproxy-ceilometer/meta.yaml +++ b/f2s/resources/openstack-haproxy-ceilometer/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-ceilometer handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ceilometer: value: null ceilometer_nodes: diff --git a/f2s/resources/openstack-haproxy-cinder/meta.yaml b/f2s/resources/openstack-haproxy-cinder/meta.yaml index e831c7d4..c8848dbf 100644 --- 
a/f2s/resources/openstack-haproxy-cinder/meta.yaml +++ b/f2s/resources/openstack-haproxy-cinder/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-cinder handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: cinder_hash: value: null cinder_ipaddresses: diff --git a/f2s/resources/openstack-haproxy-glance/meta.yaml b/f2s/resources/openstack-haproxy-glance/meta.yaml index 0b24818f..514203b7 100644 --- a/f2s/resources/openstack-haproxy-glance/meta.yaml +++ b/f2s/resources/openstack-haproxy-glance/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-glance handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null glance: diff --git a/f2s/resources/openstack-haproxy-heat/meta.yaml b/f2s/resources/openstack-haproxy-heat/meta.yaml index eaef0fa4..ebbdea8f 100644 --- a/f2s/resources/openstack-haproxy-heat/meta.yaml +++ b/f2s/resources/openstack-haproxy-heat/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-heat handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null heat: diff --git a/f2s/resources/openstack-haproxy-horizon/meta.yaml b/f2s/resources/openstack-haproxy-horizon/meta.yaml index 01b55985..fdf25e4d 100644 --- a/f2s/resources/openstack-haproxy-horizon/meta.yaml +++ b/f2s/resources/openstack-haproxy-horizon/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-horizon handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null horizon: diff --git a/f2s/resources/openstack-haproxy-ironic/meta.yaml b/f2s/resources/openstack-haproxy-ironic/meta.yaml index 88ca5135..388349f2 100644 --- a/f2s/resources/openstack-haproxy-ironic/meta.yaml +++ b/f2s/resources/openstack-haproxy-ironic/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-ironic handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp 
+input: ironic: value: null puppet_modules: diff --git a/f2s/resources/openstack-haproxy-keystone/meta.yaml b/f2s/resources/openstack-haproxy-keystone/meta.yaml index f55d40d0..98589eec 100644 --- a/f2s/resources/openstack-haproxy-keystone/meta.yaml +++ b/f2s/resources/openstack-haproxy-keystone/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null keystone: diff --git a/f2s/resources/openstack-haproxy-murano/meta.yaml b/f2s/resources/openstack-haproxy-murano/meta.yaml index 98c4a60a..99054c74 100644 --- a/f2s/resources/openstack-haproxy-murano/meta.yaml +++ b/f2s/resources/openstack-haproxy-murano/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-murano handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null murano_hash: diff --git a/f2s/resources/openstack-haproxy-mysqld/meta.yaml b/f2s/resources/openstack-haproxy-mysqld/meta.yaml index 749d697e..d7d7eb2b 100644 --- a/f2s/resources/openstack-haproxy-mysqld/meta.yaml +++ b/f2s/resources/openstack-haproxy-mysqld/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-mysqld handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: custom_mysql_setup_class: value: null database_nodes: diff --git a/f2s/resources/openstack-haproxy-neutron/meta.yaml b/f2s/resources/openstack-haproxy-neutron/meta.yaml index b99d5a34..644057c7 100644 --- a/f2s/resources/openstack-haproxy-neutron/meta.yaml +++ b/f2s/resources/openstack-haproxy-neutron/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-neutron handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/openstack-haproxy-nova/meta.yaml b/f2s/resources/openstack-haproxy-nova/meta.yaml index a4dfc852..79983279 100644 --- 
a/f2s/resources/openstack-haproxy-nova/meta.yaml +++ b/f2s/resources/openstack-haproxy-nova/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-nova handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/openstack-haproxy-radosgw/actions/run.pp b/f2s/resources/openstack-haproxy-radosgw/actions/run.pp index f5911423..81473365 100644 --- a/f2s/resources/openstack-haproxy-radosgw/actions/run.pp +++ b/f2s/resources/openstack-haproxy-radosgw/actions/run.pp @@ -3,7 +3,7 @@ notice('MODULAR: openstack-haproxy-radosgw.pp') $network_metadata = hiera_hash('network_metadata') $storage_hash = hiera_hash('storage', {}) $public_ssl_hash = hiera('public_ssl') - +$ironic_hash = hiera_hash('ironic', {}) if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { $use_swift = true @@ -23,12 +23,17 @@ if $use_radosgw { $public_virtual_ip = hiera('public_vip') $internal_virtual_ip = hiera('management_vip') + if $ironic_hash['enabled'] { + $baremetal_virtual_ip = $network_metadata['vips']['baremetal']['ipaddr'] + } + # configure radosgw ha proxy class { '::openstack::ha::radosgw': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], + internal_virtual_ip => $internal_virtual_ip, + ipaddresses => $ipaddresses, + public_virtual_ip => $public_virtual_ip, + server_names => $server_names, + public_ssl => $public_ssl_hash['services'], + baremetal_virtual_ip => $baremetal_virtual_ip, } } diff --git a/f2s/resources/openstack-haproxy-radosgw/meta.yaml b/f2s/resources/openstack-haproxy-radosgw/meta.yaml index 4fe901ff..1cbd7bae 100644 --- a/f2s/resources/openstack-haproxy-radosgw/meta.yaml +++ b/f2s/resources/openstack-haproxy-radosgw/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-radosgw 
handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null network_metadata: diff --git a/f2s/resources/openstack-haproxy-sahara/meta.yaml b/f2s/resources/openstack-haproxy-sahara/meta.yaml index fdfcbe77..aa7c4d3e 100644 --- a/f2s/resources/openstack-haproxy-sahara/meta.yaml +++ b/f2s/resources/openstack-haproxy-sahara/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-sahara handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null network_metadata: diff --git a/f2s/resources/openstack-haproxy-stats/meta.yaml b/f2s/resources/openstack-haproxy-stats/meta.yaml index 98072cdc..532c27c3 100644 --- a/f2s/resources/openstack-haproxy-stats/meta.yaml +++ b/f2s/resources/openstack-haproxy-stats/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-stats handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/openstack-haproxy-swift/meta.yaml b/f2s/resources/openstack-haproxy-swift/meta.yaml index 985d727a..f330b9be 100644 --- a/f2s/resources/openstack-haproxy-swift/meta.yaml +++ b/f2s/resources/openstack-haproxy-swift/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy-swift handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null ironic: diff --git a/f2s/resources/openstack-haproxy/meta.yaml b/f2s/resources/openstack-haproxy/meta.yaml index 7ce44222..e5284964 100644 --- a/f2s/resources/openstack-haproxy/meta.yaml +++ b/f2s/resources/openstack-haproxy/meta.yaml @@ -1,7 +1,10 @@ id: openstack-haproxy handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/openstack-network-agents-dhcp/actions/run.pp b/f2s/resources/openstack-network-agents-dhcp/actions/run.pp index 
2c8a35f8..21968843 100644 --- a/f2s/resources/openstack-network-agents-dhcp/actions/run.pp +++ b/f2s/resources/openstack-network-agents-dhcp/actions/run.pp @@ -30,7 +30,7 @@ if $use_neutron { } } - #======================== + # stub package for 'neutron::agents::dhcp' class package { 'neutron': name => 'binutils', ensure => 'installed', diff --git a/f2s/resources/openstack-network-agents-dhcp/meta.yaml b/f2s/resources/openstack-network-agents-dhcp/meta.yaml index 0a9fdba2..d32720b3 100644 --- a/f2s/resources/openstack-network-agents-dhcp/meta.yaml +++ b/f2s/resources/openstack-network-agents-dhcp/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-agents-dhcp handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: debug: value: null fqdn: diff --git a/f2s/resources/openstack-network-agents-l3/actions/run.pp b/f2s/resources/openstack-network-agents-l3/actions/run.pp index 8a275768..889c1868 100644 --- a/f2s/resources/openstack-network-agents-l3/actions/run.pp +++ b/f2s/resources/openstack-network-agents-l3/actions/run.pp @@ -50,7 +50,7 @@ if $use_neutron and ($controller or ($dvr and $compute)) { } } - #======================== + # stub package for 'neutron::agents::l3' class package { 'neutron': name => 'binutils', ensure => 'installed', diff --git a/f2s/resources/openstack-network-agents-l3/meta.yaml b/f2s/resources/openstack-network-agents-l3/meta.yaml index 83a470da..250c767b 100644 --- a/f2s/resources/openstack-network-agents-l3/meta.yaml +++ b/f2s/resources/openstack-network-agents-l3/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-agents-l3 handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: debug: value: null fqdn: diff --git a/f2s/resources/openstack-network-agents-metadata/actions/run.pp b/f2s/resources/openstack-network-agents-metadata/actions/run.pp index 56f24e3a..b4b2f9b7 100644 --- a/f2s/resources/openstack-network-agents-metadata/actions/run.pp +++ 
b/f2s/resources/openstack-network-agents-metadata/actions/run.pp @@ -38,7 +38,6 @@ if $use_neutron { metadata_ip => $nova_endpoint, manage_service => true, enabled => true, - } if $ha_agent { @@ -48,7 +47,7 @@ if $use_neutron { } } - #======================== + # stub package for 'neutron::agents::metadata' class package { 'neutron': name => 'binutils', ensure => 'installed', diff --git a/f2s/resources/openstack-network-agents-metadata/meta.yaml b/f2s/resources/openstack-network-agents-metadata/meta.yaml index 6b288cd4..75d532fe 100644 --- a/f2s/resources/openstack-network-agents-metadata/meta.yaml +++ b/f2s/resources/openstack-network-agents-metadata/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-agents-metadata handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: debug: value: null fqdn: diff --git a/f2s/resources/openstack-network-common-config/meta.yaml b/f2s/resources/openstack-network-common-config/meta.yaml index b13c01fc..c2edaad1 100644 --- a/f2s/resources/openstack-network-common-config/meta.yaml +++ b/f2s/resources/openstack-network-common-config/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-common-config handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: amqp_hosts: value: null ceilometer: diff --git a/f2s/resources/openstack-network-compute-nova/meta.yaml b/f2s/resources/openstack-network-compute-nova/meta.yaml index fda3312e..1c967c5e 100644 --- a/f2s/resources/openstack-network-compute-nova/meta.yaml +++ b/f2s/resources/openstack-network-compute-nova/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-compute-nova handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/openstack-network-networks/meta.yaml b/f2s/resources/openstack-network-networks/meta.yaml index ff50ce0a..7904d440 100644 --- 
a/f2s/resources/openstack-network-networks/meta.yaml +++ b/f2s/resources/openstack-network-networks/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-networks handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: access: value: null fqdn: diff --git a/f2s/resources/openstack-network-plugins-l2/actions/run.pp b/f2s/resources/openstack-network-plugins-l2/actions/run.pp index 2c0ecf02..60300365 100644 --- a/f2s/resources/openstack-network-plugins-l2/actions/run.pp +++ b/f2s/resources/openstack-network-plugins-l2/actions/run.pp @@ -48,16 +48,15 @@ if $use_neutron { $physnet_ironic_bridge = try_get_value($neutron_config, 'L2/phys_nets/physnet-ironic/bridge', false) if $physnet_ironic_bridge { - $physnet_ironic = "physnet-ironic:${physnet_ironic_bridge}" - }else { - $physnet_ironic = [] + $bridge_mappings = [$physnet2, "physnet-ironic:${physnet_ironic_bridge}"] + } else { + $bridge_mappings = [$physnet2] } - $physnets_array = [$physnet2, $physnet_ironic] - $bridge_mappings = delete_undef_values($physnets_array) $physical_network_mtus = ["physnet2:${physical_net_mtu}"] $tunnel_id_ranges = [] $network_type = 'vlan' + $tunnel_types = [] } else { $net_role_property = 'neutron/mesh' $tunneling_ip = get_network_role_property($net_role_property, 'ipaddr') @@ -75,6 +74,7 @@ if $use_neutron { $mtu_offset = '50' $network_type = 'vxlan' } + $tunnel_types = [$network_type] if $physical_net_mtu { $overlay_net_mtu = $physical_net_mtu - $mtu_offset @@ -83,7 +83,6 @@ if $use_neutron { } $enable_tunneling = true - $tunnel_types = [$network_type] } $type_drivers = ['local', 'flat', 'vlan', 'gre', 'vxlan'] diff --git a/f2s/resources/openstack-network-plugins-l2/meta.yaml b/f2s/resources/openstack-network-plugins-l2/meta.yaml index 1e9bfe50..ae35e197 100644 --- a/f2s/resources/openstack-network-plugins-l2/meta.yaml +++ b/f2s/resources/openstack-network-plugins-l2/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-plugins-l2 handler: puppetv2 
version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/openstack-network-routers/meta.yaml b/f2s/resources/openstack-network-routers/meta.yaml index 2902337c..a238457e 100644 --- a/f2s/resources/openstack-network-routers/meta.yaml +++ b/f2s/resources/openstack-network-routers/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-routers handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: access: value: null fqdn: diff --git a/f2s/resources/openstack-network-server-config/meta.yaml b/f2s/resources/openstack-network-server-config/meta.yaml index a75a3086..3a824f01 100644 --- a/f2s/resources/openstack-network-server-config/meta.yaml +++ b/f2s/resources/openstack-network-server-config/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-server-config handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/openstack-network-server-nova/actions/run.pp b/f2s/resources/openstack-network-server-nova/actions/run.pp index 4640a581..721a8c2f 100644 --- a/f2s/resources/openstack-network-server-nova/actions/run.pp +++ b/f2s/resources/openstack-network-server-nova/actions/run.pp @@ -48,11 +48,20 @@ if $use_neutron { $fixed_range = hiera('fixed_network_range', undef) $network_manager = hiera('network_manager', undef) $network_config = hiera('network_config', { }) - $create_networks = true $num_networks = hiera('num_networks', undef) $network_size = hiera('network_size', undef) $nameservers = hiera('dns_nameservers', undef) $enable_nova_net = false + #NOTE(degorenko): lp/1501767 + if $nameservers { + if count($nameservers) >= 2 { + $dns_opts = "--dns1 ${nameservers[0]} --dns2 ${nameservers[1]}" + } else { + $dns_opts = "--dns1 ${nameservers[0]}" + } + } else { + $dns_opts = "" + } class { 'nova::network' : ensure_package => 
$ensure_package, @@ -62,20 +71,28 @@ if $use_neutron { floating_range => false, network_manager => $network_manager, config_overrides => $network_config, - create_networks => $create_networks, + create_networks => false, # lp/1501767 num_networks => $num_networks, network_size => $network_size, dns1 => $nameservers[0], dns2 => $nameservers[1], enabled => $enable_nova_net, - install_service => false, # bacause controller + install_service => false, # because controller + } + + #NOTE(degorenko): lp/1501767 + $primary_controller = hiera('primary_controller') + if $primary_controller { + exec { 'create_private_nova_network': + path => '/usr/bin', + command => "nova-manage network create novanetwork ${fixed_range} ${num_networks} ${network_size} ${dns_opts}", + } } # NOTE(aglarendil): lp/1381164 nova_config { 'DEFAULT/force_snat_range' : value => '0.0.0.0/0' } -# ========================================================================= - + # stub resource for 'nova::network' class file { '/etc/nova/nova.conf' : ensure => 'present' } } diff --git a/f2s/resources/openstack-network-server-nova/meta.yaml b/f2s/resources/openstack-network-server-nova/meta.yaml index a4fe1708..7c01a38e 100644 --- a/f2s/resources/openstack-network-server-nova/meta.yaml +++ b/f2s/resources/openstack-network-server-nova/meta.yaml @@ -1,7 +1,10 @@ id: openstack-network-server-nova handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/pre_hiera_config/meta.yaml b/f2s/resources/pre_hiera_config/meta.yaml index 3eada3fc..b94987a9 100644 --- a/f2s/resources/pre_hiera_config/meta.yaml +++ b/f2s/resources/pre_hiera_config/meta.yaml @@ -1,7 +1,10 @@ id: pre_hiera_config handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/public_vip_ping/meta.yaml 
b/f2s/resources/public_vip_ping/meta.yaml index e4f7bec4..c3a9a591 100644 --- a/f2s/resources/public_vip_ping/meta.yaml +++ b/f2s/resources/public_vip_ping/meta.yaml @@ -1,7 +1,10 @@ id: public_vip_ping handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null network_scheme: diff --git a/f2s/resources/rabbitmq/actions/run.pp b/f2s/resources/rabbitmq/actions/run.pp index 6856787c..ed980929 100644 --- a/f2s/resources/rabbitmq/actions/run.pp +++ b/f2s/resources/rabbitmq/actions/run.pp @@ -137,8 +137,6 @@ if $queue_provider == 'rabbitmq' { class { 'nova::rabbitmq': enabled => $enabled, - # Do not install rabbitmq from nova classes - rabbitmq_class => false, userid => $rabbit_hash['user'], password => $rabbit_hash['password'], require => Class['::rabbitmq'], diff --git a/f2s/resources/rabbitmq/meta.yaml b/f2s/resources/rabbitmq/meta.yaml index 7438c205..4cc7fd88 100644 --- a/f2s/resources/rabbitmq/meta.yaml +++ b/f2s/resources/rabbitmq/meta.yaml @@ -1,7 +1,10 @@ id: rabbitmq handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: amqp_port: value: null debug: diff --git a/f2s/resources/sahara-db/meta.yaml b/f2s/resources/sahara-db/meta.yaml index 0e3b85d4..b6dcb499 100644 --- a/f2s/resources/sahara-db/meta.yaml +++ b/f2s/resources/sahara-db/meta.yaml @@ -1,7 +1,10 @@ id: sahara-db handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: database_vip: value: null fqdn: diff --git a/f2s/resources/sahara-keystone/meta.yaml b/f2s/resources/sahara-keystone/meta.yaml index e5716756..69a6115e 100644 --- a/f2s/resources/sahara-keystone/meta.yaml +++ b/f2s/resources/sahara-keystone/meta.yaml @@ -1,7 +1,10 @@ id: sahara-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/sahara/meta.yaml 
b/f2s/resources/sahara/meta.yaml index e74714fb..6ee2e224 100644 --- a/f2s/resources/sahara/meta.yaml +++ b/f2s/resources/sahara/meta.yaml @@ -1,7 +1,10 @@ id: sahara handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: access_hash: value: null amqp_hosts: diff --git a/f2s/resources/ssl-add-trust-chain/meta.yaml b/f2s/resources/ssl-add-trust-chain/meta.yaml index 40c9ce36..f34c5e60 100644 --- a/f2s/resources/ssl-add-trust-chain/meta.yaml +++ b/f2s/resources/ssl-add-trust-chain/meta.yaml @@ -1,7 +1,10 @@ id: ssl-add-trust-chain handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null public_ssl: diff --git a/f2s/resources/ssl-keys-saving/meta.yaml b/f2s/resources/ssl-keys-saving/meta.yaml index e59e90fe..02e3ad26 100644 --- a/f2s/resources/ssl-keys-saving/meta.yaml +++ b/f2s/resources/ssl-keys-saving/meta.yaml @@ -1,7 +1,10 @@ id: ssl-keys-saving handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null public_ssl: diff --git a/f2s/resources/swift-keystone/meta.yaml b/f2s/resources/swift-keystone/meta.yaml index 10f75482..54f007f0 100644 --- a/f2s/resources/swift-keystone/meta.yaml +++ b/f2s/resources/swift-keystone/meta.yaml @@ -1,7 +1,10 @@ id: swift-keystone handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null management_vip: diff --git a/f2s/resources/swift-rebalance-cron/meta.yaml b/f2s/resources/swift-rebalance-cron/meta.yaml index 380a46b2..313ede9d 100644 --- a/f2s/resources/swift-rebalance-cron/meta.yaml +++ b/f2s/resources/swift-rebalance-cron/meta.yaml @@ -1,7 +1,10 @@ id: swift-rebalance-cron handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null is_primary_swift_proxy: diff --git a/f2s/resources/swift/meta.yaml 
b/f2s/resources/swift/meta.yaml index bcbe1073..99cfad83 100644 --- a/f2s/resources/swift/meta.yaml +++ b/f2s/resources/swift/meta.yaml @@ -1,7 +1,10 @@ id: swift handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: debug: value: null deploy_swift_proxy: diff --git a/f2s/resources/tools/meta.yaml b/f2s/resources/tools/meta.yaml index 480a3777..e23aae59 100644 --- a/f2s/resources/tools/meta.yaml +++ b/f2s/resources/tools/meta.yaml @@ -1,7 +1,10 @@ id: tools handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: deployment_mode: value: null fqdn: diff --git a/f2s/resources/top-role-ceph-osd/meta.yaml b/f2s/resources/top-role-ceph-osd/meta.yaml index 17500bcd..331ed767 100644 --- a/f2s/resources/top-role-ceph-osd/meta.yaml +++ b/f2s/resources/top-role-ceph-osd/meta.yaml @@ -1,7 +1,10 @@ id: top-role-ceph-osd handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/top-role-cinder-vmware/meta.yaml b/f2s/resources/top-role-cinder-vmware/meta.yaml index b990a06d..dec1771e 100644 --- a/f2s/resources/top-role-cinder-vmware/meta.yaml +++ b/f2s/resources/top-role-cinder-vmware/meta.yaml @@ -1,7 +1,10 @@ id: top-role-cinder-vmware handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/top-role-cinder/meta.yaml b/f2s/resources/top-role-cinder/meta.yaml index 1e8d09c7..605bf00c 100644 --- a/f2s/resources/top-role-cinder/meta.yaml +++ b/f2s/resources/top-role-cinder/meta.yaml @@ -1,7 +1,10 @@ id: top-role-cinder handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/top-role-compute-vmware/meta.yaml 
b/f2s/resources/top-role-compute-vmware/meta.yaml index 14ecd29c..eca28a2c 100644 --- a/f2s/resources/top-role-compute-vmware/meta.yaml +++ b/f2s/resources/top-role-compute-vmware/meta.yaml @@ -1,7 +1,10 @@ id: top-role-compute-vmware handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/top-role-compute/meta.yaml b/f2s/resources/top-role-compute/meta.yaml index 37de35d3..2f464777 100644 --- a/f2s/resources/top-role-compute/meta.yaml +++ b/f2s/resources/top-role-compute/meta.yaml @@ -1,7 +1,10 @@ id: top-role-compute handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/top-role-mongo/meta.yaml b/f2s/resources/top-role-mongo/meta.yaml index 3503a37f..e3448e5c 100644 --- a/f2s/resources/top-role-mongo/meta.yaml +++ b/f2s/resources/top-role-mongo/meta.yaml @@ -1,7 +1,10 @@ id: top-role-mongo handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/top-role-primary-mongo/meta.yaml b/f2s/resources/top-role-primary-mongo/meta.yaml index 949bd9cf..4e3b46a9 100644 --- a/f2s/resources/top-role-primary-mongo/meta.yaml +++ b/f2s/resources/top-role-primary-mongo/meta.yaml @@ -1,7 +1,10 @@ id: top-role-primary-mongo handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/umm/meta.yaml b/f2s/resources/umm/meta.yaml index 5ac77d1b..949e385e 100644 --- a/f2s/resources/umm/meta.yaml +++ b/f2s/resources/umm/meta.yaml @@ -1,7 +1,10 @@ id: umm handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/update_hosts/meta.yaml 
b/f2s/resources/update_hosts/meta.yaml index 1bd6c7d3..e2b649cf 100644 --- a/f2s/resources/update_hosts/meta.yaml +++ b/f2s/resources/update_hosts/meta.yaml @@ -1,7 +1,10 @@ id: update_hosts handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null nodes: diff --git a/f2s/resources/updatedb/meta.yaml b/f2s/resources/updatedb/meta.yaml index f05727d0..7c273f3e 100644 --- a/f2s/resources/updatedb/meta.yaml +++ b/f2s/resources/updatedb/meta.yaml @@ -1,7 +1,10 @@ id: updatedb handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/resources/virtual_ips/meta.yaml b/f2s/resources/virtual_ips/meta.yaml index 80e6f487..72746ccc 100644 --- a/f2s/resources/virtual_ips/meta.yaml +++ b/f2s/resources/virtual_ips/meta.yaml @@ -1,7 +1,10 @@ id: virtual_ips handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null network_metadata: diff --git a/f2s/resources/vmware-vcenter/meta.yaml b/f2s/resources/vmware-vcenter/meta.yaml index ccead16c..ee372308 100644 --- a/f2s/resources/vmware-vcenter/meta.yaml +++ b/f2s/resources/vmware-vcenter/meta.yaml @@ -1,7 +1,10 @@ id: vmware-vcenter handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: ceilometer: value: null debug: diff --git a/f2s/resources/workloads_collector_add/meta.yaml b/f2s/resources/workloads_collector_add/meta.yaml index 19d04b77..70ea732d 100644 --- a/f2s/resources/workloads_collector_add/meta.yaml +++ b/f2s/resources/workloads_collector_add/meta.yaml @@ -1,7 +1,10 @@ id: workloads_collector_add handler: puppetv2 version: '8.0' -inputs: +actions: + run: actions/run.pp + update: actions/run.pp +input: fqdn: value: null puppet_modules: diff --git a/f2s/vrs/genkeys.yaml b/f2s/vrs/genkeys.yaml index c62517b1..3b2d4024 100644 --- 
a/f2s/vrs/genkeys.yaml +++ b/f2s/vrs/genkeys.yaml @@ -3,3 +3,8 @@ resources: - id: genkeys{{index}} from: f2s/resources/genkeys location: {{node}} + values: + uid: '{{index}}' + path: /var/lib/fuel/keys/ + ssl: [mongo] + ssh: [neutron, nova, mysql] From 9d8415ba197617f75f38b98527a9d537c3c4465c Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 14:10:04 +0200 Subject: [PATCH 30/51] Add session to a client --- f2s/f2s.py | 7 +++++-- f2s/fsclient.py | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 331af492..218816de 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -24,7 +24,7 @@ ensure_dir(RESOURCE_TMP_WORKDIR) RESOURCE_DIR = os.path.join(CURDIR, 'resources') VR_TMP_DIR = os.path.join(CURDIR, 'tmp/vrs') ensure_dir(VR_TMP_DIR) -INPUTS_LOCATION = "/tmp/fuel_specs/" +INPUTS_LOCATION = "/root/latest/" DEPLOYMENT_GROUP_PATH = os.path.join(LIBRARY_PATH, 'deployment', 'puppet', 'deployment_groups', 'tasks.yaml') @@ -102,7 +102,10 @@ class Task(object): data = OrderedDict([('id', self.name), ('handler', 'puppetv2'), ('version', '8.0'), - ('inputs', self.inputs())]) + ('actions', { + 'run': 'actions/run.pp', + 'update': 'actions/run.pp'}), + ('input', self.inputs()),]) return ordered_dump(data, default_flow_style=False) @property diff --git a/f2s/fsclient.py b/f2s/fsclient.py index e20b661b..a1629290 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -3,6 +3,7 @@ import click from solar.core.resource import virtual_resource as vr +from solar.dblayer.model import ModelMeta @@ -48,14 +49,15 @@ def nodes(uids): {'index': uid, 'ip': ip}) @main.command() -def master(): +@click.argument('env') +def master(env): master = source.master() vr.create('master', 'f2s/vrs/fuel_node.yaml', {'index': master[0], 'ip': master[1]}) vr.create('genkeys', 'f2s/vrs/genkeys.yaml', { 'node': 'node'+master[0], - 'index': master[0]}) + 'index': env}) @main.command() @click.argument('uids', nargs=-1) @@ -77,3 +79,4 @@ def 
roles(uids): if __name__ == '__main__': main() + ModelMeta.session_end() From c6416517ca8557626f77da3a3339cf52c37d6dc9 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 17:13:08 +0200 Subject: [PATCH 31/51] Fix tasks, ssh config and prefetching information from nailgun --- f2s/f2s.py | 3 ++- f2s/fsclient.py | 8 +++---- f2s/resources/genkeys/actions/run.sh | 4 ++-- f2s/resources/role_data/managers/globals.py | 8 ++++--- f2s/resources/role_data/meta.yaml | 5 ++++- f2s/vrs/compute.yml | 1 + f2s/vrs/controller.yml | 1 + f2s/vrs/fuel_node.yaml | 4 +--- f2s/vrs/prep.yaml | 24 ++++++++------------- f2s/vrs/primary-controller.yml | 1 + solar/core/resource/resource.py | 7 +++--- 11 files changed, 33 insertions(+), 33 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 218816de..71f55433 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -170,7 +170,8 @@ class DGroup(object): ('from', 'f2s/resources/'+RoleData.name), ('location', "{{node}}"), ('values', {'uid': '{{index}}', - 'env': '{{env}}'})]) + 'env': '{{env}}', + 'puppet_modules': '/etc/puppet/modules'})]) for t, _, _ in self.tasks: if t.name in self.filtered: diff --git a/f2s/fsclient.py b/f2s/fsclient.py index a1629290..db785b14 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -34,7 +34,7 @@ class DumbSource(object): return [(uid, ip_mask % uid, 1) for uid in uids] def roles(self, uid): - return 'primary-controller' + return ['primary-controller'] def master(self): return 'master', '0.0.0.0' @@ -72,9 +72,9 @@ def prep(uids): def roles(uids): for uid, ip, env in source.nodes(uids): - role = source.roles(uid) - vr.create(role, 'f2s/vrs/'+role +'.yml', - {'index': uid, 'env': env, 'node': 'node'+uid}) + for role in source.roles(uid): + vr.create(role, 'f2s/vrs/'+role +'.yml', + {'index': uid, 'env': env, 'node': 'node'+uid}) if __name__ == '__main__': diff --git a/f2s/resources/genkeys/actions/run.sh b/f2s/resources/genkeys/actions/run.sh index 1c0651f3..182f3007 100644 --- 
a/f2s/resources/genkeys/actions/run.sh +++ b/f2s/resources/genkeys/actions/run.sh @@ -1,8 +1,8 @@ #!/bin/sh cluster_id={{uid}} -open_ssl_keys={{ssl|join(' ')}} -ssh_keys={{ ssh|join(' ') }} +open_ssl_keys='{{ssl|join(' ')}}' +ssh_keys='{{ ssh|join(' ') }}' keys_path={{path}} BASE_PATH=$keys_path/$cluster_id/ diff --git a/f2s/resources/role_data/managers/globals.py b/f2s/resources/role_data/managers/globals.py index 7e7da6f5..50ee4a39 100755 --- a/f2s/resources/role_data/managers/globals.py +++ b/f2s/resources/role_data/managers/globals.py @@ -30,13 +30,15 @@ def prepare_hiera(): with open('/etc/puppet/hiera.yaml', 'w') as f: f.write(hiera_conf) - + # dont dump null values + sanitized = {key:ARGS[key] for key in ARGS if ARGS.get(key)} with open('/etc/puppet/hieradata/{}.yaml'.format(ARGS['uid']), 'w') as f: - f.write(yaml.safe_dump(ARGS)) + f.write(yaml.safe_dump(sanitized)) def run_command(): cmd = [ - 'puppet', 'apply', '--modulepath={}'.format(ARGS['puppet_modules']), + 'puppet', 'apply', '--hiera_config=/etc/puppet/hiera.yaml', + '--modulepath={}'.format(ARGS['puppet_modules']), os.path.join(CURDIR, 'globals.pp')] return execute(cmd) diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index b2f94dff..895451c8 100644 --- a/f2s/resources/role_data/meta.yaml +++ b/f2s/resources/role_data/meta.yaml @@ -6,7 +6,6 @@ managers: - managers/from_nailgun.py - managers/globals.py input: - # should be auto-generated based on outputs from globals.pp puppet_modules: type: str! value: /etc/puppet/modules @@ -16,6 +15,10 @@ input: env: type: str! 
value: + tasks: + value: + ironic: + value: access: value: null access_hash: diff --git a/f2s/vrs/compute.yml b/f2s/vrs/compute.yml index 3b4244fa..4bece7d2 100644 --- a/f2s/vrs/compute.yml +++ b/f2s/vrs/compute.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/controller.yml b/f2s/vrs/controller.yml index 39651c69..4cdc1496 100644 --- a/f2s/vrs/controller.yml +++ b/f2s/vrs/controller.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/fuel_node.yaml b/f2s/vrs/fuel_node.yaml index 9d04b803..23c01ec9 100644 --- a/f2s/vrs/fuel_node.yaml +++ b/f2s/vrs/fuel_node.yaml @@ -1,11 +1,10 @@ id: fuel_node resources: -{% for i in range(1|int) %} - id: ssh_transport{{index}} from: resources/transport_ssh values: ssh_user: 'root' - ssh_key: '/root/.ssh/id_rsa' + ssh_key: '/root/.ssh/bootstrap.rsa' - id: transports{{index}} from: resources/transports values: @@ -19,4 +18,3 @@ resources: name: node{{index}} ip: {{ip}} transports_id: transports{{index}}::transports_id -{% endfor %} diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml index 09614ea4..401aa628 100644 --- a/f2s/vrs/prep.yaml +++ b/f2s/vrs/prep.yaml @@ -5,22 +5,16 @@ resources: location: {{node}} values: sources: - - src: /var/lib/fuel/keys/{{env}}/neutron/neutron.pub - dst: /var/lib/astute/neutron/neutron.pub - - src: /var/lib/fuel/keys/{{env}}/neutron/neutron - dst: /var/lib/astute/neutron/neutron - - src: /var/lib/fuel/keys/{{env}}/nova/nova.pub - dst: /var/lib/astute/nova/nova.pub - - src: /var/lib/fuel/keys/{{env}}/nova/nova - dst: /var/lib/astute/nova/nova - - src: /var/lib/fuel/keys/{{env}}/mysql/mysql.pub - dst: /var/lib/astute/mysql/mysql.pub - - src: /var/lib/fuel/keys/{{env}}/mysql/mysql - dst: 
/var/lib/astute/mysql/mysql - - src: /var/lib/fuel/keys/{{env}}/mongodb/mongodb.key - dst: /var/lib/astute/mongodb/mongodb.key + - src: /var/lib/fuel/keys/{{env}}/neutron + dst: /var/lib/astute + - src: /var/lib/fuel/keys/{{env}}/nova + dst: /var/lib/astute + - src: /var/lib/fuel/keys/{{env}}/mysql + dst: /var/lib/astute + - src: /var/lib/fuel/keys/{{env}}/mongo + dst: /var/lib/astute - src: /etc/puppet/modules - dst: /etc/puppet/modules + dst: /etc/puppet/ - id: mos_repos{{index}} from: templates/mos_repos.yaml values: diff --git a/f2s/vrs/primary-controller.yml b/f2s/vrs/primary-controller.yml index ebdfee83..cb373d4a 100644 --- a/f2s/vrs/primary-controller.yml +++ b/f2s/vrs/primary-controller.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/solar/core/resource/resource.py b/solar/core/resource/resource.py index 737483a8..9310c934 100644 --- a/solar/core/resource/resource.py +++ b/solar/core/resource/resource.py @@ -290,12 +290,11 @@ class Resource(object): if not self.db_obj.managers: return - manager_stack = self.db_obj.managers - while manager_stack: - manager = manager_stack.pop(0) + for manager in self.db_obj.managers: manager_path = os.path.join(self.db_obj.base_path, manager) rst = utils.communicate([manager_path], json.dumps(self.args)) - self.update(json.loads(rst)) + if rst: + self.update(json.loads(rst)) def load(name): r = DBResource.get(name) From de76c08a6a383187576e202f8bdec6b085733c6a Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 17:52:35 +0200 Subject: [PATCH 32/51] Change path to actions --- f2s/f2s.py | 4 ++-- f2s/resources/apache/meta.yaml | 4 ++-- f2s/resources/api-proxy/meta.yaml | 4 ++-- f2s/resources/ceilometer-compute/meta.yaml | 4 ++-- f2s/resources/ceilometer-controller/meta.yaml | 4 ++-- f2s/resources/ceilometer-keystone/meta.yaml | 4 ++-- 
f2s/resources/ceilometer-radosgw-user/meta.yaml | 4 ++-- f2s/resources/ceph-compute/meta.yaml | 4 ++-- f2s/resources/ceph-mon/meta.yaml | 4 ++-- f2s/resources/ceph-radosgw/meta.yaml | 4 ++-- f2s/resources/ceph_create_pools/meta.yaml | 4 ++-- f2s/resources/cinder-db/meta.yaml | 4 ++-- f2s/resources/cinder-keystone/meta.yaml | 4 ++-- f2s/resources/cluster-haproxy/meta.yaml | 4 ++-- f2s/resources/cluster-vrouter/meta.yaml | 4 ++-- f2s/resources/cluster/meta.yaml | 4 ++-- f2s/resources/cluster_health/meta.yaml | 4 ++-- f2s/resources/configure_default_route/meta.yaml | 4 ++-- f2s/resources/connectivity_tests/meta.yaml | 4 ++-- f2s/resources/conntrackd/meta.yaml | 4 ++-- f2s/resources/controller_remaining_tasks/meta.yaml | 4 ++-- f2s/resources/database/meta.yaml | 4 ++-- .../disable_keystone_service_token/meta.yaml | 4 ++-- f2s/resources/dns-client/meta.yaml | 4 ++-- f2s/resources/dns-server/meta.yaml | 4 ++-- f2s/resources/dump_rabbitmq_definitions/meta.yaml | 4 ++-- f2s/resources/enable_cinder_volume_service/meta.yaml | 4 ++-- f2s/resources/enable_nova_compute_service/meta.yaml | 4 ++-- f2s/resources/enable_rados/meta.yaml | 4 ++-- f2s/resources/firewall/meta.yaml | 4 ++-- f2s/resources/fuel_pkgs/meta.yaml | 4 ++-- f2s/resources/generate_vms/meta.yaml | 4 ++-- f2s/resources/glance-db/meta.yaml | 4 ++-- f2s/resources/glance-keystone/meta.yaml | 4 ++-- f2s/resources/glance/meta.yaml | 4 ++-- f2s/resources/globals/meta.yaml | 4 ++-- f2s/resources/heat-db/meta.yaml | 4 ++-- f2s/resources/heat-keystone/meta.yaml | 4 ++-- f2s/resources/heat/meta.yaml | 4 ++-- f2s/resources/hiera/meta.yaml | 4 ++-- f2s/resources/horizon/actions/run.pp | 11 +++++++++++ f2s/resources/horizon/meta.yaml | 4 ++-- f2s/resources/hosts/meta.yaml | 4 ++-- f2s/resources/ironic-api/meta.yaml | 4 ++-- f2s/resources/ironic-compute/meta.yaml | 4 ++-- f2s/resources/ironic-conductor/meta.yaml | 4 ++-- f2s/resources/ironic-db/meta.yaml | 4 ++-- f2s/resources/ironic-keystone/meta.yaml | 4 ++-- 
f2s/resources/keystone-db/meta.yaml | 4 ++-- f2s/resources/keystone/meta.yaml | 4 ++-- f2s/resources/logging/meta.yaml | 4 ++-- f2s/resources/memcached/meta.yaml | 4 ++-- f2s/resources/murano-db/meta.yaml | 4 ++-- f2s/resources/murano-keystone/meta.yaml | 4 ++-- f2s/resources/murano-rabbitmq/meta.yaml | 4 ++-- f2s/resources/murano/meta.yaml | 4 ++-- f2s/resources/netconfig/meta.yaml | 4 ++-- f2s/resources/neutron-db/meta.yaml | 4 ++-- f2s/resources/neutron-keystone/meta.yaml | 4 ++-- f2s/resources/nova-db/meta.yaml | 4 ++-- f2s/resources/nova-keystone/meta.yaml | 4 ++-- f2s/resources/ntp-check/meta.yaml | 4 ++-- f2s/resources/ntp-client/meta.yaml | 4 ++-- f2s/resources/ntp-server/meta.yaml | 4 ++-- f2s/resources/openstack-cinder/meta.yaml | 4 ++-- f2s/resources/openstack-controller/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-ceilometer/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-cinder/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-glance/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-heat/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-horizon/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-ironic/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-keystone/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-murano/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-mysqld/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-neutron/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-nova/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-radosgw/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-sahara/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-stats/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy-swift/meta.yaml | 4 ++-- f2s/resources/openstack-haproxy/meta.yaml | 4 ++-- f2s/resources/openstack-network-agents-dhcp/meta.yaml | 4 ++-- f2s/resources/openstack-network-agents-l3/meta.yaml | 4 ++-- .../openstack-network-agents-metadata/meta.yaml | 4 ++-- .../openstack-network-common-config/meta.yaml | 4 ++-- 
.../openstack-network-compute-nova/meta.yaml | 4 ++-- f2s/resources/openstack-network-networks/meta.yaml | 4 ++-- f2s/resources/openstack-network-plugins-l2/meta.yaml | 4 ++-- f2s/resources/openstack-network-routers/meta.yaml | 4 ++-- .../openstack-network-server-config/meta.yaml | 4 ++-- f2s/resources/openstack-network-server-nova/meta.yaml | 4 ++-- f2s/resources/pre_hiera_config/meta.yaml | 4 ++-- f2s/resources/public_vip_ping/meta.yaml | 4 ++-- f2s/resources/rabbitmq/meta.yaml | 4 ++-- f2s/resources/sahara-db/meta.yaml | 4 ++-- f2s/resources/sahara-keystone/meta.yaml | 4 ++-- f2s/resources/sahara/meta.yaml | 4 ++-- f2s/resources/ssl-add-trust-chain/meta.yaml | 4 ++-- f2s/resources/ssl-keys-saving/meta.yaml | 4 ++-- f2s/resources/swift-keystone/meta.yaml | 4 ++-- f2s/resources/swift-rebalance-cron/meta.yaml | 4 ++-- f2s/resources/swift/meta.yaml | 4 ++-- f2s/resources/tools/meta.yaml | 4 ++-- f2s/resources/top-role-ceph-osd/meta.yaml | 4 ++-- f2s/resources/top-role-cinder-vmware/meta.yaml | 4 ++-- f2s/resources/top-role-cinder/meta.yaml | 4 ++-- f2s/resources/top-role-compute-vmware/meta.yaml | 4 ++-- f2s/resources/top-role-compute/meta.yaml | 4 ++-- f2s/resources/top-role-mongo/meta.yaml | 4 ++-- f2s/resources/top-role-primary-mongo/meta.yaml | 4 ++-- f2s/resources/umm/meta.yaml | 4 ++-- f2s/resources/update_hosts/meta.yaml | 4 ++-- f2s/resources/updatedb/meta.yaml | 4 ++-- f2s/resources/virtual_ips/meta.yaml | 4 ++-- f2s/resources/vmware-vcenter/meta.yaml | 4 ++-- f2s/resources/workloads_collector_add/meta.yaml | 4 ++-- 117 files changed, 243 insertions(+), 232 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 71f55433..4adc742e 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -103,8 +103,8 @@ class Task(object): ('handler', 'puppetv2'), ('version', '8.0'), ('actions', { - 'run': 'actions/run.pp', - 'update': 'actions/run.pp'}), + 'run': 'run.pp', + 'update': 'run.pp'}), ('input', self.inputs()),]) return ordered_dump(data, default_flow_style=False) diff --git 
a/f2s/resources/apache/meta.yaml b/f2s/resources/apache/meta.yaml index a1fc1be8..54f06566 100644 --- a/f2s/resources/apache/meta.yaml +++ b/f2s/resources/apache/meta.yaml @@ -2,8 +2,8 @@ id: apache handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: apache_ports: value: null diff --git a/f2s/resources/api-proxy/meta.yaml b/f2s/resources/api-proxy/meta.yaml index 4c152bc5..10aae969 100644 --- a/f2s/resources/api-proxy/meta.yaml +++ b/f2s/resources/api-proxy/meta.yaml @@ -2,8 +2,8 @@ id: api-proxy handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: apache_ports: value: null diff --git a/f2s/resources/ceilometer-compute/meta.yaml b/f2s/resources/ceilometer-compute/meta.yaml index 82e53cf4..d38534aa 100644 --- a/f2s/resources/ceilometer-compute/meta.yaml +++ b/f2s/resources/ceilometer-compute/meta.yaml @@ -2,8 +2,8 @@ id: ceilometer-compute handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/ceilometer-controller/meta.yaml b/f2s/resources/ceilometer-controller/meta.yaml index 8dcd033b..6e9749d4 100644 --- a/f2s/resources/ceilometer-controller/meta.yaml +++ b/f2s/resources/ceilometer-controller/meta.yaml @@ -2,8 +2,8 @@ id: ceilometer-controller handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ceilometer: value: null diff --git a/f2s/resources/ceilometer-keystone/meta.yaml b/f2s/resources/ceilometer-keystone/meta.yaml index fd8e2a4b..e18c0932 100644 --- a/f2s/resources/ceilometer-keystone/meta.yaml +++ b/f2s/resources/ceilometer-keystone/meta.yaml @@ -2,8 +2,8 @@ id: ceilometer-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ceilometer: 
value: null diff --git a/f2s/resources/ceilometer-radosgw-user/meta.yaml b/f2s/resources/ceilometer-radosgw-user/meta.yaml index e6484dd3..e4cc3b05 100644 --- a/f2s/resources/ceilometer-radosgw-user/meta.yaml +++ b/f2s/resources/ceilometer-radosgw-user/meta.yaml @@ -2,8 +2,8 @@ id: ceilometer-radosgw-user handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ceilometer: value: null diff --git a/f2s/resources/ceph-compute/meta.yaml b/f2s/resources/ceph-compute/meta.yaml index 5a1ec76d..b439d846 100644 --- a/f2s/resources/ceph-compute/meta.yaml +++ b/f2s/resources/ceph-compute/meta.yaml @@ -2,8 +2,8 @@ id: ceph-compute handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/ceph-mon/meta.yaml b/f2s/resources/ceph-mon/meta.yaml index e1f332b4..b3cdbb68 100644 --- a/f2s/resources/ceph-mon/meta.yaml +++ b/f2s/resources/ceph-mon/meta.yaml @@ -2,8 +2,8 @@ id: ceph-mon handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ceph_monitor_nodes: value: null diff --git a/f2s/resources/ceph-radosgw/meta.yaml b/f2s/resources/ceph-radosgw/meta.yaml index e60e9dfd..5c589f2f 100644 --- a/f2s/resources/ceph-radosgw/meta.yaml +++ b/f2s/resources/ceph-radosgw/meta.yaml @@ -2,8 +2,8 @@ id: ceph-radosgw handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ceph_monitor_nodes: value: null diff --git a/f2s/resources/ceph_create_pools/meta.yaml b/f2s/resources/ceph_create_pools/meta.yaml index 92b66ee5..9a8d7e57 100644 --- a/f2s/resources/ceph_create_pools/meta.yaml +++ b/f2s/resources/ceph_create_pools/meta.yaml @@ -2,8 +2,8 @@ id: ceph_create_pools handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: 
run.pp input: fqdn: value: null diff --git a/f2s/resources/cinder-db/meta.yaml b/f2s/resources/cinder-db/meta.yaml index 039be257..72c5c1d5 100644 --- a/f2s/resources/cinder-db/meta.yaml +++ b/f2s/resources/cinder-db/meta.yaml @@ -2,8 +2,8 @@ id: cinder-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: cinder: value: null diff --git a/f2s/resources/cinder-keystone/meta.yaml b/f2s/resources/cinder-keystone/meta.yaml index a715dad7..7c15a73d 100644 --- a/f2s/resources/cinder-keystone/meta.yaml +++ b/f2s/resources/cinder-keystone/meta.yaml @@ -2,8 +2,8 @@ id: cinder-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: cinder: value: null diff --git a/f2s/resources/cluster-haproxy/meta.yaml b/f2s/resources/cluster-haproxy/meta.yaml index d89e791b..9552c31f 100644 --- a/f2s/resources/cluster-haproxy/meta.yaml +++ b/f2s/resources/cluster-haproxy/meta.yaml @@ -2,8 +2,8 @@ id: cluster-haproxy handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/cluster-vrouter/meta.yaml b/f2s/resources/cluster-vrouter/meta.yaml index 87e61697..7f302dab 100644 --- a/f2s/resources/cluster-vrouter/meta.yaml +++ b/f2s/resources/cluster-vrouter/meta.yaml @@ -2,8 +2,8 @@ id: cluster-vrouter handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/cluster/meta.yaml b/f2s/resources/cluster/meta.yaml index 594d2060..b029a5ba 100644 --- a/f2s/resources/cluster/meta.yaml +++ b/f2s/resources/cluster/meta.yaml @@ -2,8 +2,8 @@ id: cluster handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: corosync_roles: value: null diff --git 
a/f2s/resources/cluster_health/meta.yaml b/f2s/resources/cluster_health/meta.yaml index f68396fe..2e759c9a 100644 --- a/f2s/resources/cluster_health/meta.yaml +++ b/f2s/resources/cluster_health/meta.yaml @@ -2,8 +2,8 @@ id: cluster_health handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: corosync_disk_monitor: value: null diff --git a/f2s/resources/configure_default_route/meta.yaml b/f2s/resources/configure_default_route/meta.yaml index 2f075982..5e4bc30f 100644 --- a/f2s/resources/configure_default_route/meta.yaml +++ b/f2s/resources/configure_default_route/meta.yaml @@ -2,8 +2,8 @@ id: configure_default_route handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/connectivity_tests/meta.yaml b/f2s/resources/connectivity_tests/meta.yaml index 7cb4d8d8..31564b65 100644 --- a/f2s/resources/connectivity_tests/meta.yaml +++ b/f2s/resources/connectivity_tests/meta.yaml @@ -2,8 +2,8 @@ id: connectivity_tests handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/conntrackd/meta.yaml b/f2s/resources/conntrackd/meta.yaml index 315a5ca8..5debbebe 100644 --- a/f2s/resources/conntrackd/meta.yaml +++ b/f2s/resources/conntrackd/meta.yaml @@ -2,8 +2,8 @@ id: conntrackd handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/controller_remaining_tasks/meta.yaml b/f2s/resources/controller_remaining_tasks/meta.yaml index d90480ca..c7437a01 100644 --- a/f2s/resources/controller_remaining_tasks/meta.yaml +++ b/f2s/resources/controller_remaining_tasks/meta.yaml @@ -2,8 +2,8 @@ id: controller_remaining_tasks handler: puppetv2 version: '8.0' actions: - run: 
actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/database/meta.yaml b/f2s/resources/database/meta.yaml index be3499ba..e2cd878e 100644 --- a/f2s/resources/database/meta.yaml +++ b/f2s/resources/database/meta.yaml @@ -2,8 +2,8 @@ id: database handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_nodes: value: null diff --git a/f2s/resources/disable_keystone_service_token/meta.yaml b/f2s/resources/disable_keystone_service_token/meta.yaml index 5e77e649..ae29a628 100644 --- a/f2s/resources/disable_keystone_service_token/meta.yaml +++ b/f2s/resources/disable_keystone_service_token/meta.yaml @@ -2,8 +2,8 @@ id: disable_keystone_service_token handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/dns-client/meta.yaml b/f2s/resources/dns-client/meta.yaml index e2a8ef6c..f1f9d6bc 100644 --- a/f2s/resources/dns-client/meta.yaml +++ b/f2s/resources/dns-client/meta.yaml @@ -2,8 +2,8 @@ id: dns-client handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/dns-server/meta.yaml b/f2s/resources/dns-server/meta.yaml index 12174361..24ee68a1 100644 --- a/f2s/resources/dns-server/meta.yaml +++ b/f2s/resources/dns-server/meta.yaml @@ -2,8 +2,8 @@ id: dns-server handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: external_dns: value: null diff --git a/f2s/resources/dump_rabbitmq_definitions/meta.yaml b/f2s/resources/dump_rabbitmq_definitions/meta.yaml index 454d12e3..93ece163 100644 --- a/f2s/resources/dump_rabbitmq_definitions/meta.yaml +++ b/f2s/resources/dump_rabbitmq_definitions/meta.yaml @@ -2,8 +2,8 @@ id: 
dump_rabbitmq_definitions handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/enable_cinder_volume_service/meta.yaml b/f2s/resources/enable_cinder_volume_service/meta.yaml index 57957c6e..ce45311b 100644 --- a/f2s/resources/enable_cinder_volume_service/meta.yaml +++ b/f2s/resources/enable_cinder_volume_service/meta.yaml @@ -2,8 +2,8 @@ id: enable_cinder_volume_service handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/enable_nova_compute_service/meta.yaml b/f2s/resources/enable_nova_compute_service/meta.yaml index cdad50de..37cca6e9 100644 --- a/f2s/resources/enable_nova_compute_service/meta.yaml +++ b/f2s/resources/enable_nova_compute_service/meta.yaml @@ -2,8 +2,8 @@ id: enable_nova_compute_service handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/enable_rados/meta.yaml b/f2s/resources/enable_rados/meta.yaml index 189f1313..1af87035 100644 --- a/f2s/resources/enable_rados/meta.yaml +++ b/f2s/resources/enable_rados/meta.yaml @@ -2,8 +2,8 @@ id: enable_rados handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/firewall/meta.yaml b/f2s/resources/firewall/meta.yaml index eea19f9a..50a062b8 100644 --- a/f2s/resources/firewall/meta.yaml +++ b/f2s/resources/firewall/meta.yaml @@ -2,8 +2,8 @@ id: firewall handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/fuel_pkgs/meta.yaml b/f2s/resources/fuel_pkgs/meta.yaml index 788e7dc5..ee066f1f 100644 --- 
a/f2s/resources/fuel_pkgs/meta.yaml +++ b/f2s/resources/fuel_pkgs/meta.yaml @@ -2,8 +2,8 @@ id: fuel_pkgs handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/generate_vms/meta.yaml b/f2s/resources/generate_vms/meta.yaml index e84e320a..feff4a76 100644 --- a/f2s/resources/generate_vms/meta.yaml +++ b/f2s/resources/generate_vms/meta.yaml @@ -2,8 +2,8 @@ id: generate_vms handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/glance-db/meta.yaml b/f2s/resources/glance-db/meta.yaml index cc0676a7..a22af0a3 100644 --- a/f2s/resources/glance-db/meta.yaml +++ b/f2s/resources/glance-db/meta.yaml @@ -2,8 +2,8 @@ id: glance-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/glance-keystone/meta.yaml b/f2s/resources/glance-keystone/meta.yaml index a53edfb5..584c59a1 100644 --- a/f2s/resources/glance-keystone/meta.yaml +++ b/f2s/resources/glance-keystone/meta.yaml @@ -2,8 +2,8 @@ id: glance-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/glance/meta.yaml b/f2s/resources/glance/meta.yaml index ac98a933..d1e28e4f 100644 --- a/f2s/resources/glance/meta.yaml +++ b/f2s/resources/glance/meta.yaml @@ -2,8 +2,8 @@ id: glance handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: amqp_hosts: value: null diff --git a/f2s/resources/globals/meta.yaml b/f2s/resources/globals/meta.yaml index d2638d91..7bc10735 100644 --- a/f2s/resources/globals/meta.yaml +++ b/f2s/resources/globals/meta.yaml @@ -2,8 +2,8 @@ id: globals handler: 
puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: access: value: null diff --git a/f2s/resources/heat-db/meta.yaml b/f2s/resources/heat-db/meta.yaml index dc3d384d..7a4ae4c1 100644 --- a/f2s/resources/heat-db/meta.yaml +++ b/f2s/resources/heat-db/meta.yaml @@ -2,8 +2,8 @@ id: heat-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/heat-keystone/meta.yaml b/f2s/resources/heat-keystone/meta.yaml index f06820eb..727409ea 100644 --- a/f2s/resources/heat-keystone/meta.yaml +++ b/f2s/resources/heat-keystone/meta.yaml @@ -2,8 +2,8 @@ id: heat-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/heat/meta.yaml b/f2s/resources/heat/meta.yaml index a088ea0e..362afea5 100644 --- a/f2s/resources/heat/meta.yaml +++ b/f2s/resources/heat/meta.yaml @@ -2,8 +2,8 @@ id: heat handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: amqp_hosts: value: null diff --git a/f2s/resources/hiera/meta.yaml b/f2s/resources/hiera/meta.yaml index 4affe804..0b9a6580 100644 --- a/f2s/resources/hiera/meta.yaml +++ b/f2s/resources/hiera/meta.yaml @@ -2,8 +2,8 @@ id: hiera handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/horizon/actions/run.pp b/f2s/resources/horizon/actions/run.pp index 4368443b..23bd602c 100644 --- a/f2s/resources/horizon/actions/run.pp +++ b/f2s/resources/horizon/actions/run.pp @@ -65,4 +65,15 @@ haproxy_backend_status { 'keystone-public' : Class['openstack::horizon'] -> Haproxy_backend_status['keystone-admin'] Class['openstack::horizon'] -> 
Haproxy_backend_status['keystone-public'] +# TODO(aschultz): remove this if openstack-dashboard stops installing +# openstack-dashboard-apache +if $::osfamily == 'Debian' { + # LP#1513252 - remove this package if it's installed by the + # openstack-dashboard package installation. + package { 'openstack-dashboard-apache': + ensure => 'absent', + require => Package['openstack-dashboard'] + } ~> Service[$::apache::params::service_name] +} + include ::tweaks::apache_wrappers diff --git a/f2s/resources/horizon/meta.yaml b/f2s/resources/horizon/meta.yaml index bad3bce9..2e8ad481 100644 --- a/f2s/resources/horizon/meta.yaml +++ b/f2s/resources/horizon/meta.yaml @@ -2,8 +2,8 @@ id: horizon handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: apache_ports: value: null diff --git a/f2s/resources/hosts/meta.yaml b/f2s/resources/hosts/meta.yaml index 20f8d190..2ba7e90f 100644 --- a/f2s/resources/hosts/meta.yaml +++ b/f2s/resources/hosts/meta.yaml @@ -2,8 +2,8 @@ id: hosts handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/ironic-api/meta.yaml b/f2s/resources/ironic-api/meta.yaml index 5e52fdc2..df25c34b 100644 --- a/f2s/resources/ironic-api/meta.yaml +++ b/f2s/resources/ironic-api/meta.yaml @@ -2,8 +2,8 @@ id: ironic-api handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ironic: value: null diff --git a/f2s/resources/ironic-compute/meta.yaml b/f2s/resources/ironic-compute/meta.yaml index 4227a04e..83e62fa0 100644 --- a/f2s/resources/ironic-compute/meta.yaml +++ b/f2s/resources/ironic-compute/meta.yaml @@ -2,8 +2,8 @@ id: ironic-compute handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git 
a/f2s/resources/ironic-conductor/meta.yaml b/f2s/resources/ironic-conductor/meta.yaml index f6bf0ff2..3ccd6f0a 100644 --- a/f2s/resources/ironic-conductor/meta.yaml +++ b/f2s/resources/ironic-conductor/meta.yaml @@ -2,8 +2,8 @@ id: ironic-conductor handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/ironic-db/meta.yaml b/f2s/resources/ironic-db/meta.yaml index 796de371..f2e12eb0 100644 --- a/f2s/resources/ironic-db/meta.yaml +++ b/f2s/resources/ironic-db/meta.yaml @@ -2,8 +2,8 @@ id: ironic-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/ironic-keystone/meta.yaml b/f2s/resources/ironic-keystone/meta.yaml index cb3221dc..54d62f52 100644 --- a/f2s/resources/ironic-keystone/meta.yaml +++ b/f2s/resources/ironic-keystone/meta.yaml @@ -2,8 +2,8 @@ id: ironic-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/keystone-db/meta.yaml b/f2s/resources/keystone-db/meta.yaml index b5dbc5b7..fb74c231 100644 --- a/f2s/resources/keystone-db/meta.yaml +++ b/f2s/resources/keystone-db/meta.yaml @@ -2,8 +2,8 @@ id: keystone-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/keystone/meta.yaml b/f2s/resources/keystone/meta.yaml index 61803602..3d6630ce 100644 --- a/f2s/resources/keystone/meta.yaml +++ b/f2s/resources/keystone/meta.yaml @@ -2,8 +2,8 @@ id: keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: access: value: null diff --git a/f2s/resources/logging/meta.yaml 
b/f2s/resources/logging/meta.yaml index e6becfd2..7ce66c81 100644 --- a/f2s/resources/logging/meta.yaml +++ b/f2s/resources/logging/meta.yaml @@ -2,8 +2,8 @@ id: logging handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: base_syslog_hash: value: null diff --git a/f2s/resources/memcached/meta.yaml b/f2s/resources/memcached/meta.yaml index 07f75733..0800af06 100644 --- a/f2s/resources/memcached/meta.yaml +++ b/f2s/resources/memcached/meta.yaml @@ -2,8 +2,8 @@ id: memcached handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/murano-db/meta.yaml b/f2s/resources/murano-db/meta.yaml index f7a86eff..d0ef0ebe 100644 --- a/f2s/resources/murano-db/meta.yaml +++ b/f2s/resources/murano-db/meta.yaml @@ -2,8 +2,8 @@ id: murano-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/murano-keystone/meta.yaml b/f2s/resources/murano-keystone/meta.yaml index 27c270b4..47f2ecce 100644 --- a/f2s/resources/murano-keystone/meta.yaml +++ b/f2s/resources/murano-keystone/meta.yaml @@ -2,8 +2,8 @@ id: murano-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/murano-rabbitmq/meta.yaml b/f2s/resources/murano-rabbitmq/meta.yaml index e306d66b..d8c17ef1 100644 --- a/f2s/resources/murano-rabbitmq/meta.yaml +++ b/f2s/resources/murano-rabbitmq/meta.yaml @@ -2,6 +2,6 @@ id: murano-rabbitmq handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: {} diff --git a/f2s/resources/murano/meta.yaml b/f2s/resources/murano/meta.yaml index 05d8120e..3162a425 100644 --- 
a/f2s/resources/murano/meta.yaml +++ b/f2s/resources/murano/meta.yaml @@ -2,8 +2,8 @@ id: murano handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: amqp_hosts: value: null diff --git a/f2s/resources/netconfig/meta.yaml b/f2s/resources/netconfig/meta.yaml index e577d6e7..5472a442 100644 --- a/f2s/resources/netconfig/meta.yaml +++ b/f2s/resources/netconfig/meta.yaml @@ -2,8 +2,8 @@ id: netconfig handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: default_gateway: value: null diff --git a/f2s/resources/neutron-db/meta.yaml b/f2s/resources/neutron-db/meta.yaml index 1675f853..847541d7 100644 --- a/f2s/resources/neutron-db/meta.yaml +++ b/f2s/resources/neutron-db/meta.yaml @@ -2,8 +2,8 @@ id: neutron-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/neutron-keystone/meta.yaml b/f2s/resources/neutron-keystone/meta.yaml index b80ae155..44b19133 100644 --- a/f2s/resources/neutron-keystone/meta.yaml +++ b/f2s/resources/neutron-keystone/meta.yaml @@ -2,8 +2,8 @@ id: neutron-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/nova-db/meta.yaml b/f2s/resources/nova-db/meta.yaml index bcb80469..1164afa3 100644 --- a/f2s/resources/nova-db/meta.yaml +++ b/f2s/resources/nova-db/meta.yaml @@ -2,8 +2,8 @@ id: nova-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/nova-keystone/meta.yaml b/f2s/resources/nova-keystone/meta.yaml index 51629daa..5cba5bae 100644 --- a/f2s/resources/nova-keystone/meta.yaml +++ b/f2s/resources/nova-keystone/meta.yaml @@ 
-2,8 +2,8 @@ id: nova-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/ntp-check/meta.yaml b/f2s/resources/ntp-check/meta.yaml index 320cee96..8a753150 100644 --- a/f2s/resources/ntp-check/meta.yaml +++ b/f2s/resources/ntp-check/meta.yaml @@ -2,8 +2,8 @@ id: ntp-check handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: external_ntp: value: null diff --git a/f2s/resources/ntp-client/meta.yaml b/f2s/resources/ntp-client/meta.yaml index c2a57442..cf874f6a 100644 --- a/f2s/resources/ntp-client/meta.yaml +++ b/f2s/resources/ntp-client/meta.yaml @@ -2,8 +2,8 @@ id: ntp-client handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/ntp-server/meta.yaml b/f2s/resources/ntp-server/meta.yaml index 83125c74..338e23dd 100644 --- a/f2s/resources/ntp-server/meta.yaml +++ b/f2s/resources/ntp-server/meta.yaml @@ -2,8 +2,8 @@ id: ntp-server handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: external_ntp: value: null diff --git a/f2s/resources/openstack-cinder/meta.yaml b/f2s/resources/openstack-cinder/meta.yaml index 36155c50..3e10bd60 100644 --- a/f2s/resources/openstack-cinder/meta.yaml +++ b/f2s/resources/openstack-cinder/meta.yaml @@ -2,8 +2,8 @@ id: openstack-cinder handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: amqp_hosts: value: null diff --git a/f2s/resources/openstack-controller/meta.yaml b/f2s/resources/openstack-controller/meta.yaml index 28f7381d..632aafbc 100644 --- a/f2s/resources/openstack-controller/meta.yaml +++ b/f2s/resources/openstack-controller/meta.yaml @@ -2,8 +2,8 @@ id: 
openstack-controller handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: access: value: null diff --git a/f2s/resources/openstack-haproxy-ceilometer/meta.yaml b/f2s/resources/openstack-haproxy-ceilometer/meta.yaml index d5cf609a..82b06c8c 100644 --- a/f2s/resources/openstack-haproxy-ceilometer/meta.yaml +++ b/f2s/resources/openstack-haproxy-ceilometer/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-ceilometer handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ceilometer: value: null diff --git a/f2s/resources/openstack-haproxy-cinder/meta.yaml b/f2s/resources/openstack-haproxy-cinder/meta.yaml index c8848dbf..c16a2b9c 100644 --- a/f2s/resources/openstack-haproxy-cinder/meta.yaml +++ b/f2s/resources/openstack-haproxy-cinder/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-cinder handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: cinder_hash: value: null diff --git a/f2s/resources/openstack-haproxy-glance/meta.yaml b/f2s/resources/openstack-haproxy-glance/meta.yaml index 514203b7..3726b277 100644 --- a/f2s/resources/openstack-haproxy-glance/meta.yaml +++ b/f2s/resources/openstack-haproxy-glance/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-glance handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-heat/meta.yaml b/f2s/resources/openstack-haproxy-heat/meta.yaml index ebbdea8f..3570bb5f 100644 --- a/f2s/resources/openstack-haproxy-heat/meta.yaml +++ b/f2s/resources/openstack-haproxy-heat/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-heat handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git 
a/f2s/resources/openstack-haproxy-horizon/meta.yaml b/f2s/resources/openstack-haproxy-horizon/meta.yaml index fdf25e4d..eb66d189 100644 --- a/f2s/resources/openstack-haproxy-horizon/meta.yaml +++ b/f2s/resources/openstack-haproxy-horizon/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-horizon handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-ironic/meta.yaml b/f2s/resources/openstack-haproxy-ironic/meta.yaml index 388349f2..0ff64180 100644 --- a/f2s/resources/openstack-haproxy-ironic/meta.yaml +++ b/f2s/resources/openstack-haproxy-ironic/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-ironic handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ironic: value: null diff --git a/f2s/resources/openstack-haproxy-keystone/meta.yaml b/f2s/resources/openstack-haproxy-keystone/meta.yaml index 98589eec..63528a46 100644 --- a/f2s/resources/openstack-haproxy-keystone/meta.yaml +++ b/f2s/resources/openstack-haproxy-keystone/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-murano/meta.yaml b/f2s/resources/openstack-haproxy-murano/meta.yaml index 99054c74..9533f803 100644 --- a/f2s/resources/openstack-haproxy-murano/meta.yaml +++ b/f2s/resources/openstack-haproxy-murano/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-murano handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-mysqld/meta.yaml b/f2s/resources/openstack-haproxy-mysqld/meta.yaml index d7d7eb2b..584ece90 100644 --- a/f2s/resources/openstack-haproxy-mysqld/meta.yaml +++ 
b/f2s/resources/openstack-haproxy-mysqld/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-mysqld handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: custom_mysql_setup_class: value: null diff --git a/f2s/resources/openstack-haproxy-neutron/meta.yaml b/f2s/resources/openstack-haproxy-neutron/meta.yaml index 644057c7..17f37f85 100644 --- a/f2s/resources/openstack-haproxy-neutron/meta.yaml +++ b/f2s/resources/openstack-haproxy-neutron/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-neutron handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-nova/meta.yaml b/f2s/resources/openstack-haproxy-nova/meta.yaml index 79983279..031ce49b 100644 --- a/f2s/resources/openstack-haproxy-nova/meta.yaml +++ b/f2s/resources/openstack-haproxy-nova/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-nova handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-radosgw/meta.yaml b/f2s/resources/openstack-haproxy-radosgw/meta.yaml index 1cbd7bae..a0baa0b2 100644 --- a/f2s/resources/openstack-haproxy-radosgw/meta.yaml +++ b/f2s/resources/openstack-haproxy-radosgw/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-radosgw handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-sahara/meta.yaml b/f2s/resources/openstack-haproxy-sahara/meta.yaml index aa7c4d3e..86fcd072 100644 --- a/f2s/resources/openstack-haproxy-sahara/meta.yaml +++ b/f2s/resources/openstack-haproxy-sahara/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-sahara handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp 
+ update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy-stats/meta.yaml b/f2s/resources/openstack-haproxy-stats/meta.yaml index 532c27c3..0001493b 100644 --- a/f2s/resources/openstack-haproxy-stats/meta.yaml +++ b/f2s/resources/openstack-haproxy-stats/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-stats handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/openstack-haproxy-swift/meta.yaml b/f2s/resources/openstack-haproxy-swift/meta.yaml index f330b9be..627fbce4 100644 --- a/f2s/resources/openstack-haproxy-swift/meta.yaml +++ b/f2s/resources/openstack-haproxy-swift/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy-swift handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-haproxy/meta.yaml b/f2s/resources/openstack-haproxy/meta.yaml index e5284964..4a049542 100644 --- a/f2s/resources/openstack-haproxy/meta.yaml +++ b/f2s/resources/openstack-haproxy/meta.yaml @@ -2,8 +2,8 @@ id: openstack-haproxy handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-network-agents-dhcp/meta.yaml b/f2s/resources/openstack-network-agents-dhcp/meta.yaml index d32720b3..3745593b 100644 --- a/f2s/resources/openstack-network-agents-dhcp/meta.yaml +++ b/f2s/resources/openstack-network-agents-dhcp/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-agents-dhcp handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: debug: value: null diff --git a/f2s/resources/openstack-network-agents-l3/meta.yaml b/f2s/resources/openstack-network-agents-l3/meta.yaml index 250c767b..523cf75d 100644 --- 
a/f2s/resources/openstack-network-agents-l3/meta.yaml +++ b/f2s/resources/openstack-network-agents-l3/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-agents-l3 handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: debug: value: null diff --git a/f2s/resources/openstack-network-agents-metadata/meta.yaml b/f2s/resources/openstack-network-agents-metadata/meta.yaml index 75d532fe..89d18b40 100644 --- a/f2s/resources/openstack-network-agents-metadata/meta.yaml +++ b/f2s/resources/openstack-network-agents-metadata/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-agents-metadata handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: debug: value: null diff --git a/f2s/resources/openstack-network-common-config/meta.yaml b/f2s/resources/openstack-network-common-config/meta.yaml index c2edaad1..8d649584 100644 --- a/f2s/resources/openstack-network-common-config/meta.yaml +++ b/f2s/resources/openstack-network-common-config/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-common-config handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: amqp_hosts: value: null diff --git a/f2s/resources/openstack-network-compute-nova/meta.yaml b/f2s/resources/openstack-network-compute-nova/meta.yaml index 1c967c5e..406e55dc 100644 --- a/f2s/resources/openstack-network-compute-nova/meta.yaml +++ b/f2s/resources/openstack-network-compute-nova/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-compute-nova handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-network-networks/meta.yaml b/f2s/resources/openstack-network-networks/meta.yaml index 7904d440..a1568945 100644 --- a/f2s/resources/openstack-network-networks/meta.yaml +++ 
b/f2s/resources/openstack-network-networks/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-networks handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: access: value: null diff --git a/f2s/resources/openstack-network-plugins-l2/meta.yaml b/f2s/resources/openstack-network-plugins-l2/meta.yaml index ae35e197..d3c6a937 100644 --- a/f2s/resources/openstack-network-plugins-l2/meta.yaml +++ b/f2s/resources/openstack-network-plugins-l2/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-plugins-l2 handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/openstack-network-routers/meta.yaml b/f2s/resources/openstack-network-routers/meta.yaml index a238457e..c46d43ca 100644 --- a/f2s/resources/openstack-network-routers/meta.yaml +++ b/f2s/resources/openstack-network-routers/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-routers handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: access: value: null diff --git a/f2s/resources/openstack-network-server-config/meta.yaml b/f2s/resources/openstack-network-server-config/meta.yaml index 3a824f01..711bd549 100644 --- a/f2s/resources/openstack-network-server-config/meta.yaml +++ b/f2s/resources/openstack-network-server-config/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-server-config handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/openstack-network-server-nova/meta.yaml b/f2s/resources/openstack-network-server-nova/meta.yaml index 7c01a38e..9e74caf1 100644 --- a/f2s/resources/openstack-network-server-nova/meta.yaml +++ b/f2s/resources/openstack-network-server-nova/meta.yaml @@ -2,8 +2,8 @@ id: openstack-network-server-nova handler: puppetv2 
version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/pre_hiera_config/meta.yaml b/f2s/resources/pre_hiera_config/meta.yaml index b94987a9..d55b2fbf 100644 --- a/f2s/resources/pre_hiera_config/meta.yaml +++ b/f2s/resources/pre_hiera_config/meta.yaml @@ -2,8 +2,8 @@ id: pre_hiera_config handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/public_vip_ping/meta.yaml b/f2s/resources/public_vip_ping/meta.yaml index c3a9a591..90d9f79c 100644 --- a/f2s/resources/public_vip_ping/meta.yaml +++ b/f2s/resources/public_vip_ping/meta.yaml @@ -2,8 +2,8 @@ id: public_vip_ping handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/rabbitmq/meta.yaml b/f2s/resources/rabbitmq/meta.yaml index 4cc7fd88..3339b286 100644 --- a/f2s/resources/rabbitmq/meta.yaml +++ b/f2s/resources/rabbitmq/meta.yaml @@ -2,8 +2,8 @@ id: rabbitmq handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: amqp_port: value: null diff --git a/f2s/resources/sahara-db/meta.yaml b/f2s/resources/sahara-db/meta.yaml index b6dcb499..4f4542b0 100644 --- a/f2s/resources/sahara-db/meta.yaml +++ b/f2s/resources/sahara-db/meta.yaml @@ -2,8 +2,8 @@ id: sahara-db handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: database_vip: value: null diff --git a/f2s/resources/sahara-keystone/meta.yaml b/f2s/resources/sahara-keystone/meta.yaml index 69a6115e..75eff871 100644 --- a/f2s/resources/sahara-keystone/meta.yaml +++ b/f2s/resources/sahara-keystone/meta.yaml @@ -2,8 +2,8 @@ id: sahara-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp 
- update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/sahara/meta.yaml b/f2s/resources/sahara/meta.yaml index 6ee2e224..953d8c83 100644 --- a/f2s/resources/sahara/meta.yaml +++ b/f2s/resources/sahara/meta.yaml @@ -2,8 +2,8 @@ id: sahara handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: access_hash: value: null diff --git a/f2s/resources/ssl-add-trust-chain/meta.yaml b/f2s/resources/ssl-add-trust-chain/meta.yaml index f34c5e60..5f5e0374 100644 --- a/f2s/resources/ssl-add-trust-chain/meta.yaml +++ b/f2s/resources/ssl-add-trust-chain/meta.yaml @@ -2,8 +2,8 @@ id: ssl-add-trust-chain handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/ssl-keys-saving/meta.yaml b/f2s/resources/ssl-keys-saving/meta.yaml index 02e3ad26..90bc0ebd 100644 --- a/f2s/resources/ssl-keys-saving/meta.yaml +++ b/f2s/resources/ssl-keys-saving/meta.yaml @@ -2,8 +2,8 @@ id: ssl-keys-saving handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/swift-keystone/meta.yaml b/f2s/resources/swift-keystone/meta.yaml index 54f007f0..ba5bbc63 100644 --- a/f2s/resources/swift-keystone/meta.yaml +++ b/f2s/resources/swift-keystone/meta.yaml @@ -2,8 +2,8 @@ id: swift-keystone handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/swift-rebalance-cron/meta.yaml b/f2s/resources/swift-rebalance-cron/meta.yaml index 313ede9d..d2a04544 100644 --- a/f2s/resources/swift-rebalance-cron/meta.yaml +++ b/f2s/resources/swift-rebalance-cron/meta.yaml @@ -2,8 +2,8 @@ id: swift-rebalance-cron handler: puppetv2 version: '8.0' actions: - run: 
actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/swift/meta.yaml b/f2s/resources/swift/meta.yaml index 99cfad83..73e32aaa 100644 --- a/f2s/resources/swift/meta.yaml +++ b/f2s/resources/swift/meta.yaml @@ -2,8 +2,8 @@ id: swift handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: debug: value: null diff --git a/f2s/resources/tools/meta.yaml b/f2s/resources/tools/meta.yaml index e23aae59..01ff292c 100644 --- a/f2s/resources/tools/meta.yaml +++ b/f2s/resources/tools/meta.yaml @@ -2,8 +2,8 @@ id: tools handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: deployment_mode: value: null diff --git a/f2s/resources/top-role-ceph-osd/meta.yaml b/f2s/resources/top-role-ceph-osd/meta.yaml index 331ed767..206f65e2 100644 --- a/f2s/resources/top-role-ceph-osd/meta.yaml +++ b/f2s/resources/top-role-ceph-osd/meta.yaml @@ -2,8 +2,8 @@ id: top-role-ceph-osd handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/top-role-cinder-vmware/meta.yaml b/f2s/resources/top-role-cinder-vmware/meta.yaml index dec1771e..bbf04753 100644 --- a/f2s/resources/top-role-cinder-vmware/meta.yaml +++ b/f2s/resources/top-role-cinder-vmware/meta.yaml @@ -2,8 +2,8 @@ id: top-role-cinder-vmware handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/top-role-cinder/meta.yaml b/f2s/resources/top-role-cinder/meta.yaml index 605bf00c..75dbd887 100644 --- a/f2s/resources/top-role-cinder/meta.yaml +++ b/f2s/resources/top-role-cinder/meta.yaml @@ -2,8 +2,8 @@ id: top-role-cinder handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + 
run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/top-role-compute-vmware/meta.yaml b/f2s/resources/top-role-compute-vmware/meta.yaml index eca28a2c..b2ff4732 100644 --- a/f2s/resources/top-role-compute-vmware/meta.yaml +++ b/f2s/resources/top-role-compute-vmware/meta.yaml @@ -2,8 +2,8 @@ id: top-role-compute-vmware handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/top-role-compute/meta.yaml b/f2s/resources/top-role-compute/meta.yaml index 2f464777..093ebd24 100644 --- a/f2s/resources/top-role-compute/meta.yaml +++ b/f2s/resources/top-role-compute/meta.yaml @@ -2,8 +2,8 @@ id: top-role-compute handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/top-role-mongo/meta.yaml b/f2s/resources/top-role-mongo/meta.yaml index e3448e5c..481741a0 100644 --- a/f2s/resources/top-role-mongo/meta.yaml +++ b/f2s/resources/top-role-mongo/meta.yaml @@ -2,8 +2,8 @@ id: top-role-mongo handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/top-role-primary-mongo/meta.yaml b/f2s/resources/top-role-primary-mongo/meta.yaml index 4e3b46a9..a3e935a1 100644 --- a/f2s/resources/top-role-primary-mongo/meta.yaml +++ b/f2s/resources/top-role-primary-mongo/meta.yaml @@ -2,8 +2,8 @@ id: top-role-primary-mongo handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/umm/meta.yaml b/f2s/resources/umm/meta.yaml index 949e385e..478a72dc 100644 --- a/f2s/resources/umm/meta.yaml +++ b/f2s/resources/umm/meta.yaml @@ -2,8 +2,8 @@ id: umm handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: 
actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/update_hosts/meta.yaml b/f2s/resources/update_hosts/meta.yaml index e2b649cf..21e04200 100644 --- a/f2s/resources/update_hosts/meta.yaml +++ b/f2s/resources/update_hosts/meta.yaml @@ -2,8 +2,8 @@ id: update_hosts handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/updatedb/meta.yaml b/f2s/resources/updatedb/meta.yaml index 7c273f3e..cb9f6980 100644 --- a/f2s/resources/updatedb/meta.yaml +++ b/f2s/resources/updatedb/meta.yaml @@ -2,8 +2,8 @@ id: updatedb handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/virtual_ips/meta.yaml b/f2s/resources/virtual_ips/meta.yaml index 72746ccc..0b22a001 100644 --- a/f2s/resources/virtual_ips/meta.yaml +++ b/f2s/resources/virtual_ips/meta.yaml @@ -2,8 +2,8 @@ id: virtual_ips handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: fqdn: value: null diff --git a/f2s/resources/vmware-vcenter/meta.yaml b/f2s/resources/vmware-vcenter/meta.yaml index ee372308..95788c06 100644 --- a/f2s/resources/vmware-vcenter/meta.yaml +++ b/f2s/resources/vmware-vcenter/meta.yaml @@ -2,8 +2,8 @@ id: vmware-vcenter handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp + update: run.pp input: ceilometer: value: null diff --git a/f2s/resources/workloads_collector_add/meta.yaml b/f2s/resources/workloads_collector_add/meta.yaml index 70ea732d..a052923c 100644 --- a/f2s/resources/workloads_collector_add/meta.yaml +++ b/f2s/resources/workloads_collector_add/meta.yaml @@ -2,8 +2,8 @@ id: workloads_collector_add handler: puppetv2 version: '8.0' actions: - run: actions/run.pp - update: actions/run.pp + run: run.pp 
+ update: run.pp input: fqdn: value: null From 8e1f011e79e41ad506cf01466a949f151170b0de Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 18:07:50 +0200 Subject: [PATCH 33/51] Disable mos_repos resource --- docker-compose.yml | 10 +++++----- f2s/vrs/prep.yaml | 9 --------- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index fa471b3f..26fe4178 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,11 +19,11 @@ solar: - riak - redis -# docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/vagrant/templates \ -# -v /root/solar/resources:/vagrant/resources -v /root/solar/f2s:/vagrant/f2s \ -# -v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ -# -v /root/.ssh:/root/.ssh \ -# --link=riak:riak --link=redis:redis --name solar solarproject/solar-celery:f2s +docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/templates \ +-v /root/solar/resources:/resources -v /root/solar/f2s:/f2s \ +-v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ +-v /root/.ssh:/root/.ssh \ +--link=riak:riak --link=redis:redis solarproject/solar-celery:f2s riak: image: tutum/riak diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml index 401aa628..193ff98e 100644 --- a/f2s/vrs/prep.yaml +++ b/f2s/vrs/prep.yaml @@ -15,11 +15,6 @@ resources: dst: /var/lib/astute - src: /etc/puppet/modules dst: /etc/puppet/ - - id: mos_repos{{index}} - from: templates/mos_repos.yaml - values: - node: {{node}} - index: {{index}} events: - type: depends_on state: success @@ -32,7 +27,3 @@ events: state: success parent_action: sources{{index}}.run depend_action: role_data{{index}}.run - - type: depends_on - state: success - parent_action: 
managed_apt_{{index}}.run - depend_action: role_data{{index}}.run From 1624bdba745ba23324bcb5d5e803c9402648cc8e Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 18:35:32 +0200 Subject: [PATCH 34/51] add vms_conf val --- docker-compose.yml | 10 +++++----- f2s/resources/role_data/meta.yaml | 4 ++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 26fe4178..81be799f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,11 +19,11 @@ solar: - riak - redis -docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/templates \ --v /root/solar/resources:/resources -v /root/solar/f2s:/f2s \ --v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ --v /root/.ssh:/root/.ssh \ ---link=riak:riak --link=redis:redis solarproject/solar-celery:f2s +# docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/templates \ +# -v /root/solar/resources:/resources -v /root/solar/f2s:/f2s \ +# -v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ +# -v /root/.ssh:/root/.ssh \ +# --link=riak:riak --link=redis:redis solarproject/solar-celery:f2s riak: image: tutum/riak diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml index 895451c8..6268f2c5 100644 --- a/f2s/resources/role_data/meta.yaml +++ b/f2s/resources/role_data/meta.yaml @@ -17,6 +17,10 @@ input: value: tasks: value: + vms_conf: + value: + horizon: + value: ironic: value: access: From b75f2a11bfc1fbd38b9ef4dd59dd82bbf5335562 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 12 Nov 2015 19:51:59 +0200 Subject: [PATCH 35/51] Add session for celery tasks --- solar/orchestration/tasks.py | 23 ++++++++++++++++++++++- 1 file changed, 
22 insertions(+), 1 deletion(-) diff --git a/solar/orchestration/tasks.py b/solar/orchestration/tasks.py index e392036e..c8414d09 100644 --- a/solar/orchestration/tasks.py +++ b/solar/orchestration/tasks.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. +import time from functools import partial import subprocess @@ -29,7 +30,24 @@ from solar.orchestration.runner import app from solar.orchestration.traversal import traverse from solar.system_log.tasks import commit_logitem from solar.system_log.tasks import error_logitem -import time +from solar.orchestration import limits +from solar.orchestration import executor +from solar.dblayer import ModelMeta + +from solar.dblayer.model import ModelMeta +from functools import wraps + +def session(func): + @wraps(func) + def inner(*args, **kwargs): + try: + ModelMeta.session_start() + rst = func(*args, **kwargs) + finally: + ModelMeta.session_end() + return rst + return inner + __all__ = ['solar_resource', 'cmd', 'sleep', 'error', 'fault_tolerance', 'schedule_start', 'schedule_next'] @@ -141,6 +159,7 @@ def schedule(plan_uid, dg): @app.task(name='schedule_start') +@session def schedule_start(plan_uid): """On receive finished task should update storage with task result: @@ -152,6 +171,7 @@ def schedule_start(plan_uid): @app.task(name='soft_stop') +@session def soft_stop(plan_uid): dg = graph.get_graph(plan_uid) for n in dg: @@ -161,6 +181,7 @@ def soft_stop(plan_uid): @app.task(name='schedule_next') +@session def schedule_next(task_id, status, errmsg=None): plan_uid, task_name = task_id.rsplit(':', 1) dg = graph.get_graph(plan_uid) From 384cb0dae5fb272d6b130e921a3a299b4b8f45ca Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Fri, 13 Nov 2015 11:06:49 +0200 Subject: [PATCH 36/51] Add README for f2s --- f2s/README.md | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 f2s/README.md diff --git 
a/f2s/README.md b/f2s/README.md new file mode 100644 index 00000000..a68f9855 --- /dev/null +++ b/f2s/README.md @@ -0,0 +1,85 @@ +#How to install on fuel master? + +To use solar on fuel master we need to use container because of +python2.6 there. Also solar itself relies on several services. + +``` +docker run --name riak -d -p 8087:8087 -p 8098:8098 tutum/riak + +docker run --name redis -d -p 6379:6379 -e REDIS_PASS=**None** tutum/redis + +docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/templates \ +-v /root/solar/resources:/resources -v /root/solar/f2s:/f2s \ +-v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ +-v /root/.ssh:/root/.ssh \ +--link=riak:riak --link=redis:redis solarproject/solar-celery:f2s +``` + +#f2s.py + +This script converts tasks.yaml + library actions into solar resources, +vrs, and events. + +1. Based on tasks.yaml meta.yaml is generated, you can take a look on example +at f2s/resources/netconfig/meta.yaml +2. Based on hiera lookup we generated inputs for each resource, patches can be +found at f2s/patches +3. VRs (f2s/vrs) generated based on dependencies between tasks and roles + +#fsclient.py + +This script helps to create solar resource with some of nailgun data + +`./f2s/fsclient.py master 1` +Accepts cluster id, prepares transports for master + generate keys task +for current cluster. + +`./f2s/fsclient.py nodes 1` +Prepares transports for provided nodes, ip and cluster id fetchd from nailgun. + +`./f2s/fsclient.py prep 1` +Creates tasks for syncing keys + fuel-library modules. + +`./f2s/fsclient.py roles 1` +Based on roles stored in nailgun we will assign vrs/.yaml to a given +node. Right now it takes while, so be patient. + +#fetching data from nailgun + +Special entity added which allows to fetch data from any source +*before* any actual deployment. 
+This entity provides mechanism to specify *manager* for resource (or list of them). +Manager accepts inputs as json in stdin, and outputs result in stdout, +with result of manager execution we will update solar storage. + +Examples can be found at f2s/resources/role_data/managers. + +Data will be fetched on solar command + +`solar res prefetch -n ` + +#tweaks + +Several things needs to be manually adjusted before you can use solar +on fuel master. + +- provision a node by fuel + `fuel node --node 1 --provision` +- create /var/lib/astute directory on remote +- install repos using fuel + `fuel node --node 1 --tasks core_repos` +- configure hiera on remote, and create /etc/puppet/hieradata directory +``` + :backends: + - yaml + #- json +:yaml: + :datadir: /etc/puppet/hieradata +:json: + :datadir: /etc/puppet/hieradata +:hierarchy: + - "%{resource_name}" + - resource +``` + +All of this things will be automated by solar eventually From 0e87d2d0115e31a3d17fa639058b366c59a8a8c7 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 13 Nov 2015 14:02:59 +0100 Subject: [PATCH 37/51] Fix f2s/README Signed-off-by: Bogdan Dobrelya --- f2s/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/f2s/README.md b/f2s/README.md index a68f9855..4589f798 100644 --- a/f2s/README.md +++ b/f2s/README.md @@ -4,6 +4,9 @@ To use solar on fuel master we need to use container because of python2.6 there. Also solar itself relies on several services. 
``` +yum -y install git +git clone -b f2s https://github.com/Mirantis/solar.git + docker run --name riak -d -p 8087:8087 -p 8098:8098 tutum/riak docker run --name redis -d -p 6379:6379 -e REDIS_PASS=**None** tutum/redis From 85d111cc5b11df4dffc1e04038c78518c041e078 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 13 Nov 2015 14:02:59 +0100 Subject: [PATCH 38/51] Fix f2s/README Signed-off-by: Bogdan Dobrelya --- f2s/README.md | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/f2s/README.md b/f2s/README.md index 4589f798..e6fe945c 100644 --- a/f2s/README.md +++ b/f2s/README.md @@ -31,7 +31,8 @@ found at f2s/patches #fsclient.py -This script helps to create solar resource with some of nailgun data +This script helps to create solar resource with some of nailgun data. +Note, you should run it inside of the solar container. `./f2s/fsclient.py master 1` Accepts cluster id, prepares transports for master + generate keys task @@ -86,3 +87,23 @@ on fuel master. 
``` All of this things will be automated by solar eventually + +#basic troubleshooting + +If there are any Fuel plugin installed, you should manually +create a stanza for it in the `./f2s/resources/role_data/meta.yaml`, +like: +``` +input: + foo_plugin_name: + value: null +``` + +And regenerate the data from nailgun, + +To regenerate the deployment data to Solar resources make +``` +solar res clear_all +``` + +and repeat all of the fsclient.py and fetching nailgun data steps From 808f389232f71ce97099f479f90cf398c8b3194f Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 18 Nov 2015 10:03:01 +0200 Subject: [PATCH 39/51] Add puppet_modules to every role vr --- f2s/vrs/ceph-osd.yml | 1 + f2s/vrs/cinder-vmware.yml | 1 + f2s/vrs/cinder.yml | 1 + f2s/vrs/compute-vmware.yml | 1 + f2s/vrs/ironic.yml | 1 + f2s/vrs/mongo.yml | 1 + f2s/vrs/primary-mongo.yml | 1 + f2s/vrs/virt.yml | 1 + 8 files changed, 8 insertions(+) diff --git a/f2s/vrs/ceph-osd.yml b/f2s/vrs/ceph-osd.yml index 80b3c0f2..91613111 100644 --- a/f2s/vrs/ceph-osd.yml +++ b/f2s/vrs/ceph-osd.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/cinder-vmware.yml b/f2s/vrs/cinder-vmware.yml index 67a6fd07..81f1843a 100644 --- a/f2s/vrs/cinder-vmware.yml +++ b/f2s/vrs/cinder-vmware.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/cinder.yml b/f2s/vrs/cinder.yml index b89d78bb..78a535c0 100644 --- a/f2s/vrs/cinder.yml +++ b/f2s/vrs/cinder.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/compute-vmware.yml b/f2s/vrs/compute-vmware.yml index 
7a163e76..8f5ed761 100644 --- a/f2s/vrs/compute-vmware.yml +++ b/f2s/vrs/compute-vmware.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: logging{{index}} from: f2s/resources/logging location: '{{node}}' diff --git a/f2s/vrs/ironic.yml b/f2s/vrs/ironic.yml index 85939851..cb8333ac 100644 --- a/f2s/vrs/ironic.yml +++ b/f2s/vrs/ironic.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/mongo.yml b/f2s/vrs/mongo.yml index af866e5c..a9e9bf50 100644 --- a/f2s/vrs/mongo.yml +++ b/f2s/vrs/mongo.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/primary-mongo.yml b/f2s/vrs/primary-mongo.yml index 0bc78a5a..cafae789 100644 --- a/f2s/vrs/primary-mongo.yml +++ b/f2s/vrs/primary-mongo.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' diff --git a/f2s/vrs/virt.yml b/f2s/vrs/virt.yml index 742eda39..a243212c 100644 --- a/f2s/vrs/virt.yml +++ b/f2s/vrs/virt.yml @@ -6,6 +6,7 @@ resources: values: env: '{{env}}' uid: '{{index}}' + puppet_modules: '/etc/puppet/modules' - id: ssl-keys-saving{{index}} from: f2s/resources/ssl-keys-saving location: '{{node}}' From 21bbbc897159949495dd9d86db74cc3749c3db7f Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 18 Nov 2015 10:04:54 +0200 Subject: [PATCH 40/51] Add session wrapper to solar_resource task --- solar/orchestration/tasks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/solar/orchestration/tasks.py b/solar/orchestration/tasks.py index c8414d09..f499897b 100644 --- a/solar/orchestration/tasks.py +++ 
b/solar/orchestration/tasks.py @@ -85,6 +85,7 @@ def end_solar_session(task_id, task, *args, **kwargs): @report_task(name='solar_resource') +@session def solar_resource(ctxt, resource_name, action): res = resource.load(resource_name) return actions.resource_action(res, action) From a9d1de432e4b5b1d422555a27b5f6f400186f824 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 18 Nov 2015 13:55:30 +0200 Subject: [PATCH 41/51] Add orch noop command to mark tasks as walked --- solar/cli/orch.py | 8 +++++++- solar/orchestration/graph.py | 12 ++++++++++++ solar/orchestration/utils.py | 3 ++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/solar/cli/orch.py b/solar/cli/orch.py index 39220ed4..6abd0482 100755 --- a/solar/cli/orch.py +++ b/solar/cli/orch.py @@ -77,7 +77,8 @@ def click_report(uid): 'ERROR': 'red', 'SUCCESS': 'green', 'INPROGRESS': 'yellow', - 'SKIPPED': 'blue'} + 'SKIPPED': 'blue', + 'NOOP': 'black'} total = 0.0 report = graph.report_topo(uid) @@ -114,6 +115,11 @@ def filter(uid, start, end): utils.write_graph(plan) click.echo('Created {name}.png'.format(name=plan.graph['name'])) +@orchestration.command(help='Used to mark task as executed') +@click.argument('uid', type=SOLARUID) +@click.option('--task', '-t', multiple=True) +def noop(uid, task): + graph.set_states(uid, task) @orchestration.command(name='run-once') @click.argument('uid', type=SOLARUID, default='last') diff --git a/solar/orchestration/graph.py b/solar/orchestration/graph.py index 59435c30..a8006356 100644 --- a/solar/orchestration/graph.py +++ b/solar/orchestration/graph.py @@ -25,6 +25,10 @@ from solar import utils from solar.orchestration.traversal import states +from solar.dblayer.solar_models import Task +from solar.dblayer.model import clear_cache +from solar.dblayer.model import ModelMeta + def save_graph(graph): # maybe it is possible to store part of information in AsyncResult backend @@ -54,6 +58,14 @@ def update_graph(graph): task.errmsg = graph.node[n]['errmsg'] or 
'' task.save() +def set_states(uid, tasks): + plan = get_graph(uid) + for t in tasks: + if t not in plan.node: + raise Exception("No task %s in plan %s", t, uid) + plan.node[t]['task'].status = states.NOOP.name + plan.node[t]['task'].save_lazy() + ModelMeta.save_all_lazy() def get_graph(uid): dg = nx.MultiDiGraph() diff --git a/solar/orchestration/utils.py b/solar/orchestration/utils.py index c915da8c..045cebc3 100644 --- a/solar/orchestration/utils.py +++ b/solar/orchestration/utils.py @@ -27,7 +27,8 @@ def write_graph(plan): 'ERROR': 'red', 'SUCCESS': 'green', 'INPROGRESS': 'yellow', - 'SKIPPED': 'blue'} + 'SKIPPED': 'blue', + 'NOOP': 'black'} for n in plan: color = colors[plan.node[n]['status']] From d0c20cb5f2888e2f5f51ff47c470cb59b6363b5a Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 18 Nov 2015 14:33:02 +0200 Subject: [PATCH 42/51] Treat NOOP or SUCCESS as visited and ERROR as blocker --- solar/orchestration/executor.py | 10 ++++------ solar/orchestration/traversal.py | 9 +++++---- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/solar/orchestration/executor.py b/solar/orchestration/executor.py index a1cfeb3a..6174f128 100644 --- a/solar/orchestration/executor.py +++ b/solar/orchestration/executor.py @@ -29,12 +29,10 @@ def celery_executor(dg, tasks, control_tasks=()): task_id = '{}:{}'.format(dg.graph['uid'], task_name) task = app.tasks[dg.node[task_name]['type']] - all_ok = all_success(dg, dg.predecessors(task_name)) - if all_ok or task_name in control_tasks: - dg.node[task_name]['status'] = 'INPROGRESS' - dg.node[task_name]['start_time'] = time.time() - for t in generate_task(task, dg.node[task_name], task_id): - to_execute.append(t) + dg.node[task_name]['status'] = 'INPROGRESS' + dg.node[task_name]['start_time'] = time.time() + for t in generate_task(task, dg.node[task_name], task_id): + to_execute.append(t) return group(to_execute) diff --git a/solar/orchestration/traversal.py b/solar/orchestration/traversal.py index 
606060b7..8c33f269 100644 --- a/solar/orchestration/traversal.py +++ b/solar/orchestration/traversal.py @@ -32,8 +32,8 @@ from enum import Enum states = Enum('States', 'SUCCESS ERROR NOOP INPROGRESS SKIPPED PENDING') -VISITED = (states.SUCCESS.name, states.ERROR.name, states.NOOP.name) -BLOCKED = (states.INPROGRESS.name, states.SKIPPED.name) +VISITED = (states.SUCCESS.name, states.NOOP.name) +BLOCKED = (states.INPROGRESS.name, states.SKIPPED.name, states.ERROR.name) def traverse(dg): @@ -43,7 +43,7 @@ def traverse(dg): data = dg.node[node] if data['status'] in VISITED: visited.add(node) - + rst = [] for node in dg: data = dg.node[node] @@ -51,4 +51,5 @@ def traverse(dg): continue if set(dg.predecessors(node)) <= visited: - yield node + rst.append(node) + return rst From c57937edb9e7e242693b800c75bbbbbd4312abdd Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 19 Nov 2015 10:15:08 +0200 Subject: [PATCH 43/51] Generate inputs for compute and ceph-osd --- f2s/f2s.py | 2 +- f2s/resources/ceilometer-compute/meta.yaml | 24 ++++ f2s/resources/ceph-compute/meta.yaml | 24 ++++ .../configure_default_route/meta.yaml | 4 + f2s/resources/heat/actions/run.pp | 15 ++- f2s/resources/keystone/actions/run.pp | 5 + f2s/resources/murano-rabbitmq/meta.yaml | 8 +- f2s/resources/murano/actions/run.pp | 4 +- f2s/resources/ntp-client/meta.yaml | 8 ++ .../openstack-controller/actions/run.pp | 11 +- .../actions/run.pp | 2 +- .../openstack-network-compute-nova/meta.yaml | 14 ++ f2s/resources/rabbitmq/actions/run.pp | 13 +- f2s/resources/swift-keystone/actions/run.pp | 10 +- f2s/resources/top-role-ceph-osd/meta.yaml | 34 +++++ f2s/resources/top-role-compute/meta.yaml | 120 ++++++++++++++++++ 16 files changed, 288 insertions(+), 10 deletions(-) diff --git a/f2s/f2s.py b/f2s/f2s.py index 4adc742e..2b57bda7 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -24,7 +24,7 @@ ensure_dir(RESOURCE_TMP_WORKDIR) RESOURCE_DIR = os.path.join(CURDIR, 'resources') VR_TMP_DIR = os.path.join(CURDIR, 'tmp/vrs') 
ensure_dir(VR_TMP_DIR) -INPUTS_LOCATION = "/root/latest/" +INPUTS_LOCATION = "/root/current/" DEPLOYMENT_GROUP_PATH = os.path.join(LIBRARY_PATH, 'deployment', 'puppet', 'deployment_groups', 'tasks.yaml') diff --git a/f2s/resources/ceilometer-compute/meta.yaml b/f2s/resources/ceilometer-compute/meta.yaml index d38534aa..512a5276 100644 --- a/f2s/resources/ceilometer-compute/meta.yaml +++ b/f2s/resources/ceilometer-compute/meta.yaml @@ -5,9 +5,33 @@ actions: run: run.pp update: run.pp input: + amqp_hosts: + value: null + ceilometer: + value: null + ceilometer_hash: + value: null + debug: + value: null fqdn: value: null + management_vip: + value: null puppet_modules: value: null + rabbit_hash: + value: null + region: + value: null role: value: null + service_endpoint: + value: null + syslog_log_facility_ceilometer: + value: null + use_stderr: + value: null + use_syslog: + value: null + verbose: + value: null diff --git a/f2s/resources/ceph-compute/meta.yaml b/f2s/resources/ceph-compute/meta.yaml index b439d846..31907d00 100644 --- a/f2s/resources/ceph-compute/meta.yaml +++ b/f2s/resources/ceph-compute/meta.yaml @@ -5,9 +5,33 @@ actions: run: run.pp update: run.pp input: + ceph_monitor_nodes: + value: null + ceph_primary_monitor_node: + value: null fqdn: value: null + keystone_hash: + value: null + management_vip: + value: null + network_scheme: + value: null + public_vip: + value: null puppet_modules: value: null role: value: null + storage: + value: null + storage_hash: + value: null + syslog_log_facility_ceph: + value: null + syslog_log_level_ceph: + value: null + use_neutron: + value: null + use_syslog: + value: null diff --git a/f2s/resources/configure_default_route/meta.yaml b/f2s/resources/configure_default_route/meta.yaml index 5e4bc30f..dfc00382 100644 --- a/f2s/resources/configure_default_route/meta.yaml +++ b/f2s/resources/configure_default_route/meta.yaml @@ -7,6 +7,10 @@ actions: input: fqdn: value: null + management_vrouter_vip: + value: null + 
network_scheme: + value: null puppet_modules: value: null role: diff --git a/f2s/resources/heat/actions/run.pp b/f2s/resources/heat/actions/run.pp index 528e2eb3..5ed5e9e3 100644 --- a/f2s/resources/heat/actions/run.pp +++ b/f2s/resources/heat/actions/run.pp @@ -26,6 +26,18 @@ $read_timeout = '60' $sql_connection = "mysql://${database_user}:${database_password}@${db_host}/${database_name}?read_timeout=${read_timeout}" $region = hiera('region', 'RegionOne') $public_ssl_hash = hiera('public_ssl') +$public_ip = hiera('public_vip') +$public_protocol = pick($public_ssl_hash['services'], false) ? { + true => 'https', + default => 'http', +} + +$public_address = pick($public_ssl_hash['services'], false) ? { + true => pick($public_ssl_hash['hostname']), + default => $public_ip, +} +$auth_uri = "${public_protocol}://${public_address}:5000/v2.0/" +$identity_uri = "http://${service_endpoint}:35357/" ####### Disable upstart startup on install ####### if $::operatingsystem == 'Ubuntu' { @@ -54,7 +66,8 @@ class { 'openstack::heat' : api_bind_host => $bind_address, api_cfn_bind_host => $bind_address, api_cloudwatch_bind_host => $bind_address, - keystone_host => $service_endpoint, + auth_uri => $auth_uri, + identity_uri => $identity_uri, keystone_user => $keystone_user, keystone_password => $heat_hash['user_password'], keystone_tenant => $keystone_tenant, diff --git a/f2s/resources/keystone/actions/run.pp b/f2s/resources/keystone/actions/run.pp index 839e5c6a..4582ec0c 100644 --- a/f2s/resources/keystone/actions/run.pp +++ b/f2s/resources/keystone/actions/run.pp @@ -46,6 +46,10 @@ $public_address = $public_ssl_hash['services'] ? { true => $public_ssl_hash['hostname'], default => $public_service_endpoint, } +$public_cert = $public_ssl_hash['services']? 
{ + true => '/etc/pki/tls/certs/public_haproxy.pem', + default => undef, +} $admin_address = $service_endpoint $local_address_for_bind = get_network_role_property('keystone/api', 'ipaddr') @@ -179,6 +183,7 @@ class { 'openstack::auth_file': region_name => $region, controller_node => $service_endpoint, murano_repo_url => $murano_repo_url, + cacert => $public_cert } # Get paste.ini source diff --git a/f2s/resources/murano-rabbitmq/meta.yaml b/f2s/resources/murano-rabbitmq/meta.yaml index d8c17ef1..09230cf3 100644 --- a/f2s/resources/murano-rabbitmq/meta.yaml +++ b/f2s/resources/murano-rabbitmq/meta.yaml @@ -4,4 +4,10 @@ version: '8.0' actions: run: run.pp update: run.pp -input: {} +input: + fqdn: + value: null + puppet_modules: + value: null + role: + value: null diff --git a/f2s/resources/murano/actions/run.pp b/f2s/resources/murano/actions/run.pp index cb163e90..b8ad753e 100644 --- a/f2s/resources/murano/actions/run.pp +++ b/f2s/resources/murano/actions/run.pp @@ -90,9 +90,9 @@ if $murano_hash['enabled'] { rabbit_os_port => $amqp_port, rabbit_os_host => split($amqp_hosts, ','), rabbit_ha_queues => $rabbit_ha_queues, - rabbit_own_host => $public_ip, + rabbit_own_host => $management_ip, rabbit_own_port => $amqp_port, - rabbit_own_vhost => 'murano', + rabbit_own_vhost => '/murano', rabbit_own_user => $rabbit_hash['user'], rabbit_own_password => $rabbit_hash['password'], service_host => $api_bind_host, diff --git a/f2s/resources/ntp-client/meta.yaml b/f2s/resources/ntp-client/meta.yaml index cf874f6a..20f6ae83 100644 --- a/f2s/resources/ntp-client/meta.yaml +++ b/f2s/resources/ntp-client/meta.yaml @@ -7,7 +7,15 @@ actions: input: fqdn: value: null + management_vrouter_vip: + value: null + nodes: + value: null + ntp_servers: + value: null puppet_modules: value: null role: value: null + uid: + value: null diff --git a/f2s/resources/openstack-controller/actions/run.pp b/f2s/resources/openstack-controller/actions/run.pp index 9406d362..583e2568 100644 --- 
a/f2s/resources/openstack-controller/actions/run.pp +++ b/f2s/resources/openstack-controller/actions/run.pp @@ -170,7 +170,16 @@ if $primary_controller { ], command => 'bash -c "nova flavor-create --is-public true m1.micro auto 64 0 1"', #FIXME(mattymo): Upstream bug PUP-2299 for retries in unless/onlyif - unless => 'bash -c "for tries in {1..10}; do nova flavor-list | grep -q m1.micro && exit 0; sleep 2; done"; exit 1', + # Retry nova-flavor list until it exits 0, then exit with grep status, + # finally exit 1 if tries exceeded + # lint:ignore:single_quote_string_with_variables + unless => 'bash -c \'for tries in {1..10}; do + nova flavor-list | grep m1.micro; + status=("${PIPESTATUS[@]}"); + (( ! status[0] )) && exit "${status[1]}"; + sleep 2; + done; exit 1\'', + # lint:endignore tries => 10, try_sleep => 2, require => Class['nova'], diff --git a/f2s/resources/openstack-network-compute-nova/actions/run.pp b/f2s/resources/openstack-network-compute-nova/actions/run.pp index 3fdd4b33..69c2cfd0 100644 --- a/f2s/resources/openstack-network-compute-nova/actions/run.pp +++ b/f2s/resources/openstack-network-compute-nova/actions/run.pp @@ -102,7 +102,7 @@ if $use_neutron { # Neutron L2 agent. 
# The reason is described here https://bugs.launchpad.net/fuel/+bug/1477475 exec { 'wait-for-int-br': - command => "ovs-vsctl br-exists $neutron_integration_bridge", + command => "ovs-vsctl br-exists ${neutron_integration_bridge}", path => [ '/sbin', '/bin', '/usr/bin', '/usr/sbin' ], try_sleep => 6, tries => 10, diff --git a/f2s/resources/openstack-network-compute-nova/meta.yaml b/f2s/resources/openstack-network-compute-nova/meta.yaml index 406e55dc..a1e8fb20 100644 --- a/f2s/resources/openstack-network-compute-nova/meta.yaml +++ b/f2s/resources/openstack-network-compute-nova/meta.yaml @@ -7,7 +7,21 @@ actions: input: fqdn: value: null + management_vip: + value: null + neutron_config: + value: null + neutron_endpoint: + value: null + nova: + value: null puppet_modules: value: null + region: + value: null role: value: null + service_endpoint: + value: null + use_neutron: + value: null diff --git a/f2s/resources/rabbitmq/actions/run.pp b/f2s/resources/rabbitmq/actions/run.pp index ed980929..4276965d 100644 --- a/f2s/resources/rabbitmq/actions/run.pp +++ b/f2s/resources/rabbitmq/actions/run.pp @@ -46,6 +46,14 @@ if $queue_provider == 'rabbitmq' { $cluster_partition_handling = hiera('rabbit_cluster_partition_handling', 'autoheal') $mnesia_table_loading_timeout = hiera('mnesia_table_loading_timeout', '10000') $rabbitmq_bind_ip_address = pick(get_network_role_property('mgmt/messaging', 'ipaddr'), 'UNSET') + $management_bind_ip_address = hiera('management_bind_ip_address', '127.0.0.1') + + # NOTE(mattymo) UNSET is a puppet ref, but would break real configs + if $rabbitmq_bind_ip_address == 'UNSET' { + $epmd_bind_ip_address = '0.0.0.0' + } else { + $epmd_bind_ip_address = $rabbitmq_bind_ip_address + } # NOTE(bogdando) not a hash. 
Keep an indentation as is $rabbit_tcp_listen_options = hiera('rabbit_tcp_listen_options', @@ -81,7 +89,8 @@ if $queue_provider == 'rabbitmq' { ) $config_rabbitmq_management_variables = hiera('rabbit_config_management_variables', { - 'rates_mode' => 'none' + 'rates_mode' => 'none', + 'listener' => "[{port, 15672}, {ip,\"${management_bind_ip_address}\"}]", } ) @@ -95,6 +104,7 @@ if $queue_provider == 'rabbitmq' { $environment_variables = hiera('rabbit_environment_variables', { 'SERVER_ERL_ARGS' => "\"+K true +A${thread_pool_calc} +P 1048576\"", + 'ERL_EPMD_ADDRESS' => $epmd_bind_ip_address, 'PID_FILE' => $rabbit_pid_file, } ) @@ -149,6 +159,7 @@ if $queue_provider == 'rabbitmq' { erlang_cookie => $erlang_cookie, admin_user => $rabbit_hash['user'], admin_pass => $rabbit_hash['password'], + host_ip => $rabbitmq_bind_ip_address, before => Class['nova::rabbitmq'], } } diff --git a/f2s/resources/swift-keystone/actions/run.pp b/f2s/resources/swift-keystone/actions/run.pp index 6e7e5770..325a83b1 100644 --- a/f2s/resources/swift-keystone/actions/run.pp +++ b/f2s/resources/swift-keystone/actions/run.pp @@ -2,11 +2,17 @@ notice('MODULAR: swift/keystone.pp') $swift_hash = hiera_hash('swift', {}) $public_vip = hiera('public_vip') -$admin_address = hiera('management_vip') +# Allow a plugin to override the admin address using swift_hash: +$admin_address = pick($swift_hash['management_vip'], hiera('management_vip')) $region = pick($swift_hash['region'], hiera('region', 'RegionOne')) $public_ssl_hash = hiera('public_ssl') $public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], + # Allow a plugin to override the public address using swift_hash: + # TODO(sbog): with this approach you must use IP address in SAN field of + # certificate on external swift. Change this in next iterations of TLS + # implementation. 
+ true => pick($swift_hash['public_vip'], + $public_ssl_hash['hostname']), default => $public_vip, } $public_protocol = $public_ssl_hash['services'] ? { diff --git a/f2s/resources/top-role-ceph-osd/meta.yaml b/f2s/resources/top-role-ceph-osd/meta.yaml index 206f65e2..492120ed 100644 --- a/f2s/resources/top-role-ceph-osd/meta.yaml +++ b/f2s/resources/top-role-ceph-osd/meta.yaml @@ -5,9 +5,43 @@ actions: run: run.pp update: run.pp input: + access: + value: null + auto_assign_floating_ip: + value: null + ceph_monitor_nodes: + value: null + ceph_primary_monitor_node: + value: null + debug: + value: null fqdn: value: null + keystone: + value: null + management_vip: + value: null + mp: + value: null + network_scheme: + value: null + neutron_mellanox: + value: null + public_vip: + value: null puppet_modules: value: null role: value: null + storage: + value: null + syslog: + value: null + syslog_log_facility_ceph: + value: null + syslog_log_level_ceph: + value: null + use_neutron: + value: null + use_syslog: + value: null diff --git a/f2s/resources/top-role-compute/meta.yaml b/f2s/resources/top-role-compute/meta.yaml index 093ebd24..132b1de9 100644 --- a/f2s/resources/top-role-compute/meta.yaml +++ b/f2s/resources/top-role-compute/meta.yaml @@ -5,9 +5,129 @@ actions: run: run.pp update: run.pp input: + access: + value: null + amqp_hosts: + value: null + auto_assign_floating_ip: + value: null + base_syslog: + value: null + block_device_allocate_retries: + value: null + block_device_allocate_retries_interval: + value: null + ceilometer_hash: + value: null + cinder_hash: + value: null + compute: + value: null + database_vip: + value: null + debug: + value: null fqdn: value: null + glance_api_servers: + value: null + glance_hash: + value: null + keystone_hash: + value: null + libvirt_type: + value: null + management_vip: + value: null + memcache_roles: + value: null + mp: + value: null + murano: + value: null + network_config: + value: null + network_manager: + value: null + 
network_metadata: + value: null + network_scheme: + value: null + neutron_mellanox: + value: null + node_name: + value: null + nodes: + value: null + nova_custom_hash: + value: null + nova_hash: + value: null + nova_rate_limits: + value: null + nova_report_interval: + value: null + nova_service_down_time: + value: null + openstack_version: + value: null + primary_controller: + value: null + public_int: + value: null + public_ssl: + value: null + public_vip: + value: null puppet_modules: value: null + quantum_settings: + value: null + queue_provider: + value: null + rabbit_hash: + value: null + resume_guests_state_on_host_boot: + value: null role: value: null + sahara: + value: null + service_endpoint: + value: null + storage_hash: + value: null + swift_hash: + value: null + swift_master_role: + value: null + swift_proxies: + value: null + syslog: + value: null + syslog_log_facility_cinder: + value: null + syslog_log_facility_glance: + value: null + syslog_log_facility_keystone: + value: null + syslog_log_facility_murano: + value: null + syslog_log_facility_neutron: + value: null + syslog_log_facility_nova: + value: null + syslog_log_facility_sahara: + value: null + use_cow_images: + value: null + use_neutron: + value: null + use_stderr: + value: null + use_syslog: + value: null + use_vcenter: + value: null + vcenter: + value: null From cac1faf9e5b141e5297b7f1eeba600ad0165d6bd Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 23 Nov 2015 12:27:05 +0200 Subject: [PATCH 44/51] Filter null values for hiera --- solar/core/handlers/puppet.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/solar/core/handlers/puppet.py b/solar/core/handlers/puppet.py index bff82cbb..c52f88b5 100644 --- a/solar/core/handlers/puppet.py +++ b/solar/core/handlers/puppet.py @@ -82,4 +82,5 @@ class Puppet(TempFileHandler): class PuppetV2(Puppet): def _make_args(self, resource): - return resource.args + args = resource.args + return {k: args[k] for k in args if not 
args[k] is None} From a1b54e55cd5900b1897a8663a3a7e54e872d7e08 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 23 Nov 2015 12:57:28 +0200 Subject: [PATCH 45/51] Take into account skipped (anchor-like) tasks --- f2s/f2s.py | 32 ++-- f2s/resources/openstack-network-end/meta.yaml | 4 + .../openstack-network-start/meta.yaml | 4 + f2s/vrs/base-os.yml | 1 + f2s/vrs/ceph-osd.yml | 1 + f2s/vrs/cinder-vmware.yml | 1 + f2s/vrs/cinder.yml | 1 + f2s/vrs/compute-vmware.yml | 1 + f2s/vrs/compute.yml | 130 +++++++++++++++- f2s/vrs/controller.yml | 136 ++++++++++++++++- f2s/vrs/ironic.yml | 135 +++++++++++++++++ f2s/vrs/mongo.yml | 1 + f2s/vrs/primary-controller.yml | 143 ++++++++++++++++-- f2s/vrs/primary-mongo.yml | 1 + f2s/vrs/virt.yml | 1 + 15 files changed, 572 insertions(+), 20 deletions(-) create mode 100644 f2s/resources/openstack-network-end/meta.yaml create mode 100644 f2s/resources/openstack-network-start/meta.yaml diff --git a/f2s/f2s.py b/f2s/f2s.py index 2b57bda7..a21814e0 100755 --- a/f2s/f2s.py +++ b/f2s/f2s.py @@ -28,6 +28,8 @@ INPUTS_LOCATION = "/root/current/" DEPLOYMENT_GROUP_PATH = os.path.join(LIBRARY_PATH, 'deployment', 'puppet', 'deployment_groups', 'tasks.yaml') +VALID_TASKS = ('puppet', 'skipped') + def clean_resources(): shutil.rmtree(RESOURCE_TMP_WORKDIR) ensure_dir(RESOURCE_TMP_WORKDIR) @@ -74,6 +76,8 @@ class Task(object): @property def manifest(self): + if self.data['type'] != 'puppet': + return None after_naily = self.data['parameters']['puppet_manifest'].split('osnailyfacter/')[-1] return os.path.join( LIBRARY_PATH, 'deployment', 'puppet', 'osnailyfacter', @@ -99,19 +103,29 @@ class Task(object): return os.path.join(self.dst_path, 'meta.yaml') def meta(self): - data = OrderedDict([('id', self.name), - ('handler', 'puppetv2'), + if self.data['type'] == 'skipped': + data = OrderedDict([('id', self.name), + ('handler', 'none'), ('version', '8.0'), - ('actions', { - 'run': 'run.pp', - 'update': 'run.pp'}), - ('input', self.inputs()),]) + 
('inputs', {})]) + elif self.data['type'] == 'puppet': + data = OrderedDict([('id', self.name), + ('handler', 'puppetv2'), + ('version', '8.0'), + ('actions', { + 'run': 'run.pp', + 'update': 'run.pp'}), + ('input', self.inputs()),]) + else: + raise NotImplemented('Support for %s' % self.data['type']) return ordered_dump(data, default_flow_style=False) @property def actions(self): """yield an iterable of src/dst """ + if self.manifest is None: + return yield self.manifest, os.path.join(self.actions_path, 'run.pp') def inputs(self): @@ -290,7 +304,7 @@ def t2r(tasks, t, p, c): clean_resources() for task in get_tasks(): - if task.type != 'puppet': + if not task.type in VALID_TASKS: continue if task.name in tasks or tasks == (): @@ -320,7 +334,7 @@ def g2vr(groups, c): inner_preds = [] outer_preds = [] for p in dg.predecessors(t): - if dg.node[p]['t'].type != 'puppet': + if not dg.node[p]['t'].type in VALID_TASKS: continue if p in dsub: @@ -328,7 +342,7 @@ def g2vr(groups, c): else: outer_preds.append(p) - if dg.node[t]['t'].type == 'puppet': + if dg.node[t]['t'].type in VALID_TASKS: ordered.append((dg.node[t]['t'], inner_preds, outer_preds)) obj = DGroup(group, ordered) diff --git a/f2s/resources/openstack-network-end/meta.yaml b/f2s/resources/openstack-network-end/meta.yaml new file mode 100644 index 00000000..3fd30ae7 --- /dev/null +++ b/f2s/resources/openstack-network-end/meta.yaml @@ -0,0 +1,4 @@ +id: openstack-network-end +handler: none +version: '8.0' +inputs: {} diff --git a/f2s/resources/openstack-network-start/meta.yaml b/f2s/resources/openstack-network-start/meta.yaml new file mode 100644 index 00000000..2f533df8 --- /dev/null +++ b/f2s/resources/openstack-network-start/meta.yaml @@ -0,0 +1,4 @@ +id: openstack-network-start +handler: none +version: '8.0' +inputs: {} diff --git a/f2s/vrs/base-os.yml b/f2s/vrs/base-os.yml index 329c85e2..4157dbde 100644 --- a/f2s/vrs/base-os.yml +++ b/f2s/vrs/base-os.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: 
env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' - id: logging{{index}} from: f2s/resources/logging diff --git a/f2s/vrs/ceph-osd.yml b/f2s/vrs/ceph-osd.yml index 91613111..5be3a673 100644 --- a/f2s/vrs/ceph-osd.yml +++ b/f2s/vrs/ceph-osd.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} diff --git a/f2s/vrs/cinder-vmware.yml b/f2s/vrs/cinder-vmware.yml index 81f1843a..778d861a 100644 --- a/f2s/vrs/cinder-vmware.yml +++ b/f2s/vrs/cinder-vmware.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} diff --git a/f2s/vrs/cinder.yml b/f2s/vrs/cinder.yml index 78a535c0..d04ede6f 100644 --- a/f2s/vrs/cinder.yml +++ b/f2s/vrs/cinder.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} diff --git a/f2s/vrs/compute-vmware.yml b/f2s/vrs/compute-vmware.yml index 8f5ed761..68cc5c9e 100644 --- a/f2s/vrs/compute-vmware.yml +++ b/f2s/vrs/compute-vmware.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: logging{{index}} diff --git a/f2s/vrs/compute.yml b/f2s/vrs/compute.yml index 4bece7d2..4f69fab5 100644 --- a/f2s/vrs/compute.yml +++ b/f2s/vrs/compute.yml @@ -5,8 +5,8 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' @@ -51,6 +51,10 @@ resources: from: f2s/resources/ceilometer-compute location: '{{node}}' values_from: role_data{{index}} +- id: 
openstack-network-start{{index}} + from: f2s/resources/openstack-network-start + location: '{{node}}' + values_from: role_data{{index}} - id: openstack-network-common-config{{index}} from: f2s/resources/openstack-network-common-config location: '{{node}}' @@ -67,6 +71,10 @@ resources: from: f2s/resources/openstack-network-compute-nova location: '{{node}}' values_from: role_data{{index}} +- id: openstack-network-end{{index}} + from: f2s/resources/openstack-network-end + location: '{{node}}' + values_from: role_data{{index}} events: - type: depends_on state: success @@ -178,6 +186,50 @@ events: - type: depends_on state: success parent_action: role_data{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: top-role-compute{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ironic-compute + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-keystone + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-db + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-controller + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-start{{index}}.run depend_action: openstack-network-common-config{{index}}.run - type: depends_on state: success @@ -228,3 +280,79 @@ events: state: success 
parent_action: openstack-network-common-config{{index}}.run depend_action: openstack-network-compute-nova{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-compute-nova{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-keystone + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-server-config + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-db + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-networks + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-routers + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-server-nova + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-agents-metadata + depend_action: openstack-network-end{{index}}.run +- 
type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-agents-dhcp + depend_action: openstack-network-end{{index}}.run diff --git a/f2s/vrs/controller.yml b/f2s/vrs/controller.yml index 4cdc1496..9e51f549 100644 --- a/f2s/vrs/controller.yml +++ b/f2s/vrs/controller.yml @@ -5,8 +5,8 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' @@ -183,6 +183,10 @@ resources: from: f2s/resources/openstack-controller location: '{{node}}' values_from: role_data{{index}} +- id: openstack-network-start{{index}} + from: f2s/resources/openstack-network-start + location: '{{node}}' + values_from: role_data{{index}} - id: openstack-network-common-config{{index}} from: f2s/resources/openstack-network-common-config location: '{{node}}' @@ -211,6 +215,10 @@ resources: from: f2s/resources/openstack-network-agents-metadata location: '{{node}}' values_from: role_data{{index}} +- id: openstack-network-end{{index}} + from: f2s/resources/openstack-network-end + location: '{{node}}' + values_from: role_data{{index}} - id: heat{{index}} from: f2s/resources/heat location: '{{node}}' @@ -817,6 +825,50 @@ events: - type: depends_on state: success parent_action: role_data{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ironic-compute + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-keystone + depend_action: 
openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-db + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=top-role-compute + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-start{{index}}.run depend_action: openstack-network-common-config{{index}}.run - type: depends_on state: success @@ -904,6 +956,73 @@ events: state: success parent_action: openstack-network-common-config{{index}}.run depend_action: openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-config{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-metadata{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-nova{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-dhcp{{index}}.run + depend_action: 
openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-keystone + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-networks + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-db + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-compute-nova + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-routers + depend_action: openstack-network-end{{index}}.run - type: depends_on state: success parent_action: role_data{{index}}.run @@ -946,6 +1065,17 @@ events: state: success parent_action: horizon{{index}}.run depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=murano-rabbitmq + depend_action: murano{{index}}.run - type: depends_on state: success parent: @@ -964,6 +1094,10 @@ events: state: success parent_action: role_data{{index}}.run depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-end{{index}}.run + depend_action: sahara{{index}}.run - type: depends_on state: success parent_action: horizon{{index}}.run diff --git a/f2s/vrs/ironic.yml b/f2s/vrs/ironic.yml index cb8333ac..cd7d8bfb 100644 --- a/f2s/vrs/ironic.yml +++ b/f2s/vrs/ironic.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} @@ -43,6 +44,10 @@ resources: from: 
f2s/resources/ironic-compute location: '{{node}}' values_from: role_data{{index}} +- id: openstack-network-start{{index}} + from: f2s/resources/openstack-network-start + location: '{{node}}' + values_from: role_data{{index}} - id: openstack-network-common-config{{index}} from: f2s/resources/openstack-network-common-config location: '{{node}}' @@ -51,6 +56,10 @@ resources: from: f2s/resources/openstack-network-plugins-l2 location: '{{node}}' values_from: role_data{{index}} +- id: openstack-network-end{{index}} + from: f2s/resources/openstack-network-end + location: '{{node}}' + values_from: role_data{{index}} events: - type: depends_on state: success @@ -142,6 +151,50 @@ events: - type: depends_on state: success parent_action: role_data{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: ironic-compute{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-keystone + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-db + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=top-role-compute + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-controller + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-start{{index}}.run depend_action: openstack-network-common-config{{index}}.run 
- type: depends_on state: success @@ -158,3 +211,85 @@ events: with_tags: - resource=openstack-network-server-config depend_action: openstack-network-plugins-l2{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-keystone + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-compute-nova + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-server-config + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=neutron-db + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-networks + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-routers + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-server-nova + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-agents-l3 + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - 
resource=openstack-network-agents-metadata + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-agents-dhcp + depend_action: openstack-network-end{{index}}.run diff --git a/f2s/vrs/mongo.yml b/f2s/vrs/mongo.yml index a9e9bf50..92274851 100644 --- a/f2s/vrs/mongo.yml +++ b/f2s/vrs/mongo.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} diff --git a/f2s/vrs/primary-controller.yml b/f2s/vrs/primary-controller.yml index cb373d4a..43cf3e6b 100644 --- a/f2s/vrs/primary-controller.yml +++ b/f2s/vrs/primary-controller.yml @@ -5,8 +5,8 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} from: f2s/resources/fuel_pkgs location: '{{node}}' @@ -183,6 +183,10 @@ resources: from: f2s/resources/rabbitmq location: '{{node}}' values_from: role_data{{index}} +- id: murano-rabbitmq{{index}} + from: f2s/resources/murano-rabbitmq + location: '{{node}}' + values_from: role_data{{index}} - id: apache{{index}} from: f2s/resources/apache location: '{{node}}' @@ -283,6 +287,10 @@ resources: from: f2s/resources/swift-rebalance-cron location: '{{node}}' values_from: role_data{{index}} +- id: openstack-network-start{{index}} + from: f2s/resources/openstack-network-start + location: '{{node}}' + values_from: role_data{{index}} - id: openstack-network-common-config{{index}} from: f2s/resources/openstack-network-common-config location: '{{node}}' @@ -319,6 +327,10 @@ resources: from: f2s/resources/openstack-network-agents-metadata location: '{{node}}' values_from: role_data{{index}} +- id: openstack-network-end{{index}} + from: f2s/resources/openstack-network-end + location: '{{node}}' + values_from: role_data{{index}} - id: 
horizon{{index}} from: f2s/resources/horizon location: '{{node}}' @@ -768,6 +780,14 @@ events: state: success parent_action: openstack-haproxy{{index}}.run depend_action: rabbitmq{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: murano-rabbitmq{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: murano-rabbitmq{{index}}.run - type: depends_on state: success parent_action: role_data{{index}}.run @@ -1095,6 +1115,44 @@ events: - type: depends_on state: success parent_action: role_data{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: neutron-keystone{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: netconfig{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: neutron-db{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: openstack-controller{{index}}.run + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=ironic-compute + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=top-role-compute + depend_action: openstack-network-start{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-common-config{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-start{{index}}.run depend_action: openstack-network-common-config{{index}}.run - type: depends_on state: success @@ -1192,6 +1250,61 @@ events: state: success parent_action: openstack-network-common-config{{index}}.run depend_action: 
openstack-network-agents-metadata{{index}}.run +- type: depends_on + state: success + parent_action: role_data{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: neutron-keystone{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-common-config{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-config{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: neutron-db{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-networks{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-routers{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-server-nova{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-plugins-l2{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-l3{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-metadata{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-agents-dhcp{{index}}.run + depend_action: openstack-network-end{{index}}.run +- type: depends_on + state: success + parent: + action: run + with_tags: + - resource=openstack-network-compute-nova + depend_action: openstack-network-end{{index}}.run - type: depends_on state: success parent_action: 
role_data{{index}}.run @@ -1204,14 +1317,6 @@ events: state: success parent_action: role_data{{index}}.run depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: horizon{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: murano-keystone{{index}}.run - depend_action: murano{{index}}.run - type: depends_on state: success parent_action: heat{{index}}.run @@ -1220,6 +1325,22 @@ events: state: success parent_action: murano-db{{index}}.run depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: murano-rabbitmq{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: murano-keystone{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: rabbitmq{{index}}.run + depend_action: murano{{index}}.run +- type: depends_on + state: success + parent_action: horizon{{index}}.run + depend_action: murano{{index}}.run - type: depends_on state: success parent_action: role_data{{index}}.run @@ -1228,6 +1349,10 @@ events: state: success parent_action: sahara-keystone{{index}}.run depend_action: sahara{{index}}.run +- type: depends_on + state: success + parent_action: openstack-network-end{{index}}.run + depend_action: sahara{{index}}.run - type: depends_on state: success parent_action: sahara-db{{index}}.run diff --git a/f2s/vrs/primary-mongo.yml b/f2s/vrs/primary-mongo.yml index cafae789..78d78a3a 100644 --- a/f2s/vrs/primary-mongo.yml +++ b/f2s/vrs/primary-mongo.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: /etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: fuel_pkgs{{index}} diff --git a/f2s/vrs/virt.yml b/f2s/vrs/virt.yml index a243212c..f23090c2 100644 --- a/f2s/vrs/virt.yml +++ b/f2s/vrs/virt.yml @@ -5,6 +5,7 @@ resources: location: '{{node}}' values: env: '{{env}}' + puppet_modules: 
/etc/puppet/modules uid: '{{index}}' puppet_modules: '/etc/puppet/modules' - id: ssl-keys-saving{{index}} From 2ba1e08279efb87930042364a7c7a0acb16cf84c Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 23 Nov 2015 13:34:52 +0200 Subject: [PATCH 46/51] Add environment variable that will enable debug source in fsclient --- f2s/fsclient.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/f2s/fsclient.py b/f2s/fsclient.py index db785b14..afbcc42b 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -1,5 +1,6 @@ #!/usr/bin/env python +import os import click from solar.core.resource import virtual_resource as vr @@ -39,7 +40,10 @@ class DumbSource(object): def master(self): return 'master', '0.0.0.0' -source = NailgunSource() +if os.environ.get('DEBUG_FSCLIENT'): + source = DumbSource() +else: + source = NailgunSource() @main.command() @click.argument('uids', nargs=-1) From 3d1efa58a5da2b56d19f377f6d7ea6823605dd96 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 24 Nov 2015 12:38:25 +0200 Subject: [PATCH 47/51] Assign tasks based on roles generated by deployment info --- f2s/fsclient.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/f2s/fsclient.py b/f2s/fsclient.py index afbcc42b..28b8bf66 100755 --- a/f2s/fsclient.py +++ b/f2s/fsclient.py @@ -22,8 +22,11 @@ class NailgunSource(object): def roles(self, uid): from fuelclient.objects.node import Node + from fuelclient.objects.environment import Environment node = Node(uid) - return node.data['roles'] + node.data['pending_roles'] + env = Environment(node.data['cluster']) + facts = env.get_default_facts('deployment', [uid]) + return [f['role'] for f in facts] def master(self): return 'master', '10.20.0.2' From 2ddb9b75f9c68133eedb07a415022795ced500f0 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 26 Nov 2015 11:53:11 +0200 Subject: [PATCH 48/51] Fix inconsistencies after rebase --- docker-compose.yml | 8 +-- solar/{solar => }/core/handlers/naive_sync.py 
| 0 solar/orchestration/tasks.py | 18 ------ solar/solar/config.py | 63 ------------------- solar/solar/dblayer/__init__.py | 9 --- 5 files changed, 1 insertion(+), 97 deletions(-) rename solar/{solar => }/core/handlers/naive_sync.py (100%) delete mode 100644 solar/solar/config.py delete mode 100644 solar/solar/dblayer/__init__.py diff --git a/docker-compose.yml b/docker-compose.yml index 81be799f..f5ca2933 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,5 @@ solar: - image: solarproject/solar-celery:f2s + image: solarproject/solar-celery # path inside of the container should be exactly the same as outside # because solar uses absolute path to find resoruce actions files volumes: @@ -19,12 +19,6 @@ solar: - riak - redis -# docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/templates \ -# -v /root/solar/resources:/resources -v /root/solar/f2s:/f2s \ -# -v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ -# -v /root/.ssh:/root/.ssh \ -# --link=riak:riak --link=redis:redis solarproject/solar-celery:f2s - riak: image: tutum/riak ports: diff --git a/solar/solar/core/handlers/naive_sync.py b/solar/core/handlers/naive_sync.py similarity index 100% rename from solar/solar/core/handlers/naive_sync.py rename to solar/core/handlers/naive_sync.py diff --git a/solar/orchestration/tasks.py b/solar/orchestration/tasks.py index f499897b..59624619 100644 --- a/solar/orchestration/tasks.py +++ b/solar/orchestration/tasks.py @@ -34,20 +34,6 @@ from solar.orchestration import limits from solar.orchestration import executor from solar.dblayer import ModelMeta -from solar.dblayer.model import ModelMeta -from functools import wraps - -def session(func): - @wraps(func) - def inner(*args, **kwargs): - try: - ModelMeta.session_start() - rst = func(*args, **kwargs) - finally: - ModelMeta.session_end() - return rst - return 
inner - __all__ = ['solar_resource', 'cmd', 'sleep', 'error', 'fault_tolerance', 'schedule_start', 'schedule_next'] @@ -85,7 +71,6 @@ def end_solar_session(task_id, task, *args, **kwargs): @report_task(name='solar_resource') -@session def solar_resource(ctxt, resource_name, action): res = resource.load(resource_name) return actions.resource_action(res, action) @@ -160,7 +145,6 @@ def schedule(plan_uid, dg): @app.task(name='schedule_start') -@session def schedule_start(plan_uid): """On receive finished task should update storage with task result: @@ -172,7 +156,6 @@ def schedule_start(plan_uid): @app.task(name='soft_stop') -@session def soft_stop(plan_uid): dg = graph.get_graph(plan_uid) for n in dg: @@ -182,7 +165,6 @@ def soft_stop(plan_uid): @app.task(name='schedule_next') -@session def schedule_next(task_id, status, errmsg=None): plan_uid, task_name = task_id.rsplit(':', 1) dg = graph.get_graph(plan_uid) diff --git a/solar/solar/config.py b/solar/solar/config.py deleted file mode 100644 index 9a92c501..00000000 --- a/solar/solar/config.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -import yaml -from bunch import Bunch - -CWD = os.getcwd() - -C = Bunch() -C.redis = Bunch(port='6379', host='10.0.0.2') -C.solar_db = Bunch(mode='riak', port='8087', host='10.0.0.2', protocol='pbc') - - -def _lookup_vals(setter, config, prefix=None): - for key, val in config.iteritems(): - if prefix is None: - sub = [key] - else: - sub = prefix + [key] - if isinstance(val, Bunch): - _lookup_vals(setter, val, sub) - else: - setter(config, sub) - - -def from_configs(): - - paths = [ - os.getenv('SOLAR_CONFIG', os.path.join(CWD, '.config')), - os.path.join(CWD, '.config.override') - ] - data = {} - - def _load_from_path(data, path): - with open(path) as f: - loaded = yaml.load(f) - if loaded: - data.update(loaded) - - for path in paths: - if not os.path.exists(path): - continue - with open(path) as f: - loaded = yaml.load(f) - if loaded: - data.update(loaded) - - def _setter(config, 
path): - vals = data - for key in path: - vals = vals[key] - config[path[-1]] = vals - if data: - _lookup_vals(_setter, C) - - -def from_env(): - def _setter(config, path): - env_key = '_'.join(path).upper() - if env_key in os.environ: - config[path[-1]] = os.environ[env_key] - _lookup_vals(_setter, C) - -from_configs() -from_env() diff --git a/solar/solar/dblayer/__init__.py b/solar/solar/dblayer/__init__.py deleted file mode 100644 index 2f3b0fae..00000000 --- a/solar/solar/dblayer/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from solar.dblayer.model import ModelMeta -from solar.dblayer.riak_client import RiakClient -from solar.config import C - -client = RiakClient( - protocol=C.riak.protcol, host=C.riak.host, pb_port=C.riak.port) -# client = RiakClient(protocol='http', host='10.0.0.2', http_port=8098) - -ModelMeta.setup(client) From 3075df61def9dfbe62e9d48674ff1b72401eb1f3 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 30 Nov 2015 14:55:31 +0200 Subject: [PATCH 49/51] Fix flake8 import rules --- solar/cli/orch.py | 2 ++ solar/core/resource/resource.py | 2 ++ solar/orchestration/graph.py | 13 ++++++------- solar/orchestration/tasks.py | 8 +++----- solar/utils.py | 8 ++++++-- 5 files changed, 19 insertions(+), 14 deletions(-) diff --git a/solar/cli/orch.py b/solar/cli/orch.py index 6abd0482..226f5d2c 100755 --- a/solar/cli/orch.py +++ b/solar/cli/orch.py @@ -115,12 +115,14 @@ def filter(uid, start, end): utils.write_graph(plan) click.echo('Created {name}.png'.format(name=plan.graph['name'])) + @orchestration.command(help='Used to mark task as executed') @click.argument('uid', type=SOLARUID) @click.option('--task', '-t', multiple=True) def noop(uid, task): graph.set_states(uid, task) + @orchestration.command(name='run-once') @click.argument('uid', type=SOLARUID, default='last') @click.option('-w', 'wait', default=0) diff --git a/solar/core/resource/resource.py b/solar/core/resource/resource.py index 9310c934..71822222 100644 --- 
a/solar/core/resource/resource.py +++ b/solar/core/resource/resource.py @@ -15,6 +15,7 @@ from copy import deepcopy from hashlib import md5 +import json import os from uuid import uuid4 @@ -296,6 +297,7 @@ class Resource(object): if rst: self.update(json.loads(rst)) + def load(name): r = DBResource.get(name) diff --git a/solar/orchestration/graph.py b/solar/orchestration/graph.py index a8006356..b4f70ec4 100644 --- a/solar/orchestration/graph.py +++ b/solar/orchestration/graph.py @@ -12,22 +12,19 @@ # License for the specific language governing permissions and limitations # under the License. -from collections import Counter import time import uuid +from collections import Counter + import networkx as nx from solar.dblayer.model import clear_cache +from solar.dblayer.model import ModelMeta from solar.dblayer.solar_models import Task from solar import errors -from solar import utils - from solar.orchestration.traversal import states - -from solar.dblayer.solar_models import Task -from solar.dblayer.model import clear_cache -from solar.dblayer.model import ModelMeta +from solar import utils def save_graph(graph): @@ -58,6 +55,7 @@ def update_graph(graph): task.errmsg = graph.node[n]['errmsg'] or '' task.save() + def set_states(uid, tasks): plan = get_graph(uid) for t in tasks: @@ -67,6 +65,7 @@ def set_states(uid, tasks): plan.node[t]['task'].save_lazy() ModelMeta.save_all_lazy() + def get_graph(uid): dg = nx.MultiDiGraph() dg.graph['uid'] = uid diff --git a/solar/orchestration/tasks.py b/solar/orchestration/tasks.py index 59624619..6e03616d 100644 --- a/solar/orchestration/tasks.py +++ b/solar/orchestration/tasks.py @@ -12,9 +12,10 @@ # License for the specific language governing permissions and limitations # under the License. 
-import time -from functools import partial import subprocess +import time + +from functools import partial from celery.app import task from celery.signals import task_postrun @@ -30,9 +31,6 @@ from solar.orchestration.runner import app from solar.orchestration.traversal import traverse from solar.system_log.tasks import commit_logitem from solar.system_log.tasks import error_logitem -from solar.orchestration import limits -from solar.orchestration import executor -from solar.dblayer import ModelMeta __all__ = ['solar_resource', 'cmd', 'sleep', diff --git a/solar/utils.py b/solar/utils.py index 0b0a0c54..813b8488 100644 --- a/solar/utils.py +++ b/solar/utils.py @@ -17,8 +17,8 @@ import io import json import logging import os +import subprocess import uuid -from subprocess import Popen, PIPE, STDOUT from jinja2 import Environment import yaml @@ -36,7 +36,11 @@ def to_pretty_json(data): def communicate(command, data): - popen = Popen(command, stdout=PIPE, stdin=PIPE, stderr=STDOUT) + popen = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE) return popen.communicate(input=data)[0] # Configure jinja2 filters From f1a3b1afca261898cca1d31cbb58955b0e041a17 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 1 Dec 2015 10:36:18 +0200 Subject: [PATCH 50/51] Remove f2s directory --- Dockerfile | 2 - f2s/.gitignore | 2 - f2s/README.md | 109 -- f2s/f2s.py | 354 ----- f2s/fsclient.py | 89 -- f2s/patches/hiera.patch | 10 - f2s/patches/noop_tests.patch | 15 - f2s/resources/apache/actions/run.pp | 13 - f2s/resources/apache/meta.yaml | 15 - f2s/resources/api-proxy/actions/run.pp | 16 - f2s/resources/api-proxy/meta.yaml | 19 - .../ceilometer-compute/actions/run.pp | 59 - f2s/resources/ceilometer-compute/meta.yaml | 37 - .../ceilometer-controller/actions/run.pp | 111 -- f2s/resources/ceilometer-controller/meta.yaml | 47 - .../ceilometer-keystone/actions/run.pp | 41 - f2s/resources/ceilometer-keystone/meta.yaml | 23 - 
.../ceilometer-radosgw-user/actions/run.pp | 20 - .../ceilometer-radosgw-user/meta.yaml | 17 - f2s/resources/ceph-compute/actions/run.pp | 97 -- f2s/resources/ceph-compute/meta.yaml | 37 - f2s/resources/ceph-mon/actions/run.pp | 95 -- f2s/resources/ceph-mon/meta.yaml | 35 - f2s/resources/ceph-radosgw/actions/run.pp | 103 -- f2s/resources/ceph-radosgw/meta.yaml | 29 - .../ceph_create_pools/actions/run.pp | 80 - f2s/resources/ceph_create_pools/meta.yaml | 15 - f2s/resources/cinder-db/actions/run.pp | 53 - f2s/resources/cinder-db/meta.yaml | 23 - f2s/resources/cinder-keystone/actions/run.pp | 51 - f2s/resources/cinder-keystone/meta.yaml | 23 - f2s/resources/cluster-haproxy/actions/run.pp | 20 - f2s/resources/cluster-haproxy/meta.yaml | 27 - f2s/resources/cluster-vrouter/actions/run.pp | 7 - f2s/resources/cluster-vrouter/meta.yaml | 15 - f2s/resources/cluster/actions/run.pp | 49 - f2s/resources/cluster/meta.yaml | 19 - f2s/resources/cluster_health/actions/run.pp | 20 - f2s/resources/cluster_health/meta.yaml | 27 - .../configure_default_route/actions/run.pp | 11 - .../configure_default_route/meta.yaml | 17 - .../connectivity_tests/actions/run.pp | 5 - f2s/resources/connectivity_tests/meta.yaml | 15 - f2s/resources/conntrackd/actions/run.pp | 79 - f2s/resources/conntrackd/meta.yaml | 17 - .../controller_remaining_tasks/actions/run.pp | 49 - .../controller_remaining_tasks/meta.yaml | 19 - f2s/resources/database/actions/run.pp | 132 -- f2s/resources/database/meta.yaml | 33 - .../actions/run.pp | 41 - .../disable_keystone_service_token/meta.yaml | 15 - f2s/resources/dns-client/actions/run.pp | 8 - f2s/resources/dns-client/meta.yaml | 15 - f2s/resources/dns-server/actions/run.pp | 16 - f2s/resources/dns-server/meta.yaml | 21 - .../dump_rabbitmq_definitions/actions/run.pp | 33 - .../dump_rabbitmq_definitions/meta.yaml | 15 - .../actions/run.pp | 10 - .../enable_cinder_volume_service/meta.yaml | 13 - .../actions/run.pp | 10 - .../enable_nova_compute_service/meta.yaml | 13 - 
f2s/resources/enable_rados/actions/run.pp | 17 - f2s/resources/enable_rados/meta.yaml | 13 - f2s/resources/firewall/actions/run.pp | 133 -- f2s/resources/firewall/meta.yaml | 19 - f2s/resources/fuel_pkgs/actions/run.pp | 10 - f2s/resources/fuel_pkgs/meta.yaml | 13 - f2s/resources/generate_vms/actions/run.pp | 49 - f2s/resources/generate_vms/meta.yaml | 13 - f2s/resources/genkeys/actions/run.sh | 39 - f2s/resources/genkeys/meta.yaml | 20 - f2s/resources/glance-db/actions/run.pp | 53 - f2s/resources/glance-db/meta.yaml | 25 - f2s/resources/glance-keystone/actions/run.pp | 42 - f2s/resources/glance-keystone/meta.yaml | 23 - f2s/resources/glance/actions/run.pp | 128 -- f2s/resources/glance/meta.yaml | 49 - f2s/resources/globals/actions/run.pp | 293 ---- f2s/resources/globals/meta.yaml | 127 -- f2s/resources/heat-db/actions/run.pp | 53 - f2s/resources/heat-db/meta.yaml | 23 - f2s/resources/heat-keystone/actions/run.pp | 59 - f2s/resources/heat-keystone/meta.yaml | 23 - f2s/resources/heat/actions/run.pp | 169 -- f2s/resources/heat/meta.yaml | 55 - f2s/resources/hiera/actions/run.pp | 75 - f2s/resources/hiera/meta.yaml | 11 - f2s/resources/horizon/actions/run.pp | 79 - f2s/resources/horizon/meta.yaml | 47 - f2s/resources/hosts/actions/run.pp | 5 - f2s/resources/hosts/meta.yaml | 13 - f2s/resources/ironic-api/actions/run.pp | 61 - f2s/resources/ironic-api/meta.yaml | 11 - f2s/resources/ironic-compute/actions/run.pp | 98 -- f2s/resources/ironic-compute/meta.yaml | 13 - f2s/resources/ironic-conductor/actions/run.pp | 121 -- f2s/resources/ironic-conductor/meta.yaml | 13 - f2s/resources/ironic-db/actions/run.pp | 51 - f2s/resources/ironic-db/meta.yaml | 23 - f2s/resources/ironic-keystone/actions/run.pp | 39 - f2s/resources/ironic-keystone/meta.yaml | 23 - f2s/resources/keystone-db/actions/run.pp | 54 - f2s/resources/keystone-db/meta.yaml | 25 - f2s/resources/keystone/actions/run.pp | 247 --- f2s/resources/keystone/meta.yaml | 77 - f2s/resources/logging/actions/run.pp | 67 - 
f2s/resources/logging/meta.yaml | 27 - f2s/resources/memcached/actions/run.pp | 8 - f2s/resources/memcached/meta.yaml | 15 - f2s/resources/murano-db/actions/run.pp | 57 - f2s/resources/murano-db/meta.yaml | 25 - f2s/resources/murano-keystone/actions/run.pp | 36 - f2s/resources/murano-keystone/meta.yaml | 25 - f2s/resources/murano-rabbitmq/actions/run.pp | 13 - f2s/resources/murano-rabbitmq/meta.yaml | 13 - f2s/resources/murano/actions/run.pp | 151 -- f2s/resources/murano/meta.yaml | 59 - f2s/resources/netconfig/actions/run.pp | 106 -- f2s/resources/netconfig/meta.yaml | 27 - f2s/resources/neutron-db/actions/run.pp | 59 - f2s/resources/neutron-db/meta.yaml | 29 - f2s/resources/neutron-keystone/actions/run.pp | 50 - f2s/resources/neutron-keystone/meta.yaml | 25 - f2s/resources/nova-db/actions/run.pp | 53 - f2s/resources/nova-db/meta.yaml | 23 - f2s/resources/nova-keystone/actions/run.pp | 56 - f2s/resources/nova-keystone/meta.yaml | 23 - f2s/resources/ntp-check/actions/run.pp | 6 - f2s/resources/ntp-check/meta.yaml | 15 - f2s/resources/ntp-client/actions/run.pp | 26 - f2s/resources/ntp-client/meta.yaml | 21 - f2s/resources/ntp-server/actions/run.pp | 31 - f2s/resources/ntp-server/meta.yaml | 15 - f2s/resources/openstack-cinder/actions/run.pp | 107 -- f2s/resources/openstack-cinder/meta.yaml | 59 - .../openstack-controller/actions/run.pp | 242 --- f2s/resources/openstack-controller/meta.yaml | 113 -- .../actions/run.pp | 23 - .../openstack-haproxy-ceilometer/meta.yaml | 19 - .../openstack-haproxy-cinder/actions/run.pp | 24 - .../openstack-haproxy-cinder/meta.yaml | 29 - .../openstack-haproxy-glance/actions/run.pp | 26 - .../openstack-haproxy-glance/meta.yaml | 27 - .../openstack-haproxy-heat/actions/run.pp | 24 - .../openstack-haproxy-heat/meta.yaml | 29 - .../openstack-haproxy-horizon/actions/run.pp | 24 - .../openstack-haproxy-horizon/meta.yaml | 29 - .../openstack-haproxy-ironic/actions/run.pp | 22 - .../openstack-haproxy-ironic/meta.yaml | 11 - 
.../openstack-haproxy-keystone/actions/run.pp | 29 - .../openstack-haproxy-keystone/meta.yaml | 31 - .../openstack-haproxy-murano/actions/run.pp | 24 - .../openstack-haproxy-murano/meta.yaml | 21 - .../openstack-haproxy-mysqld/actions/run.pp | 31 - .../openstack-haproxy-mysqld/meta.yaml | 35 - .../openstack-haproxy-neutron/actions/run.pp | 22 - .../openstack-haproxy-neutron/meta.yaml | 27 - .../openstack-haproxy-nova/actions/run.pp | 25 - .../openstack-haproxy-nova/meta.yaml | 27 - .../openstack-haproxy-radosgw/actions/run.pp | 39 - .../openstack-haproxy-radosgw/meta.yaml | 19 - .../openstack-haproxy-sahara/actions/run.pp | 24 - .../openstack-haproxy-sahara/meta.yaml | 21 - .../openstack-haproxy-stats/actions/run.pp | 7 - .../openstack-haproxy-stats/meta.yaml | 19 - .../openstack-haproxy-swift/actions/run.pp | 37 - .../openstack-haproxy-swift/meta.yaml | 31 - .../openstack-haproxy/actions/run.pp | 3 - f2s/resources/openstack-haproxy/meta.yaml | 13 - .../actions/run.pp | 39 - .../openstack-network-agents-dhcp/meta.yaml | 21 - .../actions/run.pp | 59 - .../openstack-network-agents-l3/meta.yaml | 23 - .../actions/run.pp | 56 - .../meta.yaml | 31 - .../actions/run.pp | 110 -- .../openstack-network-common-config/meta.yaml | 37 - .../actions/run.pp | 267 ---- .../openstack-network-compute-nova/meta.yaml | 27 - f2s/resources/openstack-network-end/meta.yaml | 4 - .../openstack-network-networks/actions/run.pp | 106 -- .../openstack-network-networks/meta.yaml | 21 - .../actions/run.pp | 170 -- .../openstack-network-plugins-l2/meta.yaml | 29 - .../openstack-network-routers/actions/run.pp | 32 - .../openstack-network-routers/meta.yaml | 21 - .../actions/run.pp | 95 -- .../openstack-network-server-config/meta.yaml | 33 - .../actions/run.pp | 98 -- .../openstack-network-server-nova/meta.yaml | 25 - .../openstack-network-start/meta.yaml | 4 - f2s/resources/pre_hiera_config/actions/run.pp | 75 - f2s/resources/pre_hiera_config/meta.yaml | 11 - 
f2s/resources/public_vip_ping/actions/run.pp | 17 - f2s/resources/public_vip_ping/meta.yaml | 17 - f2s/resources/rabbitmq/actions/run.pp | 174 -- f2s/resources/rabbitmq/meta.yaml | 43 - f2s/resources/role_data/managers/from_file.py | 13 - .../role_data/managers/from_nailgun.py | 13 - f2s/resources/role_data/managers/globals.pp | 294 ---- f2s/resources/role_data/managers/globals.py | 57 - .../role_data/managers/test_sample.yaml | 695 -------- f2s/resources/role_data/meta.yaml | 359 ----- f2s/resources/sahara-db/actions/run.pp | 57 - f2s/resources/sahara-db/meta.yaml | 25 - f2s/resources/sahara-keystone/actions/run.pp | 34 - f2s/resources/sahara-keystone/meta.yaml | 23 - f2s/resources/sahara/actions/run.pp | 156 -- f2s/resources/sahara/meta.yaml | 55 - .../ssl-add-trust-chain/actions/run.pp | 42 - f2s/resources/ssl-add-trust-chain/meta.yaml | 17 - f2s/resources/ssl-keys-saving/actions/run.pp | 22 - f2s/resources/ssl-keys-saving/meta.yaml | 15 - f2s/resources/swift-keystone/actions/run.pp | 51 - f2s/resources/swift-keystone/meta.yaml | 23 - .../swift-rebalance-cron/actions/run.pp | 24 - f2s/resources/swift-rebalance-cron/meta.yaml | 23 - f2s/resources/swift/actions/run.pp | 147 -- f2s/resources/swift/meta.yaml | 63 - f2s/resources/tools/actions/run.pp | 42 - f2s/resources/tools/meta.yaml | 17 - .../top-role-ceph-osd/actions/run.pp | 57 - f2s/resources/top-role-ceph-osd/meta.yaml | 47 - .../top-role-cinder-vmware/actions/run.pp | 11 - .../top-role-cinder-vmware/meta.yaml | 13 - f2s/resources/top-role-cinder/actions/run.pp | 308 ---- f2s/resources/top-role-cinder/meta.yaml | 13 - .../top-role-compute-vmware/actions/run.pp | 18 - .../top-role-compute-vmware/meta.yaml | 13 - f2s/resources/top-role-compute/actions/run.pp | 339 ---- f2s/resources/top-role-compute/meta.yaml | 133 -- f2s/resources/top-role-mongo/actions/run.pp | 32 - f2s/resources/top-role-mongo/meta.yaml | 13 - .../top-role-primary-mongo/actions/run.pp | 32 - .../top-role-primary-mongo/meta.yaml | 13 - 
f2s/resources/umm/actions/run.pp | 3 - f2s/resources/umm/meta.yaml | 13 - f2s/resources/update_hosts/actions/run.pp | 5 - f2s/resources/update_hosts/meta.yaml | 13 - f2s/resources/updatedb/actions/run.pp | 21 - f2s/resources/updatedb/meta.yaml | 15 - f2s/resources/virtual_ips/actions/run.pp | 3 - f2s/resources/virtual_ips/meta.yaml | 17 - f2s/resources/vmware-vcenter/actions/run.pp | 19 - f2s/resources/vmware-vcenter/meta.yaml | 27 - .../workloads_collector_add/actions/run.pp | 21 - .../workloads_collector_add/meta.yaml | 17 - f2s/vrs/base-os.yml | 18 - f2s/vrs/ceph-osd.yml | 143 -- f2s/vrs/cinder-vmware.yml | 183 --- f2s/vrs/cinder.yml | 143 -- f2s/vrs/compute-vmware.yml | 147 -- f2s/vrs/compute.yml | 358 ----- f2s/vrs/controller.yml | 1201 -------------- f2s/vrs/fuel_node.yaml | 20 - f2s/vrs/genkeys.yaml | 10 - f2s/vrs/ironic.yml | 295 ---- f2s/vrs/mongo.yml | 143 -- f2s/vrs/prep.yaml | 29 - f2s/vrs/primary-controller.yml | 1399 ----------------- f2s/vrs/primary-mongo.yml | 143 -- f2s/vrs/virt.yml | 101 -- 262 files changed, 16760 deletions(-) delete mode 100644 f2s/.gitignore delete mode 100644 f2s/README.md delete mode 100755 f2s/f2s.py delete mode 100755 f2s/fsclient.py delete mode 100644 f2s/patches/hiera.patch delete mode 100644 f2s/patches/noop_tests.patch delete mode 100644 f2s/resources/apache/actions/run.pp delete mode 100644 f2s/resources/apache/meta.yaml delete mode 100644 f2s/resources/api-proxy/actions/run.pp delete mode 100644 f2s/resources/api-proxy/meta.yaml delete mode 100644 f2s/resources/ceilometer-compute/actions/run.pp delete mode 100644 f2s/resources/ceilometer-compute/meta.yaml delete mode 100644 f2s/resources/ceilometer-controller/actions/run.pp delete mode 100644 f2s/resources/ceilometer-controller/meta.yaml delete mode 100644 f2s/resources/ceilometer-keystone/actions/run.pp delete mode 100644 f2s/resources/ceilometer-keystone/meta.yaml delete mode 100644 f2s/resources/ceilometer-radosgw-user/actions/run.pp delete mode 100644 
f2s/resources/ceilometer-radosgw-user/meta.yaml delete mode 100644 f2s/resources/ceph-compute/actions/run.pp delete mode 100644 f2s/resources/ceph-compute/meta.yaml delete mode 100644 f2s/resources/ceph-mon/actions/run.pp delete mode 100644 f2s/resources/ceph-mon/meta.yaml delete mode 100644 f2s/resources/ceph-radosgw/actions/run.pp delete mode 100644 f2s/resources/ceph-radosgw/meta.yaml delete mode 100644 f2s/resources/ceph_create_pools/actions/run.pp delete mode 100644 f2s/resources/ceph_create_pools/meta.yaml delete mode 100644 f2s/resources/cinder-db/actions/run.pp delete mode 100644 f2s/resources/cinder-db/meta.yaml delete mode 100644 f2s/resources/cinder-keystone/actions/run.pp delete mode 100644 f2s/resources/cinder-keystone/meta.yaml delete mode 100644 f2s/resources/cluster-haproxy/actions/run.pp delete mode 100644 f2s/resources/cluster-haproxy/meta.yaml delete mode 100644 f2s/resources/cluster-vrouter/actions/run.pp delete mode 100644 f2s/resources/cluster-vrouter/meta.yaml delete mode 100644 f2s/resources/cluster/actions/run.pp delete mode 100644 f2s/resources/cluster/meta.yaml delete mode 100644 f2s/resources/cluster_health/actions/run.pp delete mode 100644 f2s/resources/cluster_health/meta.yaml delete mode 100644 f2s/resources/configure_default_route/actions/run.pp delete mode 100644 f2s/resources/configure_default_route/meta.yaml delete mode 100644 f2s/resources/connectivity_tests/actions/run.pp delete mode 100644 f2s/resources/connectivity_tests/meta.yaml delete mode 100644 f2s/resources/conntrackd/actions/run.pp delete mode 100644 f2s/resources/conntrackd/meta.yaml delete mode 100644 f2s/resources/controller_remaining_tasks/actions/run.pp delete mode 100644 f2s/resources/controller_remaining_tasks/meta.yaml delete mode 100644 f2s/resources/database/actions/run.pp delete mode 100644 f2s/resources/database/meta.yaml delete mode 100644 f2s/resources/disable_keystone_service_token/actions/run.pp delete mode 100644 
f2s/resources/disable_keystone_service_token/meta.yaml delete mode 100644 f2s/resources/dns-client/actions/run.pp delete mode 100644 f2s/resources/dns-client/meta.yaml delete mode 100644 f2s/resources/dns-server/actions/run.pp delete mode 100644 f2s/resources/dns-server/meta.yaml delete mode 100644 f2s/resources/dump_rabbitmq_definitions/actions/run.pp delete mode 100644 f2s/resources/dump_rabbitmq_definitions/meta.yaml delete mode 100644 f2s/resources/enable_cinder_volume_service/actions/run.pp delete mode 100644 f2s/resources/enable_cinder_volume_service/meta.yaml delete mode 100644 f2s/resources/enable_nova_compute_service/actions/run.pp delete mode 100644 f2s/resources/enable_nova_compute_service/meta.yaml delete mode 100644 f2s/resources/enable_rados/actions/run.pp delete mode 100644 f2s/resources/enable_rados/meta.yaml delete mode 100644 f2s/resources/firewall/actions/run.pp delete mode 100644 f2s/resources/firewall/meta.yaml delete mode 100644 f2s/resources/fuel_pkgs/actions/run.pp delete mode 100644 f2s/resources/fuel_pkgs/meta.yaml delete mode 100644 f2s/resources/generate_vms/actions/run.pp delete mode 100644 f2s/resources/generate_vms/meta.yaml delete mode 100644 f2s/resources/genkeys/actions/run.sh delete mode 100644 f2s/resources/genkeys/meta.yaml delete mode 100644 f2s/resources/glance-db/actions/run.pp delete mode 100644 f2s/resources/glance-db/meta.yaml delete mode 100644 f2s/resources/glance-keystone/actions/run.pp delete mode 100644 f2s/resources/glance-keystone/meta.yaml delete mode 100644 f2s/resources/glance/actions/run.pp delete mode 100644 f2s/resources/glance/meta.yaml delete mode 100644 f2s/resources/globals/actions/run.pp delete mode 100644 f2s/resources/globals/meta.yaml delete mode 100644 f2s/resources/heat-db/actions/run.pp delete mode 100644 f2s/resources/heat-db/meta.yaml delete mode 100644 f2s/resources/heat-keystone/actions/run.pp delete mode 100644 f2s/resources/heat-keystone/meta.yaml delete mode 100644 
f2s/resources/heat/actions/run.pp delete mode 100644 f2s/resources/heat/meta.yaml delete mode 100644 f2s/resources/hiera/actions/run.pp delete mode 100644 f2s/resources/hiera/meta.yaml delete mode 100644 f2s/resources/horizon/actions/run.pp delete mode 100644 f2s/resources/horizon/meta.yaml delete mode 100644 f2s/resources/hosts/actions/run.pp delete mode 100644 f2s/resources/hosts/meta.yaml delete mode 100644 f2s/resources/ironic-api/actions/run.pp delete mode 100644 f2s/resources/ironic-api/meta.yaml delete mode 100644 f2s/resources/ironic-compute/actions/run.pp delete mode 100644 f2s/resources/ironic-compute/meta.yaml delete mode 100644 f2s/resources/ironic-conductor/actions/run.pp delete mode 100644 f2s/resources/ironic-conductor/meta.yaml delete mode 100644 f2s/resources/ironic-db/actions/run.pp delete mode 100644 f2s/resources/ironic-db/meta.yaml delete mode 100644 f2s/resources/ironic-keystone/actions/run.pp delete mode 100644 f2s/resources/ironic-keystone/meta.yaml delete mode 100644 f2s/resources/keystone-db/actions/run.pp delete mode 100644 f2s/resources/keystone-db/meta.yaml delete mode 100644 f2s/resources/keystone/actions/run.pp delete mode 100644 f2s/resources/keystone/meta.yaml delete mode 100644 f2s/resources/logging/actions/run.pp delete mode 100644 f2s/resources/logging/meta.yaml delete mode 100644 f2s/resources/memcached/actions/run.pp delete mode 100644 f2s/resources/memcached/meta.yaml delete mode 100644 f2s/resources/murano-db/actions/run.pp delete mode 100644 f2s/resources/murano-db/meta.yaml delete mode 100644 f2s/resources/murano-keystone/actions/run.pp delete mode 100644 f2s/resources/murano-keystone/meta.yaml delete mode 100644 f2s/resources/murano-rabbitmq/actions/run.pp delete mode 100644 f2s/resources/murano-rabbitmq/meta.yaml delete mode 100644 f2s/resources/murano/actions/run.pp delete mode 100644 f2s/resources/murano/meta.yaml delete mode 100644 f2s/resources/netconfig/actions/run.pp delete mode 100644 
f2s/resources/netconfig/meta.yaml delete mode 100644 f2s/resources/neutron-db/actions/run.pp delete mode 100644 f2s/resources/neutron-db/meta.yaml delete mode 100644 f2s/resources/neutron-keystone/actions/run.pp delete mode 100644 f2s/resources/neutron-keystone/meta.yaml delete mode 100644 f2s/resources/nova-db/actions/run.pp delete mode 100644 f2s/resources/nova-db/meta.yaml delete mode 100644 f2s/resources/nova-keystone/actions/run.pp delete mode 100644 f2s/resources/nova-keystone/meta.yaml delete mode 100644 f2s/resources/ntp-check/actions/run.pp delete mode 100644 f2s/resources/ntp-check/meta.yaml delete mode 100644 f2s/resources/ntp-client/actions/run.pp delete mode 100644 f2s/resources/ntp-client/meta.yaml delete mode 100644 f2s/resources/ntp-server/actions/run.pp delete mode 100644 f2s/resources/ntp-server/meta.yaml delete mode 100644 f2s/resources/openstack-cinder/actions/run.pp delete mode 100644 f2s/resources/openstack-cinder/meta.yaml delete mode 100644 f2s/resources/openstack-controller/actions/run.pp delete mode 100644 f2s/resources/openstack-controller/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-ceilometer/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-ceilometer/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-cinder/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-cinder/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-glance/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-glance/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-heat/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-heat/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-horizon/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-horizon/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-ironic/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-ironic/meta.yaml delete mode 100644 
f2s/resources/openstack-haproxy-keystone/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-keystone/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-murano/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-murano/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-mysqld/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-mysqld/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-neutron/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-neutron/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-nova/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-nova/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-radosgw/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-radosgw/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-sahara/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-sahara/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-stats/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-stats/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy-swift/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy-swift/meta.yaml delete mode 100644 f2s/resources/openstack-haproxy/actions/run.pp delete mode 100644 f2s/resources/openstack-haproxy/meta.yaml delete mode 100644 f2s/resources/openstack-network-agents-dhcp/actions/run.pp delete mode 100644 f2s/resources/openstack-network-agents-dhcp/meta.yaml delete mode 100644 f2s/resources/openstack-network-agents-l3/actions/run.pp delete mode 100644 f2s/resources/openstack-network-agents-l3/meta.yaml delete mode 100644 f2s/resources/openstack-network-agents-metadata/actions/run.pp delete mode 100644 f2s/resources/openstack-network-agents-metadata/meta.yaml delete mode 100644 f2s/resources/openstack-network-common-config/actions/run.pp delete mode 100644 f2s/resources/openstack-network-common-config/meta.yaml delete 
mode 100644 f2s/resources/openstack-network-compute-nova/actions/run.pp delete mode 100644 f2s/resources/openstack-network-compute-nova/meta.yaml delete mode 100644 f2s/resources/openstack-network-end/meta.yaml delete mode 100644 f2s/resources/openstack-network-networks/actions/run.pp delete mode 100644 f2s/resources/openstack-network-networks/meta.yaml delete mode 100644 f2s/resources/openstack-network-plugins-l2/actions/run.pp delete mode 100644 f2s/resources/openstack-network-plugins-l2/meta.yaml delete mode 100644 f2s/resources/openstack-network-routers/actions/run.pp delete mode 100644 f2s/resources/openstack-network-routers/meta.yaml delete mode 100644 f2s/resources/openstack-network-server-config/actions/run.pp delete mode 100644 f2s/resources/openstack-network-server-config/meta.yaml delete mode 100644 f2s/resources/openstack-network-server-nova/actions/run.pp delete mode 100644 f2s/resources/openstack-network-server-nova/meta.yaml delete mode 100644 f2s/resources/openstack-network-start/meta.yaml delete mode 100644 f2s/resources/pre_hiera_config/actions/run.pp delete mode 100644 f2s/resources/pre_hiera_config/meta.yaml delete mode 100644 f2s/resources/public_vip_ping/actions/run.pp delete mode 100644 f2s/resources/public_vip_ping/meta.yaml delete mode 100644 f2s/resources/rabbitmq/actions/run.pp delete mode 100644 f2s/resources/rabbitmq/meta.yaml delete mode 100755 f2s/resources/role_data/managers/from_file.py delete mode 100755 f2s/resources/role_data/managers/from_nailgun.py delete mode 100644 f2s/resources/role_data/managers/globals.pp delete mode 100755 f2s/resources/role_data/managers/globals.py delete mode 100644 f2s/resources/role_data/managers/test_sample.yaml delete mode 100644 f2s/resources/role_data/meta.yaml delete mode 100644 f2s/resources/sahara-db/actions/run.pp delete mode 100644 f2s/resources/sahara-db/meta.yaml delete mode 100644 f2s/resources/sahara-keystone/actions/run.pp delete mode 100644 f2s/resources/sahara-keystone/meta.yaml delete 
mode 100644 f2s/resources/sahara/actions/run.pp delete mode 100644 f2s/resources/sahara/meta.yaml delete mode 100644 f2s/resources/ssl-add-trust-chain/actions/run.pp delete mode 100644 f2s/resources/ssl-add-trust-chain/meta.yaml delete mode 100644 f2s/resources/ssl-keys-saving/actions/run.pp delete mode 100644 f2s/resources/ssl-keys-saving/meta.yaml delete mode 100644 f2s/resources/swift-keystone/actions/run.pp delete mode 100644 f2s/resources/swift-keystone/meta.yaml delete mode 100644 f2s/resources/swift-rebalance-cron/actions/run.pp delete mode 100644 f2s/resources/swift-rebalance-cron/meta.yaml delete mode 100644 f2s/resources/swift/actions/run.pp delete mode 100644 f2s/resources/swift/meta.yaml delete mode 100644 f2s/resources/tools/actions/run.pp delete mode 100644 f2s/resources/tools/meta.yaml delete mode 100644 f2s/resources/top-role-ceph-osd/actions/run.pp delete mode 100644 f2s/resources/top-role-ceph-osd/meta.yaml delete mode 100644 f2s/resources/top-role-cinder-vmware/actions/run.pp delete mode 100644 f2s/resources/top-role-cinder-vmware/meta.yaml delete mode 100644 f2s/resources/top-role-cinder/actions/run.pp delete mode 100644 f2s/resources/top-role-cinder/meta.yaml delete mode 100644 f2s/resources/top-role-compute-vmware/actions/run.pp delete mode 100644 f2s/resources/top-role-compute-vmware/meta.yaml delete mode 100644 f2s/resources/top-role-compute/actions/run.pp delete mode 100644 f2s/resources/top-role-compute/meta.yaml delete mode 100644 f2s/resources/top-role-mongo/actions/run.pp delete mode 100644 f2s/resources/top-role-mongo/meta.yaml delete mode 100644 f2s/resources/top-role-primary-mongo/actions/run.pp delete mode 100644 f2s/resources/top-role-primary-mongo/meta.yaml delete mode 100644 f2s/resources/umm/actions/run.pp delete mode 100644 f2s/resources/umm/meta.yaml delete mode 100644 f2s/resources/update_hosts/actions/run.pp delete mode 100644 f2s/resources/update_hosts/meta.yaml delete mode 100644 f2s/resources/updatedb/actions/run.pp 
delete mode 100644 f2s/resources/updatedb/meta.yaml delete mode 100644 f2s/resources/virtual_ips/actions/run.pp delete mode 100644 f2s/resources/virtual_ips/meta.yaml delete mode 100644 f2s/resources/vmware-vcenter/actions/run.pp delete mode 100644 f2s/resources/vmware-vcenter/meta.yaml delete mode 100644 f2s/resources/workloads_collector_add/actions/run.pp delete mode 100644 f2s/resources/workloads_collector_add/meta.yaml delete mode 100644 f2s/vrs/base-os.yml delete mode 100644 f2s/vrs/ceph-osd.yml delete mode 100644 f2s/vrs/cinder-vmware.yml delete mode 100644 f2s/vrs/cinder.yml delete mode 100644 f2s/vrs/compute-vmware.yml delete mode 100644 f2s/vrs/compute.yml delete mode 100644 f2s/vrs/controller.yml delete mode 100644 f2s/vrs/fuel_node.yaml delete mode 100644 f2s/vrs/genkeys.yaml delete mode 100644 f2s/vrs/ironic.yml delete mode 100644 f2s/vrs/mongo.yml delete mode 100644 f2s/vrs/prep.yaml delete mode 100644 f2s/vrs/primary-controller.yml delete mode 100644 f2s/vrs/primary-mongo.yml delete mode 100644 f2s/vrs/virt.yml diff --git a/Dockerfile b/Dockerfile index b02f277e..8afcb984 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,6 @@ ADD bootstrap/playbooks/celery.yaml /celery.yaml ADD resources /resources ADD templates /templates ADD run.sh /run.sh -ADD f2s /f2s RUN apt-get update # Install pip's dependency: setuptools: @@ -21,7 +20,6 @@ RUN pip install https://github.com/Mirantis/solar-agent/archive/master.zip RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --tags install -RUN pip install riak peewee RUN pip install -U setuptools>=17.1 RUN pip install -U python-fuelclient RUN apt-get install -y puppet diff --git a/f2s/.gitignore b/f2s/.gitignore deleted file mode 100644 index 65923bcf..00000000 --- a/f2s/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -fuel-library -tmp diff --git a/f2s/README.md b/f2s/README.md deleted file mode 100644 index e6fe945c..00000000 --- a/f2s/README.md +++ /dev/null @@ -1,109 +0,0 @@ -#How to install on fuel master? 
- -To use solar on fuel master we need to use container because of -python2.6 there. Also solar itself relies on several services. - -``` -yum -y install git -git clone -b f2s https://github.com/Mirantis/solar.git - -docker run --name riak -d -p 8087:8087 -p 8098:8098 tutum/riak - -docker run --name redis -d -p 6379:6379 -e REDIS_PASS=**None** tutum/redis - -docker run --name solar -d -v /root/solar/solar:/solar -v /root/solar/solard:/solard -v /root/solar/templates:/templates \ --v /root/solar/resources:/resources -v /root/solar/f2s:/f2s \ --v /var/lib/fuel:/var/lib/fuel -v /root/.config/fuel/fuel_client.yaml:/etc/fuel/client/config.yaml -v /etc/puppet/modules:/etc/puppet/modules \ --v /root/.ssh:/root/.ssh \ ---link=riak:riak --link=redis:redis solarproject/solar-celery:f2s -``` - -#f2s.py - -This script converts tasks.yaml + library actions into solar resources, -vrs, and events. - -1. Based on tasks.yaml meta.yaml is generated, you can take a look on example -at f2s/resources/netconfig/meta.yaml -2. Based on hiera lookup we generated inputs for each resource, patches can be -found at f2s/patches -3. VRs (f2s/vrs) generated based on dependencies between tasks and roles - -#fsclient.py - -This script helps to create solar resource with some of nailgun data. -Note, you should run it inside of the solar container. - -`./f2s/fsclient.py master 1` -Accepts cluster id, prepares transports for master + generate keys task -for current cluster. - -`./f2s/fsclient.py nodes 1` -Prepares transports for provided nodes, ip and cluster id fetchd from nailgun. - -`./f2s/fsclient.py prep 1` -Creates tasks for syncing keys + fuel-library modules. - -`./f2s/fsclient.py roles 1` -Based on roles stored in nailgun we will assign vrs/.yaml to a given -node. Right now it takes while, so be patient. - -#fetching data from nailgun - -Special entity added which allows to fetch data from any source -*before* any actual deployment. 
-This entity provides mechanism to specify *manager* for resource (or list of them). -Manager accepts inputs as json in stdin, and outputs result in stdout, -with result of manager execution we will update solar storage. - -Examples can be found at f2s/resources/role_data/managers. - -Data will be fetched on solar command - -`solar res prefetch -n ` - -#tweaks - -Several things needs to be manually adjusted before you can use solar -on fuel master. - -- provision a node by fuel - `fuel node --node 1 --provision` -- create /var/lib/astute directory on remote -- install repos using fuel - `fuel node --node 1 --tasks core_repos` -- configure hiera on remote, and create /etc/puppet/hieradata directory -``` - :backends: - - yaml - #- json -:yaml: - :datadir: /etc/puppet/hieradata -:json: - :datadir: /etc/puppet/hieradata -:hierarchy: - - "%{resource_name}" - - resource -``` - -All of this things will be automated by solar eventually - -#basic troubleshooting - -If there are any Fuel plugin installed, you should manually -create a stanza for it in the `./f2s/resources/role_data/meta.yaml`, -like: -``` -input: - foo_plugin_name: - value: null -``` - -And regenerate the data from nailgun, - -To regenerate the deployment data to Solar resources make -``` -solar res clear_all -``` - -and repeat all of the fsclient.py and fetching nailgun data steps diff --git a/f2s/f2s.py b/f2s/f2s.py deleted file mode 100755 index a21814e0..00000000 --- a/f2s/f2s.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/env python - -import os -from fnmatch import fnmatch -import shutil -from collections import OrderedDict - -import click -import yaml -import networkx as nx - - -def ensure_dir(dir): - try: - os.makedirs(dir) - except OSError: - pass - -CURDIR = os.path.dirname(os.path.realpath(__file__)) - -LIBRARY_PATH = os.path.join(CURDIR, 'fuel-library') -RESOURCE_TMP_WORKDIR = os.path.join(CURDIR, 'tmp/resources') -ensure_dir(RESOURCE_TMP_WORKDIR) -RESOURCE_DIR = os.path.join(CURDIR, 'resources') 
-VR_TMP_DIR = os.path.join(CURDIR, 'tmp/vrs') -ensure_dir(VR_TMP_DIR) -INPUTS_LOCATION = "/root/current/" -DEPLOYMENT_GROUP_PATH = os.path.join(LIBRARY_PATH, - 'deployment', 'puppet', 'deployment_groups', 'tasks.yaml') - -VALID_TASKS = ('puppet', 'skipped') - -def clean_resources(): - shutil.rmtree(RESOURCE_TMP_WORKDIR) - ensure_dir(RESOURCE_TMP_WORKDIR) - -def clean_vr(): - shutil.rmtree(VR_TMP_DIR) - ensure_dir(VR_TMP_DIR) - - -def ordered_dump(data, stream=None, Dumper=yaml.Dumper, **kwds): - class OrderedDumper(Dumper): - pass - def _dict_representer(dumper, data): - return dumper.represent_mapping( - yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - data.items()) - OrderedDumper.add_representer(OrderedDict, _dict_representer) - return yaml.dump(data, stream, OrderedDumper, **kwds) - - -class Task(object): - - def __init__(self, task_data, task_path): - self.data = task_data - self.src_path = task_path - self.name = self.data['id'] - self.type = self.data['type'] - - def edges(self): - data = self.data - if 'required_for' in data: - for req in data['required_for']: - yield self.name, req - if 'requires' in data: - for req in data['requires']: - yield req, self.name - - if 'groups' in data: - for req in data['groups']: - yield self.name, req - if 'tasks' in data: - for req in data['tasks']: - yield req, self.name - - @property - def manifest(self): - if self.data['type'] != 'puppet': - return None - after_naily = self.data['parameters']['puppet_manifest'].split('osnailyfacter/')[-1] - return os.path.join( - LIBRARY_PATH, 'deployment', 'puppet', 'osnailyfacter', - after_naily) - - @property - def spec_name(self): - splitted = self.data['parameters']['puppet_manifest'].split('/') - directory = splitted[-2] - name = splitted[-1].split('.')[0] - return "{}_{}_spec.rb'".format(directory, name) - - @property - def dst_path(self): - return os.path.join(RESOURCE_TMP_WORKDIR, self.name) - - @property - def actions_path(self): - return os.path.join(self.dst_path, 
'actions') - - @property - def meta_path(self): - return os.path.join(self.dst_path, 'meta.yaml') - - def meta(self): - if self.data['type'] == 'skipped': - data = OrderedDict([('id', self.name), - ('handler', 'none'), - ('version', '8.0'), - ('inputs', {})]) - elif self.data['type'] == 'puppet': - data = OrderedDict([('id', self.name), - ('handler', 'puppetv2'), - ('version', '8.0'), - ('actions', { - 'run': 'run.pp', - 'update': 'run.pp'}), - ('input', self.inputs()),]) - else: - raise NotImplemented('Support for %s' % self.data['type']) - return ordered_dump(data, default_flow_style=False) - - @property - def actions(self): - """yield an iterable of src/dst - """ - if self.manifest is None: - return - yield self.manifest, os.path.join(self.actions_path, 'run.pp') - - def inputs(self): - """ - Inputs prepared by - - fuel_noop_tests.rb - identity = spec.split('/')[-1] - ENV["SPEC"] = identity - - hiera.rb - File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" } - """ - print self.spec_name - lookup_stack_path = os.path.join( - INPUTS_LOCATION, self.spec_name) - if not os.path.exists(lookup_stack_path): - return {} - - with open(lookup_stack_path) as f: - data = yaml.safe_load(f) or [] - data = data + ['puppet_modules'] - return {key: {'value': None} for key - in set(data) if '::' not in key} - - -class RoleData(Task): - - name = 'role_data' - - def meta(self): - data = {'id': self.name, - 'handler': 'puppetv2', - 'version': '8.0', - 'inputs': self.inputs(), - 'manager': 'globals.py'} - return yaml.safe_dump(data, default_flow_style=False) - - @property - def actions(self): - pass - - -class DGroup(object): - - filtered = ['globals', 'hiera', 'deploy_start'] - - def __init__(self, name, tasks): - self.name = name - self.tasks = tasks - - def resources(self): - - yield OrderedDict( - [('id', RoleData.name+"{{index}}"), - ('from', 'f2s/resources/'+RoleData.name), - ('location', "{{node}}"), - ('values', {'uid': '{{index}}', - 'env': '{{env}}', - 
'puppet_modules': '/etc/puppet/modules'})]) - - for t, _, _ in self.tasks: - if t.name in self.filtered: - continue - - yield OrderedDict( - [('id', t.name+"{{index}}"), - ('from', 'f2s/resources/'+t.name), - ('location', "{{node}}"), - ('values_from', RoleData.name+"{{index}}")]) - - - def events(self): - for t, inner, outer in self.tasks: - if t.name in self.filtered: - continue - - yield OrderedDict([ - ('type', 'depends_on'), - ('state', 'success'), - ('parent_action', RoleData.name + '{{index}}.run'), - ('depend_action', t.name + '{{index}}.run')]) - - for dep in set(inner): - if dep in self.filtered: - continue - - yield OrderedDict([ - ('type', 'depends_on'), - ('state', 'success'), - ('parent_action', dep + '{{index}}.run'), - ('depend_action', t.name + '{{index}}.run')]) - for dep in set(outer): - if dep in self.filtered: - continue - - yield OrderedDict([ - ('type', 'depends_on'), - ('state', 'success'), - ('parent', { - 'with_tags': ['resource=' + dep], - 'action': 'run'}), - ('depend_action', t.name + '{{index}}.run')]) - - def meta(self): - data = OrderedDict([ - ('id', self.name), - ('resources', list(self.resources())), - ('events', list(self.events()))]) - return ordered_dump(data, default_flow_style=False) - - @property - def path(self): - return os.path.join(VR_TMP_DIR, self.name + '.yml') - - -def get_files(base_dir, file_pattern='*tasks.yaml'): - for root, _dirs, files in os.walk(base_dir): - for file_name in files: - if fnmatch(file_name, file_pattern): - yield root, file_name - -def load_data(base, file_name): - with open(os.path.join(base, file_name)) as f: - return yaml.load(f) - -def preview(task): - print 'PATH' - print task.dst_path - print 'META' - print task.meta() - print 'ACTIONS' - for action in task.actions(): - print 'src=%s dst=%s' % action - -def create(task): - ensure_dir(task.dst_path) - if task.actions_path: - ensure_dir(task.actions_path) - for src, dst in task.actions: - shutil.copyfile(src, dst) - - with 
open(task.meta_path, 'w') as f: - f.write(task.meta()) - - -def get_tasks(): - for base, task_yaml in get_files(LIBRARY_PATH + '/deployment'): - for item in load_data(base, task_yaml): - yield Task(item, base) - - -def get_graph(): - dg = nx.DiGraph() - for t in get_tasks(): - dg.add_edges_from(list(t.edges())) - dg.add_node(t.name, t=t) - return dg - -def dgroup_subgraph(dg, dgroup): - preds = [p for p in dg.predecessors(dgroup) - if dg.node[p]['t'].type == 'puppet'] - return dg.subgraph(preds) - -@click.group() -def main(): - pass - -@main.command(help='converts tasks into resources') -@click.argument('tasks', nargs=-1) -@click.option('-t', is_flag=True) -@click.option('-p', is_flag=True) -@click.option('-c', is_flag=True) -def t2r(tasks, t, p, c): - if c: - clean_resources() - - for task in get_tasks(): - if not task.type in VALID_TASKS: - continue - - if task.name in tasks or tasks == (): - if p: - preview(task) - else: - create(task) - - -@main.command(help='convert groups into templates') -@click.argument('groups', nargs=-1) -@click.option('-c', is_flag=True) -def g2vr(groups, c): - if c: - clean_vr() - - dg = get_graph() - dgroups = [n for n in dg if dg.node[n]['t'].type == 'group'] - - for group in dgroups: - if groups and group not in groups: - continue - - ordered = [] - dsub = dg.subgraph(dg.predecessors(group)) - for t in nx.topological_sort(dsub): - inner_preds = [] - outer_preds = [] - for p in dg.predecessors(t): - if not dg.node[p]['t'].type in VALID_TASKS: - continue - - if p in dsub: - inner_preds.append(p) - else: - outer_preds.append(p) - - if dg.node[t]['t'].type in VALID_TASKS: - ordered.append((dg.node[t]['t'], inner_preds, outer_preds)) - - obj = DGroup(group, ordered) - with open(obj.path, 'w') as f: - f.write(obj.meta()) - # based on inner/outer aggregation configure joins in events - -if __name__ == '__main__': - main() diff --git a/f2s/fsclient.py b/f2s/fsclient.py deleted file mode 100755 index 28b8bf66..00000000 --- a/f2s/fsclient.py 
+++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python - -import os - -import click -from solar.core.resource import virtual_resource as vr -from solar.dblayer.model import ModelMeta - - - -@click.group() -def main(): - pass - -class NailgunSource(object): - - def nodes(self, uids): - from fuelclient.objects.node import Node - nodes_obj = map(Node, uids) - return [(str(n.data['id']), str(n.data['ip']), str(n.data['cluster'])) - for n in nodes_obj] - - def roles(self, uid): - from fuelclient.objects.node import Node - from fuelclient.objects.environment import Environment - node = Node(uid) - env = Environment(node.data['cluster']) - facts = env.get_default_facts('deployment', [uid]) - return [f['role'] for f in facts] - - def master(self): - return 'master', '10.20.0.2' - -class DumbSource(object): - - def nodes(self, uids): - ip_mask = '10.0.0.%s' - return [(uid, ip_mask % uid, 1) for uid in uids] - - def roles(self, uid): - return ['primary-controller'] - - def master(self): - return 'master', '0.0.0.0' - -if os.environ.get('DEBUG_FSCLIENT'): - source = DumbSource() -else: - source = NailgunSource() - -@main.command() -@click.argument('uids', nargs=-1) -def nodes(uids): - for uid, ip, env in source.nodes(uids): - vr.create('fuel_node', 'f2s/vrs/fuel_node.yaml', - {'index': uid, 'ip': ip}) - -@main.command() -@click.argument('env') -def master(env): - master = source.master() - vr.create('master', 'f2s/vrs/fuel_node.yaml', - {'index': master[0], 'ip': master[1]}) - - vr.create('genkeys', 'f2s/vrs/genkeys.yaml', { - 'node': 'node'+master[0], - 'index': env}) - -@main.command() -@click.argument('uids', nargs=-1) -def prep(uids): - for uid, ip, env in source.nodes(uids): - vr.create('prep', 'f2s/vrs/prep.yaml', - {'index': uid, 'env': env, 'node': 'node'+uid}) - - -@main.command() -@click.argument('uids', nargs=-1) -def roles(uids): - - for uid, ip, env in source.nodes(uids): - for role in source.roles(uid): - vr.create(role, 'f2s/vrs/'+role +'.yml', - {'index': uid, 
'env': env, 'node': 'node'+uid}) - - -if __name__ == '__main__': - main() - ModelMeta.session_end() diff --git a/f2s/patches/hiera.patch b/f2s/patches/hiera.patch deleted file mode 100644 index abf5a1e7..00000000 --- a/f2s/patches/hiera.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- /tmp/noop/.bundled_gems/gems/hiera-1.3.4/lib/hiera.rb 2015-11-09 19:55:29.127004136 +0000 -+++ /tmp/noop/.bundled_gems/gems/hiera-1.3.4/lib/hiera.rb 2015-11-09 14:15:54.372852787 +0000 -@@ -57,6 +57,7 @@ - # The order-override will insert as first in the hierarchy a data source - # of your choice. - def lookup(key, default, scope, order_override=nil, resolution_type=:priority) -+ File.open("/tmp/fuel_specs/#{ENV['SPEC']}", 'a') { |f| f << "- #{key}\n" } - Backend.lookup(key, default, scope, order_override, resolution_type) - end - end diff --git a/f2s/patches/noop_tests.patch b/f2s/patches/noop_tests.patch deleted file mode 100644 index 83d6ed86..00000000 --- a/f2s/patches/noop_tests.patch +++ /dev/null @@ -1,15 +0,0 @@ ---- fuel-library/utils/jenkins/fuel_noop_tests.rb 2015-11-09 19:51:53.000000000 +0000 -+++ fuel-library/utils/jenkins/fuel_noop_tests.rb 2015-11-09 19:51:17.000000000 +0000 -@@ -271,8 +271,10 @@ - # @return [Array] success and empty report array - def self.rspec(spec) - inside_noop_tests_directory do -+ splitted = spec.split('/') -+ dir, name = splitted[-2], splitted[-1] -+ ENV["SPEC"] = "#{dir}_#{name}" - command = "rspec #{RSPEC_OPTIONS} #{spec}" -- command = 'bundle exec ' + command if options[:bundle] -+ command = "bundle exec " + command if options[:bundle] - if options[:filter_examples] - options[:filter_examples].each do |example| - command = command + " -e #{example}" diff --git a/f2s/resources/apache/actions/run.pp b/f2s/resources/apache/actions/run.pp deleted file mode 100644 index f1dbfb9c..00000000 --- a/f2s/resources/apache/actions/run.pp +++ /dev/null @@ -1,13 +0,0 @@ -notice('MODULAR: apache.pp') - -# adjustments to defaults for LP#1485644 for scale 
-sysctl::value { 'net.core.somaxconn': value => '4096' } -sysctl::value { 'net.ipv4.tcp_max_syn_backlog': value => '8192' } - -class { 'osnailyfacter::apache': - purge_configs => true, - listen_ports => hiera_array('apache_ports', ['80', '8888']), -} - -include ::osnailyfacter::apache_mpm - diff --git a/f2s/resources/apache/meta.yaml b/f2s/resources/apache/meta.yaml deleted file mode 100644 index 54f06566..00000000 --- a/f2s/resources/apache/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: apache -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - apache_ports: - value: null - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/api-proxy/actions/run.pp b/f2s/resources/api-proxy/actions/run.pp deleted file mode 100644 index 80fd7158..00000000 --- a/f2s/resources/api-proxy/actions/run.pp +++ /dev/null @@ -1,16 +0,0 @@ -notice('MODULAR: api-proxy.pp') - -$max_header_size = hiera('max_header_size', '81900') - -# Apache and listen ports -class { 'osnailyfacter::apache': - listen_ports => hiera_array('apache_ports', ['80', '8888']), -} - -# API proxy vhost -class {'osnailyfacter::apache_api_proxy': - master_ip => hiera('master_ip'), - max_header_size => $max_header_size, -} - -include ::tweaks::apache_wrappers diff --git a/f2s/resources/api-proxy/meta.yaml b/f2s/resources/api-proxy/meta.yaml deleted file mode 100644 index 10aae969..00000000 --- a/f2s/resources/api-proxy/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -id: api-proxy -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - apache_ports: - value: null - fqdn: - value: null - master_ip: - value: null - max_header_size: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/ceilometer-compute/actions/run.pp b/f2s/resources/ceilometer-compute/actions/run.pp deleted file mode 100644 index 53fde2a5..00000000 --- a/f2s/resources/ceilometer-compute/actions/run.pp +++ 
/dev/null @@ -1,59 +0,0 @@ -notice('MODULAR: ceilometer/compute.pp') - -$use_syslog = hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$syslog_log_facility = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0') -$rabbit_hash = hiera_hash('rabbit_hash') -$management_vip = hiera('management_vip') -$service_endpoint = hiera('service_endpoint') - -$default_ceilometer_hash = { - 'enabled' => false, - 'db_password' => 'ceilometer', - 'user_password' => 'ceilometer', - 'metering_secret' => 'ceilometer', - 'http_timeout' => '600', - 'event_time_to_live' => '604800', - 'metering_time_to_live' => '604800', -} - -$region = hiera('region', 'RegionOne') -$ceilometer_hash = hiera_hash('ceilometer_hash', $default_ceilometer_hash) -$ceilometer_region = pick($ceilometer_hash['region'], $region) -$ceilometer_enabled = $ceilometer_hash['enabled'] -$amqp_password = $rabbit_hash['password'] -$amqp_user = $rabbit_hash['user'] -$ceilometer_user_password = $ceilometer_hash['user_password'] -$ceilometer_metering_secret = $ceilometer_hash['metering_secret'] -$verbose = pick($ceilometer_hash['verbose'], hiera('verbose', true)) -$debug = pick($ceilometer_hash['debug'], hiera('debug', false)) - -if ($ceilometer_enabled) { - class { 'openstack::ceilometer': - verbose => $verbose, - debug => $debug, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - syslog_log_facility => $syslog_log_facility, - amqp_hosts => hiera('amqp_hosts',''), - amqp_user => $amqp_user, - amqp_password => $amqp_password, - keystone_user => $ceilometer_hash['user'], - keystone_tenant => $ceilometer_hash['tenant'], - keystone_region => $ceilometer_region, - keystone_host => $service_endpoint, - keystone_password => $ceilometer_user_password, - on_compute => true, - metering_secret => $ceilometer_metering_secret, - event_time_to_live => $ceilometer_hash['event_time_to_live'], - metering_time_to_live => $ceilometer_hash['metering_time_to_live'], - http_timeout => $ceilometer_hash['http_timeout'], 
- } - - # We need to restart nova-compute service in orderto apply new settings - include ::nova::params - service { 'nova-compute': - ensure => 'running', - name => $::nova::params::compute_service_name, - } -} diff --git a/f2s/resources/ceilometer-compute/meta.yaml b/f2s/resources/ceilometer-compute/meta.yaml deleted file mode 100644 index 512a5276..00000000 --- a/f2s/resources/ceilometer-compute/meta.yaml +++ /dev/null @@ -1,37 +0,0 @@ -id: ceilometer-compute -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - amqp_hosts: - value: null - ceilometer: - value: null - ceilometer_hash: - value: null - debug: - value: null - fqdn: - value: null - management_vip: - value: null - puppet_modules: - value: null - rabbit_hash: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - syslog_log_facility_ceilometer: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/ceilometer-controller/actions/run.pp b/f2s/resources/ceilometer-controller/actions/run.pp deleted file mode 100644 index 0a062b5c..00000000 --- a/f2s/resources/ceilometer-controller/actions/run.pp +++ /dev/null @@ -1,111 +0,0 @@ -notice('MODULAR: ceilometer/controller.pp') - -$default_ceilometer_hash = { - 'enabled' => false, - 'db_password' => 'ceilometer', - 'user_password' => 'ceilometer', - 'metering_secret' => 'ceilometer', - 'http_timeout' => '600', - 'event_time_to_live' => '604800', - 'metering_time_to_live' => '604800', -} - -$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash) -$verbose = pick($ceilometer_hash['verbose'], hiera('verbose', true)) -$debug = pick($ceilometer_hash['debug'], hiera('debug', false)) -$use_syslog = hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$syslog_log_facility = hiera('syslog_log_facility_ceilometer', 'LOG_LOCAL0') -$nodes_hash = hiera('nodes') -$storage_hash = hiera('storage') -$rabbit_hash 
= hiera_hash('rabbit_hash') -$management_vip = hiera('management_vip') -$region = hiera('region', 'RegionOne') -$ceilometer_region = pick($ceilometer_hash['region'], $region) -$mongo_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles')) -$mongo_address_map = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db') - -$default_mongo_hash = { - 'enabled' => false, -} - -$mongo_hash = hiera_hash('mongo', $default_mongo_hash) - -if $mongo_hash['enabled'] and $ceilometer_hash['enabled'] { - $exteranl_mongo_hash = hiera_hash('external_mongo') - $ceilometer_db_user = $exteranl_mongo_hash['mongo_user'] - $ceilometer_db_password = $exteranl_mongo_hash['mongo_password'] - $ceilometer_db_dbname = $exteranl_mongo_hash['mongo_db_name'] - $external_mongo = true -} else { - $ceilometer_db_user = 'ceilometer' - $ceilometer_db_password = $ceilometer_hash['db_password'] - $ceilometer_db_dbname = 'ceilometer' - $external_mongo = false - $exteranl_mongo_hash = {} -} - -$ceilometer_enabled = $ceilometer_hash['enabled'] -$ceilometer_user_password = $ceilometer_hash['user_password'] -$ceilometer_metering_secret = $ceilometer_hash['metering_secret'] -$ceilometer_db_type = 'mongodb' -$swift_rados_backend = $storage_hash['objects_ceph'] -$amqp_password = $rabbit_hash['password'] -$amqp_user = $rabbit_hash['user'] -$rabbit_ha_queues = true -$service_endpoint = hiera('service_endpoint') -$ha_mode = pick($ceilometer_hash['ha_mode'], true) - -prepare_network_config(hiera('network_scheme', {})) -$api_bind_address = get_network_role_property('ceilometer/api', 'ipaddr') - -if $ceilometer_hash['enabled'] { - if $external_mongo { - $mongo_hosts = $exteranl_mongo_hash['hosts_ip'] - if $exteranl_mongo_hash['mongo_replset'] { - $mongo_replicaset = $exteranl_mongo_hash['mongo_replset'] - } else { - $mongo_replicaset = undef - } - } else { - $mongo_hosts = join(values($mongo_address_map), ',') - # MongoDB is alsways configured with replica set - $mongo_replicaset = 
'ceilometer' - } -} - -############################################################################### - -if ($ceilometer_enabled) { - class { 'openstack::ceilometer': - verbose => $verbose, - debug => $debug, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - syslog_log_facility => $syslog_log_facility, - db_type => $ceilometer_db_type, - db_host => $mongo_hosts, - db_user => $ceilometer_db_user, - db_password => $ceilometer_db_password, - db_dbname => $ceilometer_db_dbname, - swift_rados_backend => $swift_rados_backend, - metering_secret => $ceilometer_metering_secret, - amqp_hosts => hiera('amqp_hosts',''), - amqp_user => $amqp_user, - amqp_password => $amqp_password, - rabbit_ha_queues => $rabbit_ha_queues, - keystone_host => $service_endpoint, - keystone_password => $ceilometer_user_password, - keystone_user => $ceilometer_hash['user'], - keystone_tenant => $ceilometer_hash['tenant'], - keystone_region => $ceilometer_region, - host => $api_bind_address, - ha_mode => $ha_mode, - on_controller => true, - ext_mongo => $external_mongo, - mongo_replicaset => $mongo_replicaset, - event_time_to_live => $ceilometer_hash['event_time_to_live'], - metering_time_to_live => $ceilometer_hash['metering_time_to_live'], - http_timeout => $ceilometer_hash['http_timeout'], - } -} diff --git a/f2s/resources/ceilometer-controller/meta.yaml b/f2s/resources/ceilometer-controller/meta.yaml deleted file mode 100644 index 6e9749d4..00000000 --- a/f2s/resources/ceilometer-controller/meta.yaml +++ /dev/null @@ -1,47 +0,0 @@ -id: ceilometer-controller -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceilometer: - value: null - debug: - value: null - fqdn: - value: null - management_vip: - value: null - mongo: - value: null - mongo_roles: - value: null - network_metadata: - value: null - network_scheme: - value: null - nodes: - value: null - puppet_modules: - value: null - rabbit: - value: null - rabbit_hash: - value: null - region: - value: 
null - role: - value: null - service_endpoint: - value: null - storage: - value: null - syslog_log_facility_ceilometer: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/ceilometer-keystone/actions/run.pp b/f2s/resources/ceilometer-keystone/actions/run.pp deleted file mode 100644 index c6ddaef1..00000000 --- a/f2s/resources/ceilometer-keystone/actions/run.pp +++ /dev/null @@ -1,41 +0,0 @@ -notice('MODULAR: ceilometer/keystone.pp') - -$ceilometer_hash = hiera_hash('ceilometer', {}) -$public_vip = hiera('public_vip') -$public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} -$admin_address = hiera('management_vip') -$region = pick($ceilometer_hash['region'], hiera('region', 'RegionOne')) -$password = $ceilometer_hash['user_password'] -$auth_name = pick($ceilometer_hash['auth_name'], 'ceilometer') -$configure_endpoint = pick($ceilometer_hash['configure_endpoint'], true) -$configure_user = pick($ceilometer_hash['configure_user'], true) -$configure_user_role = pick($ceilometer_hash['configure_user_role'], true) -$service_name = pick($ceilometer_hash['service_name'], 'ceilometer') -$tenant = pick($ceilometer_hash['tenant'], 'services') - -validate_string($public_address) -validate_string($password) - -$public_url = "${public_protocol}://${public_address}:8777" -$admin_url = "http://${admin_address}:8777" - -class { '::ceilometer::keystone::auth': - password => $password, - auth_name => $auth_name, - configure_endpoint => $configure_endpoint, - configure_user => $configure_user, - configure_user_role => $configure_user_role, - service_name => $service_name, - public_url => $public_url, - internal_url => $admin_url, - admin_url => $admin_url, - region => $region, -} diff --git 
a/f2s/resources/ceilometer-keystone/meta.yaml b/f2s/resources/ceilometer-keystone/meta.yaml deleted file mode 100644 index e18c0932..00000000 --- a/f2s/resources/ceilometer-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: ceilometer-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceilometer: - value: null - fqdn: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null diff --git a/f2s/resources/ceilometer-radosgw-user/actions/run.pp b/f2s/resources/ceilometer-radosgw-user/actions/run.pp deleted file mode 100644 index 4d12f91b..00000000 --- a/f2s/resources/ceilometer-radosgw-user/actions/run.pp +++ /dev/null @@ -1,20 +0,0 @@ -notice('MODULAR: ceilometer/radosgw_user.pp') - -$default_ceilometer_hash = { - 'enabled' => false, -} - -$ceilometer_hash = hiera_hash('ceilometer', $default_ceilometer_hash) - -if $ceilometer_hash['enabled'] { - include ceilometer::params - - ceilometer_radosgw_user { 'ceilometer': - caps => {'buckets' => 'read', 'usage' => 'read'}, - } ~> - service { $::ceilometer::params::agent_central_service_name: - ensure => 'running', - enable => true, - provider => 'pacemaker', - } -} diff --git a/f2s/resources/ceilometer-radosgw-user/meta.yaml b/f2s/resources/ceilometer-radosgw-user/meta.yaml deleted file mode 100644 index e4cc3b05..00000000 --- a/f2s/resources/ceilometer-radosgw-user/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: ceilometer-radosgw-user -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceilometer: - value: null - fqdn: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null diff --git a/f2s/resources/ceph-compute/actions/run.pp b/f2s/resources/ceph-compute/actions/run.pp deleted file mode 100644 index 757231e6..00000000 --- a/f2s/resources/ceph-compute/actions/run.pp +++ /dev/null @@ -1,97 
+0,0 @@ -notice('MODULAR: ceph/ceph_compute.pp') - -$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') -$storage_hash = hiera_hash('storage_hash', {}) -$use_neutron = hiera('use_neutron') -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') -$keystone_hash = hiera_hash('keystone_hash', {}) -# Cinder settings -$cinder_pool = 'volumes' -# Glance settings -$glance_pool = 'images' -#Nova Compute settings -$compute_user = 'compute' -$compute_pool = 'compute' - - -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' -} elsif ($storage_hash['images_vcenter']) { - $glance_backend = 'vmware' -} else { - $glance_backend = 'swift' -} - -if ($storage_hash['volumes_ceph'] or - $storage_hash['images_ceph'] or - $storage_hash['objects_ceph'] or - $storage_hash['ephemeral_ceph'] -) { - $use_ceph = true -} else { - $use_ceph = false -} - -if $use_ceph { - $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') - $primary_mons = keys($ceph_primary_monitor_node) - $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] - - prepare_network_config(hiera_hash('network_scheme')) - $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') - $ceph_public_network = get_network_role_property('ceph/public', 'network') - - class {'ceph': - primary_mon => $primary_mon, - mon_hosts => keys($mon_address_map), - mon_ip_addresses => values($mon_address_map), - cluster_node_address => $public_vip, - osd_pool_default_size => $storage_hash['osd_pool_size'], - osd_pool_default_pg_num => $storage_hash['pg_num'], - osd_pool_default_pgp_num => $storage_hash['pg_num'], - use_rgw => false, - glance_backend => $glance_backend, - rgw_pub_ip => $public_vip, - rgw_adm_ip => $management_vip, - rgw_int_ip => $management_vip, - cluster_network => 
$ceph_cluster_network, - public_network => $ceph_public_network, - use_syslog => $use_syslog, - syslog_log_level => hiera('syslog_log_level_ceph', 'info'), - syslog_log_facility => $syslog_log_facility_ceph, - rgw_keystone_admin_token => $keystone_hash['admin_token'], - ephemeral_ceph => $storage_hash['ephemeral_ceph'] - } - - - service { $::ceph::params::service_nova_compute :} - - ceph::pool {$compute_pool: - user => $compute_user, - acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'", - keyring_owner => 'nova', - pg_num => $storage_hash['pg_num'], - pgp_num => $storage_hash['pg_num'], - } - - include ceph::nova_compute - - if ($storage_hash['ephemeral_ceph']) { - include ceph::ephemeral - Class['ceph::conf'] -> Class['ceph::ephemeral'] ~> - Service[$::ceph::params::service_nova_compute] - } - - Class['ceph::conf'] -> - Ceph::Pool[$compute_pool] -> - Class['ceph::nova_compute'] ~> - Service[$::ceph::params::service_nova_compute] - - Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ], - cwd => '/root', - } - -} diff --git a/f2s/resources/ceph-compute/meta.yaml b/f2s/resources/ceph-compute/meta.yaml deleted file mode 100644 index 31907d00..00000000 --- a/f2s/resources/ceph-compute/meta.yaml +++ /dev/null @@ -1,37 +0,0 @@ -id: ceph-compute -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceph_monitor_nodes: - value: null - ceph_primary_monitor_node: - value: null - fqdn: - value: null - keystone_hash: - value: null - management_vip: - value: null - network_scheme: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null - storage_hash: - value: null - syslog_log_facility_ceph: - value: null - syslog_log_level_ceph: - value: null - use_neutron: - value: null - use_syslog: - value: null diff --git a/f2s/resources/ceph-mon/actions/run.pp 
b/f2s/resources/ceph-mon/actions/run.pp deleted file mode 100644 index f9d66765..00000000 --- a/f2s/resources/ceph-mon/actions/run.pp +++ /dev/null @@ -1,95 +0,0 @@ -notice('MODULAR: ceph/mon.pp') - -$storage_hash = hiera('storage', {}) -$use_neutron = hiera('use_neutron') -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') -$keystone_hash = hiera('keystone', {}) -$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') - -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' -} elsif ($storage_hash['images_vcenter']) { - $glance_backend = 'vmware' -} else { - $glance_backend = 'swift' -} - -if ($storage_hash['volumes_ceph'] or - $storage_hash['images_ceph'] or - $storage_hash['objects_ceph'] or - $storage_hash['ephemeral_ceph'] -) { - $use_ceph = true -} else { - $use_ceph = false -} - -if $use_ceph { - $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') - $primary_mons = keys($ceph_primary_monitor_node) - $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] - - prepare_network_config(hiera_hash('network_scheme')) - $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') - $ceph_public_network = get_network_role_property('ceph/public', 'network') - $mon_addr = get_network_role_property('ceph/public', 'ipaddr') - - class {'ceph': - primary_mon => $primary_mon, - mon_hosts => keys($mon_address_map), - mon_ip_addresses => values($mon_address_map), - mon_addr => $mon_addr, - cluster_node_address => $public_vip, - osd_pool_default_size => $storage_hash['osd_pool_size'], - osd_pool_default_pg_num => $storage_hash['pg_num'], - osd_pool_default_pgp_num => $storage_hash['pg_num'], - use_rgw => false, - glance_backend => $glance_backend, - rgw_pub_ip => $public_vip, - rgw_adm_ip => $management_vip, - rgw_int_ip => 
$management_vip, - cluster_network => $ceph_cluster_network, - public_network => $ceph_public_network, - use_syslog => $use_syslog, - syslog_log_level => hiera('syslog_log_level_ceph', 'info'), - syslog_log_facility => $syslog_log_facility_ceph, - rgw_keystone_admin_token => $keystone_hash['admin_token'], - ephemeral_ceph => $storage_hash['ephemeral_ceph'] - } - - if ($storage_hash['volumes_ceph']) { - include ::cinder::params - service { 'cinder-volume': - ensure => 'running', - name => $::cinder::params::volume_service, - hasstatus => true, - hasrestart => true, - } - - service { 'cinder-backup': - ensure => 'running', - name => $::cinder::params::backup_service, - hasstatus => true, - hasrestart => true, - } - - Class['ceph'] ~> Service['cinder-volume'] - Class['ceph'] ~> Service['cinder-backup'] - } - - if ($storage_hash['images_ceph']) { - include ::glance::params - service { 'glance-api': - ensure => 'running', - name => $::glance::params::api_service_name, - hasstatus => true, - hasrestart => true, - } - - Class['ceph'] ~> Service['glance-api'] - } - -} diff --git a/f2s/resources/ceph-mon/meta.yaml b/f2s/resources/ceph-mon/meta.yaml deleted file mode 100644 index b3cdbb68..00000000 --- a/f2s/resources/ceph-mon/meta.yaml +++ /dev/null @@ -1,35 +0,0 @@ -id: ceph-mon -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceph_monitor_nodes: - value: null - ceph_primary_monitor_node: - value: null - fqdn: - value: null - keystone: - value: null - management_vip: - value: null - network_scheme: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null - syslog_log_facility_ceph: - value: null - syslog_log_level_ceph: - value: null - use_neutron: - value: null - use_syslog: - value: null diff --git a/f2s/resources/ceph-radosgw/actions/run.pp b/f2s/resources/ceph-radosgw/actions/run.pp deleted file mode 100644 index cf5f131b..00000000 --- 
a/f2s/resources/ceph-radosgw/actions/run.pp +++ /dev/null @@ -1,103 +0,0 @@ -notice('MODULAR: ceph/radosgw.pp') - -$storage_hash = hiera('storage', {}) -$use_neutron = hiera('use_neutron') -$public_vip = hiera('public_vip') -$keystone_hash = hiera('keystone', {}) -$management_vip = hiera('management_vip') -$service_endpoint = hiera('service_endpoint') -$public_ssl_hash = hiera('public_ssl') -$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') - -if ($storage_hash['volumes_ceph'] or - $storage_hash['images_ceph'] or - $storage_hash['objects_ceph'] -) { - $use_ceph = true -} else { - $use_ceph = false -} - -if $use_ceph and $storage_hash['objects_ceph'] { - $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') - $primary_mons = keys($ceph_primary_monitor_node) - $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] - - prepare_network_config(hiera_hash('network_scheme')) - $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') - $ceph_public_network = get_network_role_property('ceph/public', 'network') - $rgw_ip_address = get_network_role_property('ceph/radosgw', 'ipaddr') - - # Apache and listen ports - class { 'osnailyfacter::apache': - listen_ports => hiera_array('apache_ports', ['80', '8888']), - } - if ($::osfamily == 'Debian'){ - apache::mod {'rewrite': } - apache::mod {'fastcgi': } - } - include ::tweaks::apache_wrappers - include ceph::params - - $haproxy_stats_url = "http://${service_endpoint}:10000/;csv" - - haproxy_backend_status { 'keystone-admin' : - name => 'keystone-2', - count => '200', - step => '6', - url => $haproxy_stats_url, - } - - haproxy_backend_status { 'keystone-public' : - name => 'keystone-1', - count => '200', - step => '6', - url => $haproxy_stats_url, - } - - Haproxy_backend_status['keystone-admin'] -> Class ['ceph::keystone'] - Haproxy_backend_status['keystone-public'] -> Class ['ceph::keystone'] - - class { 'ceph::radosgw': - # SSL 
- use_ssl => false, - public_ssl => $public_ssl_hash['services'], - - # Ceph - primary_mon => $primary_mon, - pub_ip => $public_vip, - adm_ip => $management_vip, - int_ip => $management_vip, - - # RadosGW settings - rgw_host => $::hostname, - rgw_ip => $rgw_ip_address, - rgw_port => '6780', - swift_endpoint_port => '8080', - rgw_keyring_path => '/etc/ceph/keyring.radosgw.gateway', - rgw_socket_path => '/tmp/radosgw.sock', - rgw_log_file => '/var/log/ceph/radosgw.log', - rgw_data => '/var/lib/ceph/radosgw', - rgw_dns_name => "*.${::domain}", - rgw_print_continue => true, - - #rgw Keystone settings - rgw_use_pki => false, - rgw_use_keystone => true, - rgw_keystone_url => "${service_endpoint}:35357", - rgw_keystone_admin_token => $keystone_hash['admin_token'], - rgw_keystone_token_cache_size => '10', - rgw_keystone_accepted_roles => '_member_, Member, admin, swiftoperator', - rgw_keystone_revocation_interval => '1000000', - rgw_nss_db_path => '/etc/ceph/nss', - - #rgw Log settings - use_syslog => hiera('use_syslog', true), - syslog_facility => hiera('syslog_log_facility_ceph', 'LOG_LOCAL0'), - syslog_level => hiera('syslog_log_level_ceph', 'info'), - } - - Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ], - cwd => '/root', - } -} diff --git a/f2s/resources/ceph-radosgw/meta.yaml b/f2s/resources/ceph-radosgw/meta.yaml deleted file mode 100644 index 5c589f2f..00000000 --- a/f2s/resources/ceph-radosgw/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -id: ceph-radosgw -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceph_monitor_nodes: - value: null - fqdn: - value: null - keystone: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - service_endpoint: - value: null - storage: - value: null - use_neutron: - value: null diff --git a/f2s/resources/ceph_create_pools/actions/run.pp 
b/f2s/resources/ceph_create_pools/actions/run.pp deleted file mode 100644 index 6fdb2ee4..00000000 --- a/f2s/resources/ceph_create_pools/actions/run.pp +++ /dev/null @@ -1,80 +0,0 @@ -notice('MODULAR: ceph/ceph_pools') - -$storage_hash = hiera('storage', {}) -$osd_pool_default_pg_num = $storage_hash['pg_num'] -$osd_pool_default_pgp_num = $storage_hash['pg_num'] -# Cinder settings -$cinder_user = 'volumes' -$cinder_pool = 'volumes' -# Cinder Backup settings -$cinder_backup_user = 'backups' -$cinder_backup_pool = 'backups' -# Glance settings -$glance_user = 'images' -$glance_pool = 'images' - - -Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ], - cwd => '/root', -} - -# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279 -ceph::pool {$glance_pool: - user => $glance_user, - acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'", - keyring_owner => 'glance', - pg_num => $osd_pool_default_pg_num, - pgp_num => $osd_pool_default_pg_num, -} - -ceph::pool {$cinder_pool: - user => $cinder_user, - acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'", - keyring_owner => 'cinder', - pg_num => $osd_pool_default_pg_num, - pgp_num => $osd_pool_default_pg_num, -} - -ceph::pool {$cinder_backup_pool: - user => $cinder_backup_user, - acl => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rx pool=${cinder_pool}'", - keyring_owner => 'cinder', - pg_num => $osd_pool_default_pg_num, - pgp_num => $osd_pool_default_pg_num, -} - -Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool] - -if ($storage_hash['volumes_ceph']) { - include ::cinder::params - service { 'cinder-volume': - ensure => 'running', - name => $::cinder::params::volume_service, - hasstatus => true, - hasrestart => true, - } - - Ceph::Pool[$cinder_pool] ~> 
Service['cinder-volume'] - - service { 'cinder-backup': - ensure => 'running', - name => $::cinder::params::backup_service, - hasstatus => true, - hasrestart => true, - } - - Ceph::Pool[$cinder_backup_pool] ~> Service['cinder-backup'] -} - -if ($storage_hash['images_ceph']) { - include ::glance::params - service { 'glance-api': - ensure => 'running', - name => $::glance::params::api_service_name, - hasstatus => true, - hasrestart => true, - } - - Ceph::Pool[$glance_pool] ~> Service['glance-api'] -} - diff --git a/f2s/resources/ceph_create_pools/meta.yaml b/f2s/resources/ceph_create_pools/meta.yaml deleted file mode 100644 index 9a8d7e57..00000000 --- a/f2s/resources/ceph_create_pools/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: ceph_create_pools -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null diff --git a/f2s/resources/cinder-db/actions/run.pp b/f2s/resources/cinder-db/actions/run.pp deleted file mode 100644 index e51e3383..00000000 --- a/f2s/resources/cinder-db/actions/run.pp +++ /dev/null @@ -1,53 +0,0 @@ -notice('MODULAR: cinder/db.pp') - -$cinder_hash = hiera_hash('cinder', {}) -$mysql_hash = hiera_hash('mysql_hash', {}) -$management_vip = hiera('management_vip', undef) -$database_vip = hiera('database_vip', undef) - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$db_user = pick($cinder_hash['db_user'], 'cinder') -$db_name = pick($cinder_hash['db_name'], 'cinder') -$db_password = pick($cinder_hash['db_password'], $mysql_root_password) - -$db_host = pick($cinder_hash['db_host'], $database_vip) -$db_create = pick($cinder_hash['db_create'], $mysql_db_create) -$db_root_user = pick($cinder_hash['root_user'], $mysql_root_user) -$db_root_password = pick($cinder_hash['root_password'], $mysql_root_password) - 
-$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) - -if $db_create { - - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'cinder::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['cinder::db::mysql'] - -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server diff --git a/f2s/resources/cinder-db/meta.yaml b/f2s/resources/cinder-db/meta.yaml deleted file mode 100644 index 72c5c1d5..00000000 --- a/f2s/resources/cinder-db/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: cinder-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - cinder: - value: null - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - mysql_custom_setup_class: - value: null - mysql_hash: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/cinder-keystone/actions/run.pp b/f2s/resources/cinder-keystone/actions/run.pp deleted file mode 100644 index 1b93e142..00000000 --- a/f2s/resources/cinder-keystone/actions/run.pp +++ /dev/null @@ -1,51 +0,0 @@ -notice('MODULAR: cinder/keystone.pp') - -$cinder_hash = hiera_hash('cinder', {}) -$public_ssl_hash = hiera('public_ssl') -$public_vip = hiera('public_vip') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? 
{ - true => 'https', - default => 'http', -} -$admin_protocol = 'http' -$admin_address = hiera('management_vip') -$region = pick($cinder_hash['region'], hiera('region', 'RegionOne')) - -$password = $cinder_hash['user_password'] -$auth_name = pick($cinder_hash['auth_name'], 'cinder') -$configure_endpoint = pick($cinder_hash['configure_endpoint'], true) -$configure_user = pick($cinder_hash['configure_user'], true) -$configure_user_role = pick($cinder_hash['configure_user_role'], true) -$service_name = pick($cinder_hash['service_name'], 'cinder') -$tenant = pick($cinder_hash['tenant'], 'services') - -$port = '8776' - -$public_url = "${public_protocol}://${public_address}:${port}/v1/%(tenant_id)s" -$admin_url = "${admin_protocol}://${admin_address}:${port}/v1/%(tenant_id)s" - -$public_url_v2 = "${public_protocol}://${public_address}:${port}/v2/%(tenant_id)s" -$admin_url_v2 = "${admin_protocol}://${admin_address}:${port}/v2/%(tenant_id)s" - -validate_string($public_address) -validate_string($password) - -class { '::cinder::keystone::auth': - password => $password, - auth_name => $auth_name, - configure_endpoint => $configure_endpoint, - configure_user => $configure_user, - configure_user_role => $configure_user_role, - service_name => $service_name, - public_url => $public_url, - internal_url => $admin_url, - admin_url => $admin_url, - public_url_v2 => $public_url_v2, - internal_url_v2 => $admin_url_v2, - admin_url_v2 => $admin_url_v2, - region => $region, -} diff --git a/f2s/resources/cinder-keystone/meta.yaml b/f2s/resources/cinder-keystone/meta.yaml deleted file mode 100644 index 7c15a73d..00000000 --- a/f2s/resources/cinder-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: cinder-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - cinder: - value: null - fqdn: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - 
role: - value: null diff --git a/f2s/resources/cluster-haproxy/actions/run.pp b/f2s/resources/cluster-haproxy/actions/run.pp deleted file mode 100644 index 9c604867..00000000 --- a/f2s/resources/cluster-haproxy/actions/run.pp +++ /dev/null @@ -1,20 +0,0 @@ -notice('MODULAR: cluster-haproxy.pp') - -$network_scheme = hiera('network_scheme', {}) -$management_vip = hiera('management_vip') -$database_vip = hiera('database_vip', '') -$service_endpoint = hiera('service_endpoint', '') -$primary_controller = hiera('primary_controller') -$haproxy_hash = hiera_hash('haproxy', {}) - -#FIXME(mattymo): Replace with only VIPs for roles assigned to this node -$stats_ipaddresses = delete_undef_values([$management_vip, $database_vip, $service_endpoint, '127.0.0.1']) - -class { 'cluster::haproxy': - haproxy_maxconn => '16000', - haproxy_bufsize => '32768', - primary_controller => $primary_controller, - debug => pick($haproxy_hash['debug'], hiera('debug', false)), - other_networks => direct_networks($network_scheme['endpoints']), - stats_ipaddresses => $stats_ipaddresses -} diff --git a/f2s/resources/cluster-haproxy/meta.yaml b/f2s/resources/cluster-haproxy/meta.yaml deleted file mode 100644 index 9552c31f..00000000 --- a/f2s/resources/cluster-haproxy/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: cluster-haproxy -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - debug: - value: null - fqdn: - value: null - haproxy: - value: null - management_vip: - value: null - network_scheme: - value: null - primary_controller: - value: null - puppet_modules: - value: null - role: - value: null - service_endpoint: - value: null diff --git a/f2s/resources/cluster-vrouter/actions/run.pp b/f2s/resources/cluster-vrouter/actions/run.pp deleted file mode 100644 index 09125d94..00000000 --- a/f2s/resources/cluster-vrouter/actions/run.pp +++ /dev/null @@ -1,7 +0,0 @@ -notice('MODULAR: cluster-vrouter.pp') - -$network_scheme = 
hiera('network_scheme', {}) - -class { 'cluster::vrouter_ocf': - other_networks => direct_networks($network_scheme['endpoints']), -} diff --git a/f2s/resources/cluster-vrouter/meta.yaml b/f2s/resources/cluster-vrouter/meta.yaml deleted file mode 100644 index 7f302dab..00000000 --- a/f2s/resources/cluster-vrouter/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: cluster-vrouter -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/cluster/actions/run.pp b/f2s/resources/cluster/actions/run.pp deleted file mode 100644 index 87aa1242..00000000 --- a/f2s/resources/cluster/actions/run.pp +++ /dev/null @@ -1,49 +0,0 @@ -notice('MODULAR: cluster.pp') - -if !(hiera('role') in hiera('corosync_roles')) { - fail('The node role is not in corosync roles') -} - -prepare_network_config(hiera_hash('network_scheme')) - -$corosync_nodes = corosync_nodes( - get_nodes_hash_by_roles( - hiera_hash('network_metadata'), - hiera('corosync_roles') - ), - 'mgmt/corosync' -) - -class { 'cluster': - internal_address => get_network_role_property('mgmt/corosync', 'ipaddr'), - corosync_nodes => $corosync_nodes, -} - -pcmk_nodes { 'pacemaker' : - nodes => $corosync_nodes, - add_pacemaker_nodes => false, -} - -Service <| title == 'corosync' |> { - subscribe => File['/etc/corosync/service.d'], - require => File['/etc/corosync/corosync.conf'], -} - -Service['corosync'] -> Pcmk_nodes<||> -Pcmk_nodes<||> -> Service<| provider == 'pacemaker' |> - -# Sometimes during first start pacemaker can not connect to corosync -# via IPC due to pacemaker and corosync processes are run under different users -if($::operatingsystem == 'Ubuntu') { - $pacemaker_run_uid = 'hacluster' - $pacemaker_run_gid = 'haclient' - - file {'/etc/corosync/uidgid.d/pacemaker': - content =>"uidgid { - uid: ${pacemaker_run_uid} - gid: ${pacemaker_run_gid} -}" - } - - 
File['/etc/corosync/corosync.conf'] -> File['/etc/corosync/uidgid.d/pacemaker'] -> Service <| title == 'corosync' |> -} diff --git a/f2s/resources/cluster/meta.yaml b/f2s/resources/cluster/meta.yaml deleted file mode 100644 index b029a5ba..00000000 --- a/f2s/resources/cluster/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -id: cluster -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - corosync_roles: - value: null - fqdn: - value: null - network_metadata: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/cluster_health/actions/run.pp b/f2s/resources/cluster_health/actions/run.pp deleted file mode 100644 index d6906cd6..00000000 --- a/f2s/resources/cluster_health/actions/run.pp +++ /dev/null @@ -1,20 +0,0 @@ -notice('MODULAR: cluster/health.pp') - -if !(hiera('role') in hiera('corosync_roles')) { - fail('The node role is not in corosync roles') -} - -# load the mounted filesystems from our custom fact, remove boot -$mount_points = delete(split($::mounts, ','), '/boot') - -$disks = hiera('corosync_disks', $mount_points) -$min_disk_free = hiera('corosync_min_disk_space', '512M') -$disk_unit = hiera('corosync_disk_unit', 'M') -$monitor_interval = hiera('corosync_disk_monitor_interval', '15s') - -class { 'cluster::sysinfo': - disks => $disks, - min_disk_free => $min_disk_free, - disk_unit => $disk_unit, - monitor_interval => $monitor_interval, -} diff --git a/f2s/resources/cluster_health/meta.yaml b/f2s/resources/cluster_health/meta.yaml deleted file mode 100644 index 2e759c9a..00000000 --- a/f2s/resources/cluster_health/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: cluster_health -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - corosync_disk_monitor: - value: null - corosync_disk_monitor_interval: - value: null - corosync_disk_unit: - value: null - corosync_disks: - value: null - corosync_min_disk_space: - value: null - 
corosync_monitor_interval: - value: null - corosync_roles: - value: null - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/configure_default_route/actions/run.pp b/f2s/resources/configure_default_route/actions/run.pp deleted file mode 100644 index 775cc48e..00000000 --- a/f2s/resources/configure_default_route/actions/run.pp +++ /dev/null @@ -1,11 +0,0 @@ -notice('MODULAR: configure_default_route.pp') - -$network_scheme = hiera('network_scheme') -$management_vrouter_vip = hiera('management_vrouter_vip') - -prepare_network_config($network_scheme) -$management_int = get_network_role_property('management', 'interface') -$fw_admin_int = get_network_role_property('fw-admin', 'interface') -$ifconfig = configure_default_route($network_scheme, $management_vrouter_vip, $fw_admin_int, $management_int ) - -notice ($ifconfig) diff --git a/f2s/resources/configure_default_route/meta.yaml b/f2s/resources/configure_default_route/meta.yaml deleted file mode 100644 index dfc00382..00000000 --- a/f2s/resources/configure_default_route/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: configure_default_route -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vrouter_vip: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/connectivity_tests/actions/run.pp b/f2s/resources/connectivity_tests/actions/run.pp deleted file mode 100644 index 54296d8b..00000000 --- a/f2s/resources/connectivity_tests/actions/run.pp +++ /dev/null @@ -1,5 +0,0 @@ -notice('MODULAR: connectivity_tests.pp') -# Pull the list of repos from hiera -$repo_setup = hiera('repo_setup') -# test that the repos are accessible -url_available($repo_setup['repos']) diff --git a/f2s/resources/connectivity_tests/meta.yaml b/f2s/resources/connectivity_tests/meta.yaml deleted file mode 100644 index 31564b65..00000000 --- 
a/f2s/resources/connectivity_tests/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: connectivity_tests -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - repo_setup: - value: null - role: - value: null diff --git a/f2s/resources/conntrackd/actions/run.pp b/f2s/resources/conntrackd/actions/run.pp deleted file mode 100644 index 360d98ff..00000000 --- a/f2s/resources/conntrackd/actions/run.pp +++ /dev/null @@ -1,79 +0,0 @@ -notice('MODULAR: conntrackd.pp') - -prepare_network_config(hiera('network_scheme', {})) -$vrouter_name = hiera('vrouter_name', 'pub') - -case $operatingsystem { - Centos: { $conntrackd_package = 'conntrack-tools' } - Ubuntu: { $conntrackd_package = 'conntrackd' } -} - - -### CONNTRACKD for CentOS 6 doesn't work under namespaces ## - -if $operatingsystem == 'Ubuntu' { - $bind_address = get_network_role_property('mgmt/vip', 'ipaddr') - $mgmt_bridge = get_network_role_property('mgmt/vip', 'interface') - - package { $conntrackd_package: - ensure => installed, - } -> - - file { '/etc/conntrackd/conntrackd.conf': - content => template('cluster/conntrackd.conf.erb'), - } -> - - cs_resource {'p_conntrackd': - ensure => present, - primitive_class => 'ocf', - provided_by => 'fuel', - primitive_type => 'ns_conntrackd', - metadata => { - 'migration-threshold' => 'INFINITY', - 'failure-timeout' => '180s' - }, - parameters => { - 'bridge' => $mgmt_bridge, - }, - complex_type => 'master', - ms_metadata => { - 'notify' => 'true', - 'ordered' => 'false', - 'interleave' => 'true', - 'clone-node-max' => '1', - 'master-max' => '1', - 'master-node-max' => '1', - 'target-role' => 'Master' - }, - operations => { - 'monitor' => { - 'interval' => '30', - 'timeout' => '60' - }, - 'monitor:Master' => { - 'role' => 'Master', - 'interval' => '27', - 'timeout' => '60' - }, - }, - } - - cs_colocation { "conntrackd-with-${vrouter_name}-vip": - primitives => [ 'master_p_conntrackd:Master', 
"vip__vrouter_${vrouter_name}" ], - } - - File['/etc/conntrackd/conntrackd.conf'] -> Cs_resource['p_conntrackd'] -> Service['p_conntrackd'] -> Cs_colocation["conntrackd-with-${vrouter_name}-vip"] - - service { 'p_conntrackd': - ensure => 'running', - enable => true, - provider => 'pacemaker', - } - - # Workaround to ensure log is rotated properly - file { '/etc/logrotate.d/conntrackd': - content => template('openstack/95-conntrackd.conf.erb'), - } - - Package[$conntrackd_package] -> File['/etc/logrotate.d/conntrackd'] -} diff --git a/f2s/resources/conntrackd/meta.yaml b/f2s/resources/conntrackd/meta.yaml deleted file mode 100644 index 5debbebe..00000000 --- a/f2s/resources/conntrackd/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: conntrackd -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null - vrouter_name: - value: null diff --git a/f2s/resources/controller_remaining_tasks/actions/run.pp b/f2s/resources/controller_remaining_tasks/actions/run.pp deleted file mode 100644 index d20ddc5d..00000000 --- a/f2s/resources/controller_remaining_tasks/actions/run.pp +++ /dev/null @@ -1,49 +0,0 @@ -notice('MODULAR: controller.pp') - -# Pulling hiera -$primary_controller = hiera('primary_controller') -$neutron_mellanox = hiera('neutron_mellanox', false) -$use_neutron = hiera('use_neutron', false) - -# Do the stuff -if $neutron_mellanox { - $mellanox_mode = $neutron_mellanox['plugin'] -} else { - $mellanox_mode = 'disabled' -} - -if $primary_controller { - if ($mellanox_mode == 'ethernet') { - $test_vm_pkg = 'cirros-testvm-mellanox' - } else { - $test_vm_pkg = 'cirros-testvm' - } - package { 'cirros-testvm' : - ensure => 'installed', - name => $test_vm_pkg, - } -} - -Exec { logoutput => true } - -if ($::mellanox_mode == 'ethernet') { - $ml2_eswitch = $neutron_mellanox['ml2_eswitch'] - class { 'mellanox_openstack::controller': - 
eswitch_vnic_type => $ml2_eswitch['vnic_type'], - eswitch_apply_profile_patch => $ml2_eswitch['apply_profile_patch'], - } -} - -# NOTE(bogdando) for nodes with pacemaker, we should use OCF instead of monit - -# BP https://blueprints.launchpad.net/mos/+spec/include-openstackclient -package { 'python-openstackclient' : - ensure => installed, -} - -# Reduce swapiness on controllers, see LP#1413702 -sysctl::value { 'vm.swappiness': - value => '10' -} - -# vim: set ts=2 sw=2 et : diff --git a/f2s/resources/controller_remaining_tasks/meta.yaml b/f2s/resources/controller_remaining_tasks/meta.yaml deleted file mode 100644 index c7437a01..00000000 --- a/f2s/resources/controller_remaining_tasks/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -id: controller_remaining_tasks -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - neutron_mellanox: - value: null - primary_controller: - value: null - puppet_modules: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/database/actions/run.pp b/f2s/resources/database/actions/run.pp deleted file mode 100644 index b056e3fb..00000000 --- a/f2s/resources/database/actions/run.pp +++ /dev/null @@ -1,132 +0,0 @@ -notice('MODULAR: database.pp') - -prepare_network_config(hiera('network_scheme', {})) -$use_syslog = hiera('use_syslog', true) -$primary_controller = hiera('primary_controller') -$mysql_hash = hiera_hash('mysql', {}) -$management_vip = hiera('management_vip') -$database_vip = hiera('database_vip', $management_vip) - -$network_scheme = hiera('network_scheme', {}) -$mgmt_iface = get_network_role_property('mgmt/database', 'interface') -$direct_networks = split(direct_networks($network_scheme['endpoints'], $mgmt_iface, 'netmask'), ' ') -$access_networks = flatten(['localhost', '127.0.0.1', '240.0.0.0/255.255.0.0', $direct_networks]) - -$haproxy_stats_port = '10000' -$haproxy_stats_url = "http://${database_vip}:${haproxy_stats_port}/;csv" - 
-$mysql_database_password = $mysql_hash['root_password'] -$enabled = pick($mysql_hash['enabled'], true) - -$galera_node_address = get_network_role_property('mgmt/database', 'ipaddr') -$galera_nodes = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('database_nodes'), 'mgmt/database')) -$galera_primary_controller = hiera('primary_database', $primary_controller) -$mysql_bind_address = '0.0.0.0' -$galera_cluster_name = 'openstack' - -$mysql_skip_name_resolve = true -$custom_setup_class = hiera('mysql_custom_setup_class', 'galera') - -# Get galera gcache factor based on cluster node's count -$galera_gcache_factor = count(unique(filter_hash(hiera('nodes', []), 'uid'))) - -$status_user = 'clustercheck' -$status_password = $mysql_hash['wsrep_password'] -$backend_port = '3307' -$backend_timeout = '10' - -############################################################################# -validate_string($status_password) -validate_string($mysql_database_password) -validate_string($status_password) - -if $enabled { - - if $custom_setup_class { - file { '/etc/mysql/my.cnf': - ensure => absent, - require => Class['mysql::server'] - } - $config_hash_real = { - 'config_file' => '/etc/my.cnf' - } - } else { - $config_hash_real = { } - } - - if '/var/lib/mysql' in split($::mounts, ',') { - $ignore_db_dirs = ['lost+found'] - } else { - $ignore_db_dirs = [] - } - - class { 'mysql::server': - bind_address => '0.0.0.0', - etc_root_password => true, - root_password => $mysql_database_password, - old_root_password => '', - galera_cluster_name => $galera_cluster_name, - primary_controller => $galera_primary_controller, - galera_node_address => $galera_node_address, - galera_nodes => $galera_nodes, - galera_gcache_factor => $galera_gcache_factor, - enabled => $enabled, - custom_setup_class => $custom_setup_class, - mysql_skip_name_resolve => $mysql_skip_name_resolve, - use_syslog => $use_syslog, - config_hash => $config_hash_real, - ignore_db_dirs => $ignore_db_dirs, - } - - class { 
'osnailyfacter::mysql_user': - password => $mysql_database_password, - access_networks => $access_networks, - } - - exec { 'initial_access_config': - command => '/bin/ln -sf /etc/mysql/conf.d/password.cnf /root/.my.cnf', - } - - if ($custom_mysql_setup_class == 'percona_packages' and $::osfamily == 'RedHat') { - # This is a work around to prevent the conflict between the - # MySQL-shared-wsrep package (included as a dependency for MySQL-python) and - # the Percona shared package Percona-XtraDB-Cluster-shared-56. They both - # provide the libmysql client libraries. Since we are requiring the - # installation of the Percona package here before mysql::python, the python - # client is happy and the server installation won't fail due to the - # installation of our shared package - package { 'Percona-XtraDB-Cluster-shared-56': - ensure => 'present', - before => Class['mysql::python'], - } - } - - $management_networks = get_routable_networks_for_network_role($network_scheme, 'mgmt/database', ' ') - - class { 'openstack::galera::status': - status_user => $status_user, - status_password => $status_password, - status_allow => $galera_node_address, - backend_host => $galera_node_address, - backend_port => $backend_port, - backend_timeout => $backend_timeout, - only_from => "127.0.0.1 240.0.0.2 ${management_networks}", - } - - haproxy_backend_status { 'mysql': - name => 'mysqld', - url => $haproxy_stats_url, - } - - class { 'osnailyfacter::mysql_access': - db_password => $mysql_database_password, - } - - Class['mysql::server'] -> - Class['osnailyfacter::mysql_user'] -> - Exec['initial_access_config'] -> - Class['openstack::galera::status'] -> - Haproxy_backend_status['mysql'] -> - Class['osnailyfacter::mysql_access'] - -} diff --git a/f2s/resources/database/meta.yaml b/f2s/resources/database/meta.yaml deleted file mode 100644 index e2cd878e..00000000 --- a/f2s/resources/database/meta.yaml +++ /dev/null @@ -1,33 +0,0 @@ -id: database -handler: puppetv2 -version: '8.0' -actions: 
- run: run.pp - update: run.pp -input: - database_nodes: - value: null - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - mysql: - value: null - mysql_custom_setup_class: - value: null - network_scheme: - value: null - nodes: - value: null - primary_controller: - value: null - primary_database: - value: null - puppet_modules: - value: null - role: - value: null - use_syslog: - value: null diff --git a/f2s/resources/disable_keystone_service_token/actions/run.pp b/f2s/resources/disable_keystone_service_token/actions/run.pp deleted file mode 100644 index 2708a261..00000000 --- a/f2s/resources/disable_keystone_service_token/actions/run.pp +++ /dev/null @@ -1,41 +0,0 @@ -notice('MODULAR: service_token_off.pp') - -#################################################################### -# Used as singular by post-deployment action to disable admin_token -# - -$keystone_params = hiera_hash('keystone_hash', {}) - -if $keystone_params['service_token_off'] { - - include ::keystone::params - include ::tweaks::apache_wrappers - - keystone_config { - 'DEFAULT/admin_token': ensure => absent; - } - - # Get paste.ini source - $keystone_paste_ini = $::keystone::params::paste_config ? 
{ - undef => '/etc/keystone/keystone-paste.ini', - default => $::keystone::params::paste_config, - } - - # Remove admin_token_auth middleware from public/admin/v3 pipelines - exec { 'remove_admin_token_auth_middleware': - path => ['/bin', '/usr/bin'], - command => "sed -i.dist 's/ admin_token_auth//' $keystone_paste_ini", - onlyif => "fgrep -q ' admin_token_auth' $keystone_paste_ini", - } - - service { 'httpd': - ensure => 'running', - name => $::tweaks::apache_wrappers::service_name, - enable => true, - } - - # Restart service that changes to take effect - Keystone_config<||> ~> Service['httpd'] - Exec['remove_admin_token_auth_middleware'] ~> Service['httpd'] - -} diff --git a/f2s/resources/disable_keystone_service_token/meta.yaml b/f2s/resources/disable_keystone_service_token/meta.yaml deleted file mode 100644 index ae29a628..00000000 --- a/f2s/resources/disable_keystone_service_token/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: disable_keystone_service_token -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - keystone_hash: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/dns-client/actions/run.pp b/f2s/resources/dns-client/actions/run.pp deleted file mode 100644 index f7207b37..00000000 --- a/f2s/resources/dns-client/actions/run.pp +++ /dev/null @@ -1,8 +0,0 @@ -notice('MODULAR: dns-client.pp') - -$management_vip = hiera('management_vrouter_vip') - -class { 'osnailyfacter::resolvconf': - management_vip => $management_vip, -} - diff --git a/f2s/resources/dns-client/meta.yaml b/f2s/resources/dns-client/meta.yaml deleted file mode 100644 index f1f9d6bc..00000000 --- a/f2s/resources/dns-client/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: dns-client -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vrouter_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git 
a/f2s/resources/dns-server/actions/run.pp b/f2s/resources/dns-server/actions/run.pp deleted file mode 100644 index 54f0ad75..00000000 --- a/f2s/resources/dns-server/actions/run.pp +++ /dev/null @@ -1,16 +0,0 @@ -notice('MODULAR: dns-server.pp') - -$dns_servers = hiera('external_dns') -$primary_controller = hiera('primary_controller') -$master_ip = hiera('master_ip') -$management_vrouter_vip = hiera('management_vrouter_vip') - -class { 'osnailyfacter::dnsmasq': - external_dns => strip(split($dns_servers['dns_list'], ',')), - master_ip => $master_ip, - management_vrouter_vip => $management_vrouter_vip, -} -> - -class { 'cluster::dns_ocf': - primary_controller => $primary_controller, -} diff --git a/f2s/resources/dns-server/meta.yaml b/f2s/resources/dns-server/meta.yaml deleted file mode 100644 index 24ee68a1..00000000 --- a/f2s/resources/dns-server/meta.yaml +++ /dev/null @@ -1,21 +0,0 @@ -id: dns-server -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - external_dns: - value: null - fqdn: - value: null - management_vrouter_vip: - value: null - master_ip: - value: null - primary_controller: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/dump_rabbitmq_definitions/actions/run.pp b/f2s/resources/dump_rabbitmq_definitions/actions/run.pp deleted file mode 100644 index 1ddca51b..00000000 --- a/f2s/resources/dump_rabbitmq_definitions/actions/run.pp +++ /dev/null @@ -1,33 +0,0 @@ -notice('MODULAR: dump_rabbitmq_definitions.pp') - -$definitions_dump_file = '/etc/rabbitmq/definitions' -$original_definitions_dump_file = '/etc/rabbitmq/definitions.full' -$rabbit_hash = hiera_hash('rabbit_hash', - { - 'user' => false, - 'password' => false, - } - ) -$rabbit_enabled = pick($rabbit_hash['enabled'], true) - - -if ($rabbit_enabled) { - $rabbit_api_endpoint = 'http://localhost:15672/api/definitions' - $rabbit_credentials = "${rabbit_hash['user']}:${rabbit_hash['password']}" - - exec { 
'rabbitmq-dump-definitions': - path => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'], - command => "curl -u ${rabbit_credentials} ${rabbit_api_endpoint} -o ${original_definitions_dump_file}", - }-> - exec { 'rabbitmq-dump-clean': - path => ['/usr/bin', '/usr/sbin', '/sbin', '/bin'], - command => "rabbitmq-dump-clean.py < ${original_definitions_dump_file} > ${definitions_dump_file}", - } - - file { [$definitions_dump_file, $original_definitions_dump_file]: - ensure => file, - owner => 'root', - group => 'root', - mode => '0600', - } -} diff --git a/f2s/resources/dump_rabbitmq_definitions/meta.yaml b/f2s/resources/dump_rabbitmq_definitions/meta.yaml deleted file mode 100644 index 93ece163..00000000 --- a/f2s/resources/dump_rabbitmq_definitions/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: dump_rabbitmq_definitions -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - rabbit_hash: - value: null - role: - value: null diff --git a/f2s/resources/enable_cinder_volume_service/actions/run.pp b/f2s/resources/enable_cinder_volume_service/actions/run.pp deleted file mode 100644 index 3dbe8e63..00000000 --- a/f2s/resources/enable_cinder_volume_service/actions/run.pp +++ /dev/null @@ -1,10 +0,0 @@ -include cinder::params - -$volume_service = $::cinder::params::volume_service - -service { $volume_service: - ensure => running, - enable => true, - hasstatus => true, - hasrestart => true, -} diff --git a/f2s/resources/enable_cinder_volume_service/meta.yaml b/f2s/resources/enable_cinder_volume_service/meta.yaml deleted file mode 100644 index ce45311b..00000000 --- a/f2s/resources/enable_cinder_volume_service/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: enable_cinder_volume_service -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git 
a/f2s/resources/enable_nova_compute_service/actions/run.pp b/f2s/resources/enable_nova_compute_service/actions/run.pp deleted file mode 100644 index 8738f70e..00000000 --- a/f2s/resources/enable_nova_compute_service/actions/run.pp +++ /dev/null @@ -1,10 +0,0 @@ -include nova::params - -$compute_service_name = $::nova::params::compute_service_name - -service { $compute_service_name: - ensure => running, - enable => true, - hasstatus => true, - hasrestart => true, -} diff --git a/f2s/resources/enable_nova_compute_service/meta.yaml b/f2s/resources/enable_nova_compute_service/meta.yaml deleted file mode 100644 index 37cca6e9..00000000 --- a/f2s/resources/enable_nova_compute_service/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: enable_nova_compute_service -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/enable_rados/actions/run.pp b/f2s/resources/enable_rados/actions/run.pp deleted file mode 100644 index e9ebbab5..00000000 --- a/f2s/resources/enable_rados/actions/run.pp +++ /dev/null @@ -1,17 +0,0 @@ -include ::ceph::params - -$radosgw_service = $::ceph::params::service_radosgw - -# ensure the service is running and will start on boot -service { $radosgw_service: - ensure => running, - enable => true, -} - -# The Ubuntu upstart script is incompatible with the upstart provider -# This will force the service to fall back to the debian init script -if ($::operatingsystem == 'Ubuntu') { - Service['radosgw'] { - provider => 'debian' - } -} diff --git a/f2s/resources/enable_rados/meta.yaml b/f2s/resources/enable_rados/meta.yaml deleted file mode 100644 index 1af87035..00000000 --- a/f2s/resources/enable_rados/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: enable_rados -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git 
a/f2s/resources/firewall/actions/run.pp b/f2s/resources/firewall/actions/run.pp deleted file mode 100644 index ffac1cd8..00000000 --- a/f2s/resources/firewall/actions/run.pp +++ /dev/null @@ -1,133 +0,0 @@ -notice('MODULAR: firewall.pp') - -$network_scheme = hiera_hash('network_scheme') -$ironic_hash = hiera_hash('ironic', {}) - -# Workaround for fuel bug with firewall -firewall {'003 remote rabbitmq ': - sport => [ 4369, 5672, 41055, 55672, 61613 ], - source => hiera('master_ip'), - proto => 'tcp', - action => 'accept', - require => Class['openstack::firewall'], -} - -firewall {'004 remote puppet ': - sport => [ 8140 ], - source => hiera('master_ip'), - proto => 'tcp', - action => 'accept', - require => Class['openstack::firewall'], -} - -# allow local rabbitmq admin traffic for LP#1383258 -firewall {'005 local rabbitmq admin': - sport => [ 15672 ], - iniface => 'lo', - proto => 'tcp', - action => 'accept', - require => Class['openstack::firewall'], -} - -# reject all non-local rabbitmq admin traffic for LP#1450443 -firewall {'006 reject non-local rabbitmq admin': - sport => [ 15672 ], - proto => 'tcp', - action => 'drop', - require => Class['openstack::firewall'], -} - -# allow connections from haproxy namespace -firewall {'030 allow connections from haproxy namespace': - source => '240.0.0.2', - action => 'accept', - require => Class['openstack::firewall'], -} - -prepare_network_config(hiera_hash('network_scheme')) -class { 'openstack::firewall' : - nova_vnc_ip_range => get_routable_networks_for_network_role($network_scheme, 'nova/api'), - nova_api_ip_range => get_network_role_property('nova/api', 'network'), - libvirt_network => get_network_role_property('management', 'network'), - keystone_network => get_network_role_property('keystone/api', 'network'), - iscsi_ip => get_network_role_property('cinder/iscsi', 'ipaddr'), -} - -if $ironic_hash['enabled'] { - $nodes_hash = hiera('nodes', {}) - $roles = node_roles($nodes_hash, hiera('uid')) - $network_metadata = 
hiera_hash('network_metadata', {}) - $baremetal_int = get_network_role_property('ironic/baremetal', 'interface') - $baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr'] - $baremetal_ipaddr = get_network_role_property('ironic/baremetal', 'ipaddr') - $baremetal_network = get_network_role_property('ironic/baremetal', 'network') - - firewallchain { 'baremetal:filter:IPv4': - ensure => present, - } -> - firewall { '999 drop all baremetal': - chain => 'baremetal', - action => 'drop', - proto => 'all', - } -> - firewall {'00 baremetal-filter': - proto => 'all', - iniface => $baremetal_int, - jump => 'baremetal', - require => Class['openstack::firewall'], - } - - if member($roles, 'controller') or member($roles, 'primary-controller') { - firewall { '100 allow baremetal ping from VIP': - chain => 'baremetal', - source => $baremetal_vip, - destination => $baremetal_ipaddr, - proto => 'icmp', - icmp => 'echo-request', - action => 'accept', - } - firewall { '207 ironic-api' : - dport => '6385', - proto => 'tcp', - action => 'accept', - } - } - - if member($roles, 'ironic') { - firewall { '101 allow baremetal-related': - chain => 'baremetal', - source => $baremetal_network, - destination => $baremetal_ipaddr, - proto => 'all', - state => ['RELATED', 'ESTABLISHED'], - action => 'accept', - } - - firewall { '102 allow baremetal-rsyslog': - chain => 'baremetal', - source => $baremetal_network, - destination => $baremetal_ipaddr, - proto => 'udp', - dport => '514', - action => 'accept', - } - - firewall { '103 allow baremetal-TFTP': - chain => 'baremetal', - source => $baremetal_network, - destination => $baremetal_ipaddr, - proto => 'udp', - dport => '69', - action => 'accept', - } - - k_mod {'nf_conntrack_tftp': - ensure => 'present' - } - - file_line {'nf_conntrack_tftp_on_boot': - path => '/etc/modules', - line => 'nf_conntrack_tftp', - } - } -} diff --git a/f2s/resources/firewall/meta.yaml b/f2s/resources/firewall/meta.yaml deleted file mode 100644 index 
50a062b8..00000000 --- a/f2s/resources/firewall/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -id: firewall -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - ironic: - value: null - master_ip: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/fuel_pkgs/actions/run.pp b/f2s/resources/fuel_pkgs/actions/run.pp deleted file mode 100644 index d425f78a..00000000 --- a/f2s/resources/fuel_pkgs/actions/run.pp +++ /dev/null @@ -1,10 +0,0 @@ -notice('MODULAR: fuel_pkgs.pp') - -$fuel_packages = [ - 'fuel-ha-utils', - 'fuel-misc', -] - -package { $fuel_packages : - ensure => 'latest', -} diff --git a/f2s/resources/fuel_pkgs/meta.yaml b/f2s/resources/fuel_pkgs/meta.yaml deleted file mode 100644 index ee066f1f..00000000 --- a/f2s/resources/fuel_pkgs/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: fuel_pkgs -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/generate_vms/actions/run.pp b/f2s/resources/generate_vms/actions/run.pp deleted file mode 100644 index 29a8201e..00000000 --- a/f2s/resources/generate_vms/actions/run.pp +++ /dev/null @@ -1,49 +0,0 @@ -notice('MODULAR: generate_vms.pp') - -$libvirt_dir = '/etc/libvirt/qemu' -$template_dir = '/var/lib/nova' -$packages = ['qemu-utils', 'qemu-kvm', 'libvirt-bin', 'xmlstarlet'] -$libvirt_service_name = 'libvirtd' - -$vms = hiera_array('vms_conf') - -define vm_config { - $details = $name - $id = $details['id'] - - file { "${template_dir}/template_${id}_vm.xml": - owner => 'root', - group => 'root', - content => template('osnailyfacter/vm_libvirt.erb'), - } -} - -package { $packages: - ensure => 'installed', -} - -service { $libvirt_service_name: - ensure => 'running', - require => Package[$packages], - before => Exec['generate_vms'], -} - -file { "${libvirt_dir}/autostart": 
- ensure => 'directory', - require => Package[$packages], -} - -file { "${template_dir}": - ensure => 'directory', -} - -vm_config { $vms: - before => Exec['generate_vms'], - require => File["${template_dir}"], -} - -exec { 'generate_vms': - command => "/usr/bin/generate_vms.sh ${libvirt_dir} ${template_dir}", - path => ['/usr/sbin', '/usr/bin' , '/sbin', '/bin'], - require => [File["${template_dir}"], File["${libvirt_dir}/autostart"]], -} diff --git a/f2s/resources/generate_vms/meta.yaml b/f2s/resources/generate_vms/meta.yaml deleted file mode 100644 index feff4a76..00000000 --- a/f2s/resources/generate_vms/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: generate_vms -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/genkeys/actions/run.sh b/f2s/resources/genkeys/actions/run.sh deleted file mode 100644 index 182f3007..00000000 --- a/f2s/resources/genkeys/actions/run.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh - -cluster_id={{uid}} -open_ssl_keys='{{ssl|join(' ')}}' -ssh_keys='{{ ssh|join(' ') }}' -keys_path={{path}} - -BASE_PATH=$keys_path/$cluster_id/ - -function generate_open_ssl_keys { - for i in $open_ssl_keys - do - local dir_path=$BASE_PATH$i/ - local key_path=$dir_path$i.key - mkdir -p $dir_path - if [ ! -f $key_path ]; then - openssl rand -base64 741 > $key_path 2>&1 - else - echo 'Key $key_path already exists' - fi - done -} - -function generate_ssh_keys { - for i in $ssh_keys - do - local dir_path=$BASE_PATH$i/ - local key_path=$dir_path$i - mkdir -p $dir_path - if [ ! 
-f $key_path ]; then - ssh-keygen -b 2048 -t rsa -N '' -f $key_path 2>&1 - else - echo 'Key $key_path already exists' - fi - done -} - -generate_open_ssl_keys -generate_ssh_keys diff --git a/f2s/resources/genkeys/meta.yaml b/f2s/resources/genkeys/meta.yaml deleted file mode 100644 index cbd450c6..00000000 --- a/f2s/resources/genkeys/meta.yaml +++ /dev/null @@ -1,20 +0,0 @@ -id: genkeys -handler: shell -version: 0.0.1 -input: - uid: - schema: str! - value: - path: - schema: str! - value: /var/lib/fuel/keys/ - ssl: - schema: [] - value: - - mongo - ssh: - schema: [] - value: - - neutron - - nova - - mysql diff --git a/f2s/resources/glance-db/actions/run.pp b/f2s/resources/glance-db/actions/run.pp deleted file mode 100644 index bdec1277..00000000 --- a/f2s/resources/glance-db/actions/run.pp +++ /dev/null @@ -1,53 +0,0 @@ -notice('MODULAR: glance/db.pp') - -$glance_hash = hiera_hash('glance', {}) -$mysql_hash = hiera_hash('mysql', {}) -$management_vip = hiera('management_vip') -$database_vip = hiera('database_vip') - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$db_user = pick($glance_hash['db_user'], 'glance') -$db_name = pick($glance_hash['db_name'], 'glance') -$db_password = pick($glance_hash['db_password'], $mysql_root_password) - -$db_host = pick($glance_hash['db_host'], $database_vip) -$db_create = pick($glance_hash['db_create'], $mysql_db_create) -$db_root_user = pick($glance_hash['root_user'], $mysql_root_user) -$db_root_password = pick($glance_hash['root_password'], $mysql_root_password) - -$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) -validate_string($database_vip) - - -if $db_create { - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'glance::db::mysql': - user => $db_user, - password => $db_password, - dbname 
=> $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['glance::db::mysql'] -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server diff --git a/f2s/resources/glance-db/meta.yaml b/f2s/resources/glance-db/meta.yaml deleted file mode 100644 index a22af0a3..00000000 --- a/f2s/resources/glance-db/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -id: glance-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - glance: - value: null - management_vip: - value: null - mysql: - value: null - mysql_custom_setup_class: - value: null - node_name: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/glance-keystone/actions/run.pp b/f2s/resources/glance-keystone/actions/run.pp deleted file mode 100644 index 6a06be29..00000000 --- a/f2s/resources/glance-keystone/actions/run.pp +++ /dev/null @@ -1,42 +0,0 @@ -notice('MODULAR: glance/keystone.pp') - -$glance_hash = hiera_hash('glance', {}) -$public_vip = hiera('public_vip') -$public_ssl_hash = hiera('public_ssl') -$admin_address = hiera('management_vip') -$region = pick($glance_hash['region'], hiera('region', 'RegionOne')) -$password = $glance_hash['user_password'] -$auth_name = pick($glance_hash['auth_name'], 'glance') -$configure_endpoint = pick($glance_hash['configure_endpoint'], true) -$configure_user = pick($glance_hash['configure_user'], true) -$configure_user_role = pick($glance_hash['configure_user_role'], true) -$service_name = pick($glance_hash['service_name'], 'glance') -$tenant = pick($glance_hash['tenant'], 'services') - -$public_address = $public_ssl_hash['services'] ? 
{ - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} - -$public_url = "${public_protocol}://${public_address}:9292" -$admin_url = "http://${admin_address}:9292" - -validate_string($public_address) -validate_string($password) - -class { '::glance::keystone::auth': - password => $password, - auth_name => $auth_name, - configure_endpoint => $configure_endpoint, - configure_user => $configure_user, - configure_user_role => $configure_user_role, - service_name => $service_name, - public_url => $public_url, - admin_url => $admin_url, - internal_url => $admin_url, - region => $region, -} diff --git a/f2s/resources/glance-keystone/meta.yaml b/f2s/resources/glance-keystone/meta.yaml deleted file mode 100644 index 584c59a1..00000000 --- a/f2s/resources/glance-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: glance-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - glance: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null diff --git a/f2s/resources/glance/actions/run.pp b/f2s/resources/glance/actions/run.pp deleted file mode 100644 index 66d8de90..00000000 --- a/f2s/resources/glance/actions/run.pp +++ /dev/null @@ -1,128 +0,0 @@ -notice('MODULAR: glance.pp') - -$network_scheme = hiera_hash('network_scheme', {}) -$network_metadata = hiera_hash('network_metadata', {}) -prepare_network_config($network_scheme) - -$glance_hash = hiera_hash('glance', {}) -$verbose = pick($glance_hash['verbose'], hiera('verbose', true)) -$debug = pick($glance_hash['debug'], hiera('debug', false)) -$management_vip = hiera('management_vip') -$database_vip = hiera('database_vip') -$service_endpoint = hiera('service_endpoint') -$storage_hash = hiera('storage') -$use_syslog = 
hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$syslog_log_facility = hiera('syslog_log_facility_glance') -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$max_pool_size = hiera('max_pool_size') -$max_overflow = hiera('max_overflow') -$ceilometer_hash = hiera_hash('ceilometer', {}) -$region = hiera('region','RegionOne') -$glance_endpoint = $management_vip -$service_workers = pick($glance_hash['glance_workers'], min(max($::processorcount, 2), 16)) - -$db_type = 'mysql' -$db_host = pick($glance_hash['db_host'], $database_vip) -$api_bind_address = get_network_role_property('glance/api', 'ipaddr') -$enabled = true -$max_retries = '-1' -$idle_timeout = '3600' -$auth_uri = "http://${service_endpoint}:5000/" - -$rabbit_password = $rabbit_hash['password'] -$rabbit_user = $rabbit_hash['user'] -$rabbit_hosts = split(hiera('amqp_hosts',''), ',') -$rabbit_virtual_host = '/' - -$glance_db_user = pick($glance_hash['db_user'], 'glance') -$glance_db_dbname = pick($glance_hash['db_name'], 'glance') -$glance_db_password = $glance_hash['db_password'] -$glance_user = pick($glance_hash['user'],'glance') -$glance_user_password = $glance_hash['user_password'] -$glance_tenant = pick($glance_hash['tenant'],'services') -$glance_vcenter_host = $glance_hash['vc_host'] -$glance_vcenter_user = $glance_hash['vc_user'] -$glance_vcenter_password = $glance_hash['vc_password'] -$glance_vcenter_datacenter = $glance_hash['vc_datacenter'] -$glance_vcenter_datastore = $glance_hash['vc_datastore'] -$glance_vcenter_image_dir = $glance_hash['vc_image_dir'] -$glance_vcenter_api_retry_count = '20' -$glance_image_cache_max_size = $glance_hash['image_cache_max_size'] -$glance_pipeline = pick($glance_hash['pipeline'], 'keystone') -$glance_large_object_size = pick($glance_hash['large_object_size'], '5120') - -$rados_connect_timeout = '30' - -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' - $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] - 
$glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], true) -} elsif ($storage_hash['images_vcenter']) { - $glance_backend = 'vmware' - $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] - $glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], true) -} else { - $glance_backend = 'swift' - $glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ] - $swift_store_large_object_size = $glance_large_object_size - $glance_show_image_direct_url = pick($glance_hash['show_image_direct_url'], false) -} - -############################################################################### - -class { 'openstack::glance': - verbose => $verbose, - debug => $debug, - db_type => $db_type, - db_host => $db_host, - glance_db_user => $glance_db_user, - glance_db_dbname => $glance_db_dbname, - glance_db_password => $glance_db_password, - glance_user => $glance_user, - glance_user_password => $glance_user_password, - glance_tenant => $glance_tenant, - glance_vcenter_host => $glance_vcenter_host, - glance_vcenter_user => $glance_vcenter_user, - glance_vcenter_password => $glance_vcenter_password, - glance_vcenter_datacenter => $glance_vcenter_datacenter, - glance_vcenter_datastore => $glance_vcenter_datastore, - glance_vcenter_image_dir => $glance_vcenter_image_dir, - glance_vcenter_api_retry_count => $glance_vcenter_api_retry_count, - auth_uri => $auth_uri, - keystone_host => $service_endpoint, - region => $region, - bind_host => $api_bind_address, - enabled => $enabled, - glance_backend => $glance_backend, - registry_host => $glance_endpoint, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - show_image_direct_url => $glance_show_image_direct_url, - swift_store_large_object_size => $swift_store_large_object_size, - pipeline => $glance_pipeline, - syslog_log_facility => $syslog_log_facility, - glance_image_cache_max_size => $glance_image_cache_max_size, - max_retries => 
$max_retries, - max_pool_size => $max_pool_size, - max_overflow => $max_overflow, - idle_timeout => $idle_timeout, - rabbit_password => $rabbit_password, - rabbit_userid => $rabbit_user, - rabbit_hosts => $rabbit_hosts, - rabbit_virtual_host => $rabbit_virtual_host, - known_stores => $glance_known_stores, - ceilometer => $ceilometer_hash[enabled], - service_workers => $service_workers, - rados_connect_timeout => $rados_connect_timeout, -} - -####### Disable upstart startup on install ####### -if($::operatingsystem == 'Ubuntu') { - tweaks::ubuntu_service_override { 'glance-api': - package_name => 'glance-api', - } - tweaks::ubuntu_service_override { 'glance-registry': - package_name => 'glance-registry', - } -} diff --git a/f2s/resources/glance/meta.yaml b/f2s/resources/glance/meta.yaml deleted file mode 100644 index d1e28e4f..00000000 --- a/f2s/resources/glance/meta.yaml +++ /dev/null @@ -1,49 +0,0 @@ -id: glance -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - amqp_hosts: - value: null - ceilometer: - value: null - database_vip: - value: null - debug: - value: null - fqdn: - value: null - glance: - value: null - management_vip: - value: null - max_overflow: - value: null - max_pool_size: - value: null - network_metadata: - value: null - network_scheme: - value: null - puppet_modules: - value: null - rabbit_hash: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - storage: - value: null - syslog_log_facility_glance: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/globals/actions/run.pp b/f2s/resources/globals/actions/run.pp deleted file mode 100644 index e8d712fd..00000000 --- a/f2s/resources/globals/actions/run.pp +++ /dev/null @@ -1,293 +0,0 @@ -notice('MODULAR: globals.pp') - -$service_token_off = false -$globals_yaml_file = '/etc/hiera/globals.yaml' - -# remove cached globals values before anything else 
-remove_file($globals_yaml_file) - -$network_scheme = hiera_hash('network_scheme', {}) -if empty($network_scheme) { - fail("Network_scheme not given in the astute.yaml") -} -$network_metadata = hiera_hash('network_metadata', {}) -if empty($network_metadata) { - fail("Network_metadata not given in the astute.yaml") -} - -$node_name = regsubst(hiera('fqdn', $::hostname), '\..*$', '') -$node = $network_metadata['nodes'][$node_name] -if empty($node) { - fail("Node hostname is not defined in the astute.yaml") -} - -prepare_network_config($network_scheme) - -# DEPRICATED -$nodes_hash = hiera('nodes', {}) - -$deployment_mode = hiera('deployment_mode', 'ha_compact') -$roles = $node['node_roles'] -$storage_hash = hiera('storage', {}) -$syslog_hash = hiera('syslog', {}) -$base_syslog_hash = hiera('base_syslog', {}) -$sahara_hash = hiera('sahara', {}) -$murano_hash = hiera('murano', {}) -$heat_hash = hiera_hash('heat', {}) -$vcenter_hash = hiera('vcenter', {}) -$nova_hash = hiera_hash('nova', {}) -$mysql_hash = hiera('mysql', {}) -$rabbit_hash = hiera_hash('rabbit', {}) -$glance_hash = hiera_hash('glance', {}) -$swift_hash = hiera('swift', {}) -$cinder_hash = hiera_hash('cinder', {}) -$ceilometer_hash = hiera('ceilometer',{}) -$access_hash = hiera_hash('access', {}) -$mp_hash = hiera('mp', {}) -$keystone_hash = merge({'service_token_off' => $service_token_off}, - hiera_hash('keystone', {})) - -$node_role = hiera('role') -$dns_nameservers = hiera('dns_nameservers', []) -$use_ceilometer = $ceilometer_hash['enabled'] -$use_neutron = hiera('quantum', false) -$use_ovs = hiera('use_ovs', $use_neutron) -$verbose = true -$debug = hiera('debug', false) -$use_monit = false -$master_ip = hiera('master_ip') -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') -$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3') -$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') 
-$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') -$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') -$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') -$syslog_log_facility_heat = hiera('syslog_log_facility_heat','LOG_LOCAL0') -$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0') -$syslog_log_facility_ceilometer = hiera('syslog_log_facility_ceilometer','LOG_LOCAL0') -$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') - -$nova_report_interval = hiera('nova_report_interval', 60) -$nova_service_down_time = hiera('nova_service_down_time', 180) -$apache_ports = hiera_array('apache_ports', ['80', '8888', '5000', '35357']) - -$openstack_version = hiera('openstack_version', - { - 'keystone' => 'installed', - 'glance' => 'installed', - 'horizon' => 'installed', - 'nova' => 'installed', - 'novncproxy' => 'installed', - 'cinder' => 'installed', - } -) - -$nova_rate_limits = hiera('nova_rate_limits', - { - 'POST' => 100000, - 'POST_SERVERS' => 100000, - 'PUT' => 1000, - 'GET' => 100000, - 'DELETE' => 100000 - } -) - -$cinder_rate_limits = hiera('cinder_rate_limits', - { - 'POST' => 100000, - 'POST_SERVERS' => 100000, - 'PUT' => 100000, - 'GET' => 100000, - 'DELETE' => 100000 - } -) - -$default_gateway = get_default_gateways() -$public_vip = $network_metadata['vips']['public']['ipaddr'] -$management_vip = $network_metadata['vips']['management']['ipaddr'] -$public_vrouter_vip = $network_metadata['vips']['vrouter_pub']['ipaddr'] -$management_vrouter_vip = $network_metadata['vips']['vrouter']['ipaddr'] - -$database_vip = is_hash($network_metadata['vips']['database']) ? { - true => pick($network_metadata['vips']['database']['ipaddr'], $management_vip), - default => $management_vip -} -$service_endpoint = is_hash($network_metadata['vips']['service_endpoint']) ? 
{ - true => pick($network_metadata['vips']['service_endpoint']['ipaddr'], $management_vip), - default => $management_vip -} - -if $use_neutron { - $novanetwork_params = {} - $neutron_config = hiera_hash('quantum_settings') - $network_provider = 'neutron' - $neutron_db_password = $neutron_config['database']['passwd'] - $neutron_user_password = $neutron_config['keystone']['admin_password'] - $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] - $base_mac = $neutron_config['L2']['base_mac'] - $management_network_range = get_network_role_property('mgmt/vip', 'network') -} else { - $neutron_config = {} - $novanetwork_params = hiera('novanetwork_parameters') - $network_size = $novanetwork_params['network_size'] - $num_networks = $novanetwork_params['num_networks'] - $network_provider = 'nova' - if ( $novanetwork_params['network_manager'] == 'FlatDHCPManager') { - $private_int = get_network_role_property('novanetwork/fixed', 'interface') - } else { - $private_int = get_network_role_property('novanetwork/vlan', 'interface') - $vlan_start = $novanetwork_params['vlan_start'] - $network_config = { - 'vlan_start' => $vlan_start, - } - } - $network_manager = "nova.network.manager.${novanetwork_params['network_manager']}" - $management_network_range = hiera('management_network_range') -} - -if $node_role == 'primary-controller' { - $primary_controller = true -} else { - $primary_controller = false -} - -$controllers_hash = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$mountpoints = filter_hash($mp_hash,'point') - -# AMQP configuration -$queue_provider = hiera('queue_provider','rabbitmq') -$rabbit_ha_queues = true - -if !$rabbit_hash['user'] { - $rabbit_hash['user'] = 'nova' -} - -$amqp_port = hiera('amqp_ports', '5673') -if hiera('amqp_hosts', false) { - # using pre-defined in astute.yaml RabbitMQ servers - $amqp_hosts = hiera('amqp_hosts') -} else { - # using RabbitMQ servers on controllers - # 
todo(sv): switch from 'controller' nodes to 'rmq' nodes as soon as it was implemented as additional node-role - $controllers_with_amqp_server = get_node_to_ipaddr_map_by_network_role($controllers_hash, 'mgmt/messaging') - $amqp_nodes = ipsort(values($controllers_with_amqp_server)) - # amqp_hosts() randomize order of RMQ endpoints and put local one first - $amqp_hosts = amqp_hosts($amqp_nodes, $amqp_port, get_network_role_property('mgmt/messaging', 'ipaddr')) -} - -# MySQL and SQLAlchemy backend configuration -$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera') -$max_pool_size = hiera('max_pool_size', min($::processorcount * 5 + 0, 30 + 0)) -$max_overflow = hiera('max_overflow', min($::processorcount * 5 + 0, 60 + 0)) -$max_retries = hiera('max_retries', '-1') -$idle_timeout = hiera('idle_timeout','3600') -$nova_db_password = $nova_hash['db_password'] -$sql_connection = "mysql://nova:${nova_db_password}@${database_vip}/nova?read_timeout = 6 0" -$mirror_type = hiera('mirror_type', 'external') -$multi_host = hiera('multi_host', true) - -# Determine who should get the volume service -if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { - $manage_volumes = 'iscsi' -} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { - $manage_volumes = 'vmdk' -} elsif ($storage_hash['volumes_ceph']) { - $manage_volumes = 'ceph' -} else { - $manage_volumes = false -} - -# Define ceph-related variables -$ceph_primary_monitor_node = get_nodes_hash_by_roles($network_metadata, ['primary-controller']) -$ceph_monitor_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$ceph_rgw_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -#Determine who should be the default backend -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' - $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] -} elsif ($storage_hash['images_vcenter']) { - 
$glance_backend = 'vmware' - $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] -} else { - $glance_backend = 'file' - $glance_known_stores = false -} - -# Define ceilometer-related variables: -# todo: use special node-roles instead controllers in the future -$ceilometer_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define memcached-related variables: -$memcache_roles = hiera('memcache_roles', ['primary-controller', 'controller']) - -# Define node roles, that will carry corosync/pacemaker -$corosync_roles = hiera('corosync_roles', ['primary-controller', 'controller']) - -# Define cinder-related variables -# todo: use special node-roles instead controllers in the future -$cinder_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define horizon-related variables: -# todo: use special node-roles instead controllers in the future -$horizon_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define swift-related variables -# todo(sv): use special node-roles instead controllers in the future -$swift_master_role = 'primary-controller' -$swift_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$swift_proxies = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$swift_proxy_caches = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) # memcache for swift -$is_primary_swift_proxy = $primary_controller - -# Define murano-related variables -$murano_roles = ['primary-controller', 'controller'] - -# Define heat-related variables: -$heat_roles = ['primary-controller', 'controller'] - -# Define sahara-related variable -$sahara_roles = ['primary-controller', 'controller'] - -# Define ceilometer-releated parameters -if !$ceilometer_hash['event_time_to_live'] { $ceilometer_hash['event_time_to_live'] = '604800'} -if 
!$ceilometer_hash['metering_time_to_live'] { $ceilometer_hash['metering_time_to_live'] = '604800' } -if !$ceilometer_hash['http_timeout'] { $ceilometer_hash['http_timeout'] = '600' } - -# Define database-related variables: -# todo: use special node-roles instead controllers in the future -$database_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define Nova-API variables: -# todo: use special node-roles instead controllers in the future -$nova_api_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define mongo-related variables -$mongo_roles = ['primary-mongo', 'mongo'] - -# Define neutron-related variables: -# todo: use special node-roles instead controllers in the future -$neutron_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -#Define Ironic-related variables: -$ironic_api_nodes = $controllers_hash - -# Change nova_hash to add vnc port to it -# TODO(sbog): change this when we will get rid of global hashes -$public_ssl_hash = hiera('public_ssl') -if $public_ssl_hash['services'] { - $nova_hash['vncproxy_protocol'] = 'https' -} else { - $nova_hash['vncproxy_protocol'] = 'http' -} - -# save all these global variables into hiera yaml file for later use -# by other manifests with hiera function -file { $globals_yaml_file : - ensure => 'present', - mode => '0644', - owner => 'root', - group => 'root', - content => template('osnailyfacter/globals_yaml.erb') -} diff --git a/f2s/resources/globals/meta.yaml b/f2s/resources/globals/meta.yaml deleted file mode 100644 index 7bc10735..00000000 --- a/f2s/resources/globals/meta.yaml +++ /dev/null @@ -1,127 +0,0 @@ -id: globals -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access: - value: null - amqp_hosts: - value: null - amqp_ports: - value: null - apache_ports: - value: null - base_syslog: - value: null - ceilometer: - value: null - cinder: - value: null - 
cinder_rate_limits: - value: null - corosync_roles: - value: null - custom_mysql_setup_class: - value: null - debug: - value: null - deployment_mode: - value: null - dns_nameservers: - value: null - fqdn: - value: null - glance: - value: null - heat: - value: null - idle_timeout: - value: null - keystone: - value: null - master_ip: - value: null - max_overflow: - value: null - max_pool_size: - value: null - max_retries: - value: null - memcache_roles: - value: null - mirror_type: - value: null - mp: - value: null - multi_host: - value: null - murano: - value: null - mysql: - value: null - network_metadata: - value: null - network_scheme: - value: null - nodes: - value: null - nova: - value: null - nova_rate_limits: - value: null - nova_report_interval: - value: null - nova_service_down_time: - value: null - openstack_version: - value: null - public_ssl: - value: null - puppet_modules: - value: null - quantum: - value: null - quantum_settings: - value: null - queue_provider: - value: null - rabbit: - value: null - role: - value: null - sahara: - value: null - storage: - value: null - swift: - value: null - syslog: - value: null - syslog_log_facility_ceilometer: - value: null - syslog_log_facility_ceph: - value: null - syslog_log_facility_cinder: - value: null - syslog_log_facility_glance: - value: null - syslog_log_facility_heat: - value: null - syslog_log_facility_keystone: - value: null - syslog_log_facility_murano: - value: null - syslog_log_facility_neutron: - value: null - syslog_log_facility_nova: - value: null - syslog_log_facility_sahara: - value: null - use_ovs: - value: null - use_syslog: - value: null - vcenter: - value: null diff --git a/f2s/resources/heat-db/actions/run.pp b/f2s/resources/heat-db/actions/run.pp deleted file mode 100644 index ed7c4c9a..00000000 --- a/f2s/resources/heat-db/actions/run.pp +++ /dev/null @@ -1,53 +0,0 @@ -notice('MODULAR: heat/db.pp') - -$heat_hash = hiera_hash('heat', {}) -$mysql_hash = hiera_hash('mysql', {}) 
-$management_vip = hiera('management_vip', undef) -$database_vip = hiera('database_vip', undef) - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$db_user = pick($heat_hash['db_user'], 'heat') -$db_name = pick($heat_hash['db_name'], 'heat') -$db_password = pick($heat_hash['db_password'], $mysql_root_password) - -$db_host = pick($heat_hash['db_host'], $database_vip) -$db_create = pick($heat_hash['db_create'], $mysql_db_create) -$db_root_user = pick($heat_hash['root_user'], $mysql_root_user) -$db_root_password = pick($heat_hash['root_password'], $mysql_root_password) - -$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) - -if $db_create { - - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'heat::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['heat::db::mysql'] - -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server diff --git a/f2s/resources/heat-db/meta.yaml b/f2s/resources/heat-db/meta.yaml deleted file mode 100644 index 7a4ae4c1..00000000 --- a/f2s/resources/heat-db/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: heat-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - heat: - value: null - management_vip: - value: null - mysql: - value: null - mysql_custom_setup_class: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/heat-keystone/actions/run.pp 
b/f2s/resources/heat-keystone/actions/run.pp deleted file mode 100644 index 50034745..00000000 --- a/f2s/resources/heat-keystone/actions/run.pp +++ /dev/null @@ -1,59 +0,0 @@ -notice('MODULAR: heat/keystone.pp') - -$heat_hash = hiera_hash('heat', {}) -$public_vip = hiera('public_vip') -$admin_address = hiera('management_vip') -$region = pick($heat_hash['region'], hiera('region', 'RegionOne')) -$public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} - -$password = $heat_hash['user_password'] -$auth_name = pick($heat_hash['auth_name'], 'heat') -$configure_endpoint = pick($heat_hash['configure_endpoint'], true) -$configure_user = pick($heat_hash['configure_user'], true) -$configure_user_role = pick($heat_hash['configure_user_role'], true) -$service_name = pick($heat_hash['service_name'], 'heat') -$tenant = pick($heat_hash['tenant'], 'services') - -validate_string($public_address) -validate_string($password) - -$public_url = "${public_protocol}://${public_address}:8004/v1/%(tenant_id)s" -$admin_url = "http://${admin_address}:8004/v1/%(tenant_id)s" -$public_url_cfn = "${public_protocol}://${public_address}:8000/v1" -$admin_url_cfn = "http://${admin_address}:8000/v1" - - - -class { '::heat::keystone::auth' : - password => $password, - auth_name => $auth_name, - region => $region, - tenant => $keystone_tenant, - email => "${auth_name}@localhost", - configure_endpoint => true, - trusts_delegated_roles => $trusts_delegated_roles, - public_url => $public_url, - internal_url => $admin_url, - admin_url => $admin_url, -} - -class { '::heat::keystone::auth_cfn' : - password => $password, - auth_name => "${auth_name}-cfn", - service_type => 'cloudformation', - region => $region, - tenant => $keystone_tenant, - email => "${auth_name}-cfn@localhost", - configure_endpoint => true, - 
public_url => $public_url_cfn, - internal_url => $admin_url_cfn, - admin_url => $admin_url_cfn, -} diff --git a/f2s/resources/heat-keystone/meta.yaml b/f2s/resources/heat-keystone/meta.yaml deleted file mode 100644 index 727409ea..00000000 --- a/f2s/resources/heat-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: heat-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - heat: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null diff --git a/f2s/resources/heat/actions/run.pp b/f2s/resources/heat/actions/run.pp deleted file mode 100644 index 5ed5e9e3..00000000 --- a/f2s/resources/heat/actions/run.pp +++ /dev/null @@ -1,169 +0,0 @@ -notice('MODULAR: heat.pp') - -prepare_network_config(hiera('network_scheme', {})) -$management_vip = hiera('management_vip') -$heat_hash = hiera_hash('heat', {}) -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$max_retries = hiera('max_retries') -$max_pool_size = hiera('max_pool_size') -$max_overflow = hiera('max_overflow') -$idle_timeout = hiera('idle_timeout') -$service_endpoint = hiera('service_endpoint') -$debug = pick($heat_hash['debug'], hiera('debug', false)) -$verbose = pick($heat_hash['verbose'], hiera('verbose', true)) -$use_stderr = hiera('use_stderr', false) -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_heat = hiera('syslog_log_facility_heat') -$deployment_mode = hiera('deployment_mode') -$bind_address = get_network_role_property('heat/api', 'ipaddr') -$database_password = $heat_hash['db_password'] -$keystone_user = pick($heat_hash['user'], 'heat') -$keystone_tenant = pick($heat_hash['tenant'], 'services') -$db_host = pick($heat_hash['db_host'], hiera('database_vip')) -$database_user = pick($heat_hash['db_user'], 'heat') -$database_name = hiera('heat_db_name', 'heat') -$read_timeout = '60' -$sql_connection = 
"mysql://${database_user}:${database_password}@${db_host}/${database_name}?read_timeout=${read_timeout}" -$region = hiera('region', 'RegionOne') -$public_ssl_hash = hiera('public_ssl') -$public_ip = hiera('public_vip') -$public_protocol = pick($public_ssl_hash['services'], false) ? { - true => 'https', - default => 'http', -} - -$public_address = pick($public_ssl_hash['services'], false) ? { - true => pick($public_ssl_hash['hostname']), - default => $public_ip, -} -$auth_uri = "${public_protocol}://${public_address}:5000/v2.0/" -$identity_uri = "http://${service_endpoint}:35357/" - -####### Disable upstart startup on install ####### -if $::operatingsystem == 'Ubuntu' { - tweaks::ubuntu_service_override { 'heat-api-cloudwatch': - package_name => 'heat-api-cloudwatch', - } - tweaks::ubuntu_service_override { 'heat-api-cfn': - package_name => 'heat-api-cfn', - } - tweaks::ubuntu_service_override { 'heat-api': - package_name => 'heat-api', - } - tweaks::ubuntu_service_override { 'heat-engine': - package_name => 'heat-engine', - } - - Tweaks::Ubuntu_service_override['heat-api'] -> Service['heat-api'] - Tweaks::Ubuntu_service_override['heat-api-cfn'] -> Service['heat-api-cfn'] - Tweaks::Ubuntu_service_override['heat-api-cloudwatch'] -> Service['heat-api-cloudwatch'] - Tweaks::Ubuntu_service_override['heat-engine'] -> Service['heat-engine'] -} - -class { 'openstack::heat' : - external_ip => $management_vip, - keystone_auth => pick($heat_hash['keystone_auth'], true), - api_bind_host => $bind_address, - api_cfn_bind_host => $bind_address, - api_cloudwatch_bind_host => $bind_address, - auth_uri => $auth_uri, - identity_uri => $identity_uri, - keystone_user => $keystone_user, - keystone_password => $heat_hash['user_password'], - keystone_tenant => $keystone_tenant, - keystone_ec2_uri => "http://${service_endpoint}:5000/v2.0", - region => $region, - public_ssl => $public_ssl_hash['services'], - rpc_backend => 'rabbit', - amqp_hosts => split(hiera('amqp_hosts',''), ','), - 
amqp_user => $rabbit_hash['user'], - amqp_password => $rabbit_hash['password'], - sql_connection => $sql_connection, - db_host => $db_host, - db_password => $database_password, - max_retries => $max_retries, - max_pool_size => $max_pool_size, - max_overflow => $max_overflow, - idle_timeout => $idle_timeout, - debug => $debug, - verbose => $verbose, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - syslog_log_facility => $syslog_log_facility_heat, - auth_encryption_key => $heat_hash['auth_encryption_key'], -} - -if hiera('heat_ha_engine', true){ - if ($deployment_mode == 'ha') or ($deployment_mode == 'ha_compact') { - include ::heat_ha::engine - } -} - -#------------------------------ - -class heat::docker_resource ( - $enabled = true, - $package_name = 'heat-docker', -) { - if $enabled { - package { 'heat-docker': - ensure => installed, - name => $package_name, - } - - Package['heat-docker'] ~> Service<| title == 'heat-engine' |> - } -} - -if $::osfamily == 'RedHat' { - $docker_resource_package_name = 'openstack-heat-docker' -} elsif $::osfamily == 'Debian' { - $docker_resource_package_name = 'heat-docker' -} - -class { 'heat::docker_resource' : - package_name => $docker_resource_package_name, -} - -$haproxy_stats_url = "http://${service_endpoint}:10000/;csv" - -haproxy_backend_status { 'keystone-admin' : - name => 'keystone-2', - count => '200', - step => '6', - url => $haproxy_stats_url, -} - -class { 'heat::keystone::domain' : - auth_url => "http://${service_endpoint}:35357/v2.0", - keystone_admin => $keystone_user, - keystone_password => $heat_hash['user_password'], - keystone_tenant => $keystone_tenant, - domain_name => 'heat', - domain_admin => 'heat_admin', - domain_password => $heat_hash['user_password'], -} - -Class['heat'] -> -Haproxy_backend_status['keystone-admin'] -> -Class['heat::keystone::domain'] ~> -Service<| title == 'heat-engine' |> - -###################### - -exec { 'wait_for_heat_config' : - command => 'sync && sleep 3', - provider 
=> 'shell', -} - -Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api'] -Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cfn'] -Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-api-cloudwatch'] -Heat_config <||> -> Exec['wait_for_heat_config'] -> Service['heat-engine'] - -###################### - -class mysql::server {} -class mysql::config {} -include mysql::server -include mysql::config diff --git a/f2s/resources/heat/meta.yaml b/f2s/resources/heat/meta.yaml deleted file mode 100644 index 362afea5..00000000 --- a/f2s/resources/heat/meta.yaml +++ /dev/null @@ -1,55 +0,0 @@ -id: heat -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - amqp_hosts: - value: null - database_vip: - value: null - debug: - value: null - deployment_mode: - value: null - fqdn: - value: null - heat: - value: null - heat_db_name: - value: null - heat_ha_engine: - value: null - idle_timeout: - value: null - management_vip: - value: null - max_overflow: - value: null - max_pool_size: - value: null - max_retries: - value: null - network_scheme: - value: null - public_ssl: - value: null - puppet_modules: - value: null - rabbit_hash: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - syslog_log_facility_heat: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/hiera/actions/run.pp b/f2s/resources/hiera/actions/run.pp deleted file mode 100644 index e23a1cb7..00000000 --- a/f2s/resources/hiera/actions/run.pp +++ /dev/null @@ -1,75 +0,0 @@ -notice('MODULAR: hiera.pp') - -$deep_merge_package_name = $::osfamily ? 
{ - /RedHat/ => 'rubygem-deep_merge', - /Debian/ => 'ruby-deep-merge', -} - -$data_dir = '/etc/hiera' -$data = [ - 'override/node/%{::fqdn}', - 'override/class/%{calling_class}', - 'override/module/%{calling_module}', - 'override/plugins', - 'override/common', - 'class/%{calling_class}', - 'module/%{calling_module}', - 'nodes', - 'globals', - 'astute' -] -$astute_data_file = '/etc/astute.yaml' -$hiera_main_config = '/etc/hiera.yaml' -$hiera_puppet_config = '/etc/puppet/hiera.yaml' -$hiera_data_file = "${data_dir}/astute.yaml" - -File { - owner => 'root', - group => 'root', - mode => '0644', -} - -$hiera_config_content = inline_template(' ---- -:backends: - - yaml - -:hierarchy: -<% @data.each do |name| -%> - - <%= name %> -<% end -%> - -:yaml: - :datadir: <%= @data_dir %> -:merge_behavior: deeper -:logger: noop -') - -file { 'hiera_data_dir' : - ensure => 'directory', - path => $data_dir, -} - -file { 'hiera_config' : - ensure => 'present', - path => $hiera_main_config, - content => $hiera_config_content, -} - -file { 'hiera_data_astute' : - ensure => 'symlink', - path => $hiera_data_file, - target => $astute_data_file, -} - -file { 'hiera_puppet_config' : - ensure => 'symlink', - path => $hiera_puppet_config, - target => $hiera_main_config, -} - -# needed to support the 'deeper' merge_behavior setting for hiera -package { 'rubygem-deep_merge': - ensure => present, - name => $deep_merge_package_name, -} diff --git a/f2s/resources/hiera/meta.yaml b/f2s/resources/hiera/meta.yaml deleted file mode 100644 index 0b9a6580..00000000 --- a/f2s/resources/hiera/meta.yaml +++ /dev/null @@ -1,11 +0,0 @@ -id: hiera -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null diff --git a/f2s/resources/horizon/actions/run.pp b/f2s/resources/horizon/actions/run.pp deleted file mode 100644 index 23bd602c..00000000 --- a/f2s/resources/horizon/actions/run.pp +++ /dev/null @@ -1,79 +0,0 @@ -notice('MODULAR: 
horizon.pp') - -prepare_network_config(hiera('network_scheme', {})) -$horizon_hash = hiera_hash('horizon', {}) -$service_endpoint = hiera('service_endpoint') -$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) -$memcache_address_map = get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache') -$bind_address = get_network_role_property('horizon', 'ipaddr') -$neutron_advanced_config = hiera_hash('neutron_advanced_configuration', {}) -$public_ssl = hiera('public_ssl') -$ssl_no_verify = $public_ssl['horizon'] - -if $horizon_hash['secret_key'] { - $secret_key = $horizon_hash['secret_key'] -} else { - $secret_key = 'dummy_secret_key' -} - -$neutron_dvr = pick($neutron_advanced_config['neutron_dvr'], false) - -$keystone_scheme = 'http' -$keystone_host = $service_endpoint -$keystone_port = '5000' -$keystone_api = 'v2.0' -$keystone_url = "${keystone_scheme}://${keystone_host}:${keystone_port}/${keystone_api}" - -$neutron_options = {'enable_distributed_router' => $neutron_dvr} - -class { 'openstack::horizon': - secret_key => $secret_key, - cache_server_ip => ipsort(values($memcache_address_map)), - package_ensure => hiera('horizon_package_ensure', 'installed'), - bind_address => $bind_address, - cache_server_port => hiera('memcache_server_port', '11211'), - cache_backend => 'django.core.cache.backends.memcached.MemcachedCache', - cache_options => {'SOCKET_TIMEOUT' => 1,'SERVER_RETRIES' => 1,'DEAD_RETRY' => 1}, - neutron => hiera('use_neutron'), - keystone_url => $keystone_url, - use_ssl => hiera('horizon_use_ssl', false), - ssl_no_verify => $ssl_no_verify, - verbose => pick($horizon_hash['verbose'], hiera('verbose', true)), - debug => pick($horizon_hash['debug'], hiera('debug')), - use_syslog => hiera('use_syslog', true), - nova_quota => hiera('nova_quota'), - servername => hiera('public_vip'), - neutron_options => $neutron_options, -} - -$haproxy_stats_url = "http://${service_endpoint}:10000/;csv" - 
-haproxy_backend_status { 'keystone-admin' : - name => 'keystone-2', - count => '30', - step => '3', - url => $haproxy_stats_url, -} - -haproxy_backend_status { 'keystone-public' : - name => 'keystone-1', - count => '30', - step => '3', - url => $haproxy_stats_url, -} - -Class['openstack::horizon'] -> Haproxy_backend_status['keystone-admin'] -Class['openstack::horizon'] -> Haproxy_backend_status['keystone-public'] - -# TODO(aschultz): remove this if openstack-dashboard stops installing -# openstack-dashboard-apache -if $::osfamily == 'Debian' { - # LP#1513252 - remove this package if it's installed by the - # openstack-dashboard package installation. - package { 'openstack-dashboard-apache': - ensure => 'absent', - require => Package['openstack-dashboard'] - } ~> Service[$::apache::params::service_name] -} - -include ::tweaks::apache_wrappers diff --git a/f2s/resources/horizon/meta.yaml b/f2s/resources/horizon/meta.yaml deleted file mode 100644 index 2e8ad481..00000000 --- a/f2s/resources/horizon/meta.yaml +++ /dev/null @@ -1,47 +0,0 @@ -id: horizon -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - apache_ports: - value: null - debug: - value: null - fqdn: - value: null - horizon: - value: null - horizon_package_ensure: - value: null - horizon_use_ssl: - value: null - memcache_roles: - value: null - memcache_server_port: - value: null - network_metadata: - value: null - network_scheme: - value: null - neutron_advanced_configuration: - value: null - nova_quota: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - service_endpoint: - value: null - use_neutron: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/hosts/actions/run.pp b/f2s/resources/hosts/actions/run.pp deleted file mode 100644 index e82bddff..00000000 --- a/f2s/resources/hosts/actions/run.pp +++ /dev/null @@ -1,5 +0,0 @@ -notice('MODULAR: hosts.pp') - 
-class { "l23network::hosts_file": - nodes => hiera('nodes'), -} diff --git a/f2s/resources/hosts/meta.yaml b/f2s/resources/hosts/meta.yaml deleted file mode 100644 index 2ba7e90f..00000000 --- a/f2s/resources/hosts/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: hosts -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - nodes: - value: null - puppet_modules: - value: null diff --git a/f2s/resources/ironic-api/actions/run.pp b/f2s/resources/ironic-api/actions/run.pp deleted file mode 100644 index b4fc31ef..00000000 --- a/f2s/resources/ironic-api/actions/run.pp +++ /dev/null @@ -1,61 +0,0 @@ -notice('MODULAR: ironic/ironic.pp') - -$ironic_hash = hiera_hash('ironic', {}) -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') - -$network_metadata = hiera_hash('network_metadata', {}) - -$database_vip = hiera('database_vip') -$keystone_endpoint = hiera('service_endpoint') -$neutron_endpoint = hiera('neutron_endpoint', $management_vip) -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") -$debug = hiera('debug', false) -$verbose = hiera('verbose', true) -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER') -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$rabbit_ha_queues = hiera('rabbit_ha_queues') -$amqp_hosts = hiera('amqp_hosts') -$amqp_port = hiera('amqp_port', '5673') -$rabbit_hosts = split($amqp_hosts, ',') -$neutron_config = hiera_hash('quantum_settings') - -$db_host = pick($ironic_hash['db_host'], $database_vip) -$db_user = pick($ironic_hash['db_user'], 'ironic') -$db_name = pick($ironic_hash['db_name'], 'ironic') -$db_password = pick($ironic_hash['db_password'], 'ironic') -$database_connection = "mysql://${db_name}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60" - -$ironic_tenant = pick($ironic_hash['tenant'],'services') -$ironic_user = pick($ironic_hash['auth_name'],'ironic') 
-$ironic_user_password = pick($ironic_hash['user_password'],'ironic') - -prepare_network_config(hiera('network_scheme', {})) - -$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr'] - -class { 'ironic': - verbose => $verbose, - debug => $debug, - rabbit_hosts => $rabbit_hosts, - rabbit_port => $amqp_port, - rabbit_userid => $rabbit_hash['user'], - rabbit_password => $rabbit_hash['password'], - amqp_durable_queues => $rabbit_ha_queues, - use_syslog => $use_syslog, - log_facility => $syslog_log_facility_ironic, - database_connection => $database_connection, - glance_api_servers => $glance_api_servers, -} - -class { 'ironic::client': } - -class { 'ironic::api': - host_ip => get_network_role_property('ironic/api', 'ipaddr'), - auth_host => $keystone_endpoint, - admin_tenant_name => $ironic_tenant, - admin_user => $ironic_user, - admin_password => $ironic_user_password, - neutron_url => "http://${neutron_endpoint}:9696", -} diff --git a/f2s/resources/ironic-api/meta.yaml b/f2s/resources/ironic-api/meta.yaml deleted file mode 100644 index df25c34b..00000000 --- a/f2s/resources/ironic-api/meta.yaml +++ /dev/null @@ -1,11 +0,0 @@ -id: ironic-api -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ironic: - value: null - puppet_modules: - value: null diff --git a/f2s/resources/ironic-compute/actions/run.pp b/f2s/resources/ironic-compute/actions/run.pp deleted file mode 100644 index b637a8e6..00000000 --- a/f2s/resources/ironic-compute/actions/run.pp +++ /dev/null @@ -1,98 +0,0 @@ -##################################################################################### -### ironic-compute is additional compute role with compute_driver=ironic. ### -### It can't be assigned with nova-compute to the same node. It doesn't include ### -### openstack::compute class it is configured separately. 
### -##################################################################################### - -notice('MODULAR: ironic/ironic-compute.pp') - -$ironic_hash = hiera_hash('ironic', {}) -$nova_hash = hiera_hash('nova', {}) -$management_vip = hiera('management_vip') -$database_vip = hiera('database_vip') -$service_endpoint = hiera('service_endpoint') -$neutron_endpoint = hiera('neutron_endpoint', $management_vip) -$ironic_endpoint = hiera('ironic_endpoint', $management_vip) -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") -$debug = hiera('debug', false) -$verbose = hiera('verbose', true) -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_LOCAL0') -$syslog_log_facility_nova = hiera('syslog_log_facility_nova', 'LOG_LOCAL6') -$amqp_hosts = hiera('amqp_hosts') -$rabbit_hash = hiera_hash('rabbit_hash') -$nova_report_interval = hiera('nova_report_interval') -$nova_service_down_time = hiera('nova_service_down_time') -$neutron_config = hiera_hash('quantum_settings') - -$ironic_tenant = pick($ironic_hash['tenant'],'services') -$ironic_user = pick($ironic_hash['auth_name'],'ironic') -$ironic_user_password = pick($ironic_hash['user_password'],'ironic') - -$db_host = pick($nova_hash['db_host'], $database_vip) -$db_user = pick($nova_hash['db_user'], 'nova') -$db_name = pick($nova_hash['db_name'], 'nova') -$db_password = pick($nova_hash['db_password'], 'nova') -$database_connection = "mysql://${db_name}:${db_password}@${db_host}/${db_name}?read_timeout=60" - -$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) -$cache_server_ip = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes,'mgmt/memcache'))) -$memcached_addresses = suffix($cache_server_ip, inline_template(":<%= @cache_server_port %>")) -$notify_on_state_change = 'vm_and_task_state' - - -class { '::nova': - install_utilities => false, - ensure_package => installed, - 
database_connection => $database_connection, - rpc_backend => 'nova.openstack.common.rpc.impl_kombu', - #FIXME(bogdando) we have to split amqp_hosts until all modules synced - rabbit_hosts => split($amqp_hosts, ','), - rabbit_userid => $rabbit_hash['user'], - rabbit_password => $rabbit_hash['password'], - image_service => 'nova.image.glance.GlanceImageService', - glance_api_servers => $glance_api_servers, - verbose => $verbose, - debug => $debug, - use_syslog => $use_syslog, - log_facility => $syslog_log_facility_nova, - state_path => $nova_hash['state_path'], - report_interval => $nova_report_interval, - service_down_time => $nova_service_down_time, - notify_on_state_change => $notify_on_state_change, - memcached_servers => $memcached_addresses, -} - - -class { '::nova::compute': - ensure_package => installed, - enabled => true, - vnc_enabled => false, - force_config_drive => $nova_hash['force_config_drive'], - #NOTE(bogdando) default became true in 4.0.0 puppet-nova (was false) - neutron_enabled => true, - default_availability_zone => $nova_hash['default_availability_zone'], - default_schedule_zone => $nova_hash['default_schedule_zone'], - reserved_host_memory => '0', -} - - -class { 'nova::compute::ironic': - admin_url => "http://${service_endpoint}:35357/v2.0", - admin_user => $ironic_user, - admin_tenant_name => $ironic_tenant, - admin_passwd => $ironic_user_password, - api_endpoint => "http://${ironic_endpoint}:6385/v1", -} - -class { 'nova::network::neutron': - neutron_admin_password => $neutron_config['keystone']['admin_password'], - neutron_url => "http://${neutron_endpoint}:9696", - neutron_admin_auth_url => "http://${service_endpoint}:35357/v2.0", -} - -file { '/etc/nova/nova-compute.conf': - ensure => absent, - require => Package['nova-compute'], -} ~> Service['nova-compute'] - diff --git a/f2s/resources/ironic-compute/meta.yaml b/f2s/resources/ironic-compute/meta.yaml deleted file mode 100644 index 83e62fa0..00000000 --- 
a/f2s/resources/ironic-compute/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: ironic-compute -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/ironic-conductor/actions/run.pp b/f2s/resources/ironic-conductor/actions/run.pp deleted file mode 100644 index 1dc47155..00000000 --- a/f2s/resources/ironic-conductor/actions/run.pp +++ /dev/null @@ -1,121 +0,0 @@ -notice('MODULAR: ironic/ironic-conductor.pp') - -$network_scheme = hiera('network_scheme', {}) -prepare_network_config($network_scheme) -$baremetal_address = get_network_role_property('ironic/baremetal', 'ipaddr') -$ironic_hash = hiera_hash('ironic', {}) -$management_vip = hiera('management_vip') - -$network_metadata = hiera_hash('network_metadata', {}) -$baremetal_vip = $network_metadata['vips']['baremetal']['ipaddr'] - -$database_vip = hiera('database_vip') -$service_endpoint = hiera('service_endpoint') -$neutron_endpoint = hiera('neutron_endpoint', $management_vip) -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") -$amqp_hosts = hiera('amqp_hosts') -$rabbit_hosts = split($amqp_hosts, ',') -$debug = hiera('debug', false) -$verbose = hiera('verbose', true) -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_ironic = hiera('syslog_log_facility_ironic', 'LOG_USER') -$rabbit_hash = hiera_hash('rabbit_hash') -$rabbit_ha_queues = hiera('rabbit_ha_queues') - -$ironic_tenant = pick($ironic_hash['tenant'],'services') -$ironic_user = pick($ironic_hash['auth_name'],'ironic') -$ironic_user_password = pick($ironic_hash['user_password'],'ironic') -$ironic_swift_tempurl_key = pick($ironic_hash['swift_tempurl_key'],'ironic') - -$db_host = pick($ironic_hash['db_host'], $database_vip) -$db_user = pick($ironic_hash['db_user'], 'ironic') -$db_name = pick($ironic_hash['db_name'], 'ironic') -$db_password = pick($ironic_hash['db_password'], 'ironic') 
-$database_connection = "mysql://${db_name}:${db_password}@${db_host}/${db_name}?charset=utf8&read_timeout=60" - -$tftp_root = '/var/lib/ironic/tftpboot' - -package { 'ironic-fa-deploy': - ensure => 'present', -} - -class { '::ironic': - verbose => $verbose, - debug => $debug, - enabled_drivers => ['fuel_ssh', 'fuel_ipmitool', 'fake'], - rabbit_hosts => $rabbit_hosts, - rabbit_userid => $rabbit_hash['user'], - rabbit_password => $rabbit_hash['password'], - amqp_durable_queues => $rabbit_ha_queues, - use_syslog => $use_syslog, - log_facility => $syslog_log_facility_ironic, - database_connection => $database_connection, - glance_api_servers => $glance_api_servers, -} - -class { '::ironic::client': } - -class { '::ironic::conductor': } - -class { '::ironic::drivers::pxe': - tftp_server => $baremetal_address, - tftp_root => $tftp_root, - tftp_master_path => "${tftp_root}/master_images", -} - -ironic_config { - 'neutron/url': value => "http://${neutron_endpoint}:9696"; - 'keystone_authtoken/auth_uri': value => "http://${service_endpoint}:5000/"; - 'keystone_authtoken/auth_host': value => $service_endpoint; - 'keystone_authtoken/admin_tenant_name': value => $ironic_tenant; - 'keystone_authtoken/admin_user': value => $ironic_user; - 'keystone_authtoken/admin_password': value => $ironic_user_password, secret => true; - 'glance/swift_temp_url_key': value => $ironic_swift_tempurl_key; - 'glance/swift_endpoint_url': value => "http://${baremetal_vip}:8080"; - 'conductor/api_url': value => "http://${baremetal_vip}:6385"; -} - -file { $tftp_root: - ensure => directory, - owner => 'ironic', - group => 'ironic', - mode => '0755', - require => Class['ironic'], -} - -file { "${tftp_root}/pxelinux.0": - ensure => present, - source => '/usr/lib/syslinux/pxelinux.0', - require => Package['syslinux'], -} - -file { "${tftp_root}/map-file": - content => "r ^([^/]) ${tftp_root}/\\1", -} - -class { '::tftp': - username => 'ironic', - directory => $tftp_root, - options => "--map-file 
${tftp_root}/map-file", - inetd => false, - require => File["${tftp_root}/map-file"], -} - -package { 'syslinux': - ensure => 'present', -} - -package { 'ipmitool': - ensure => 'present', - before => Class['::ironic::conductor'], -} - -file { "/etc/ironic/fuel_key": - ensure => present, - source => '/var/lib/astute/ironic/ironic', - owner => 'ironic', - group => 'ironic', - mode => '0600', - require => Class['ironic'], -} - diff --git a/f2s/resources/ironic-conductor/meta.yaml b/f2s/resources/ironic-conductor/meta.yaml deleted file mode 100644 index 3ccd6f0a..00000000 --- a/f2s/resources/ironic-conductor/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: ironic-conductor -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/ironic-db/actions/run.pp b/f2s/resources/ironic-db/actions/run.pp deleted file mode 100644 index b663b73f..00000000 --- a/f2s/resources/ironic-db/actions/run.pp +++ /dev/null @@ -1,51 +0,0 @@ -notice('MODULAR: ironic/db.pp') - -$ironic_hash = hiera_hash('ironic', {}) -$mysql_hash = hiera_hash('mysql', {}) -$database_vip = hiera('database_vip') - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$db_user = pick($ironic_hash['db_user'], 'ironic') -$db_name = pick($ironic_hash['db_name'], 'ironic') -$db_password = pick($ironic_hash['db_password'], $mysql_root_password) - -$db_host = pick($ironic_hash['db_host'], $database_vip) -$db_create = pick($ironic_hash['db_create'], $mysql_db_create) -$db_root_user = pick($ironic_hash['root_user'], $mysql_root_user) -$db_root_password = pick($ironic_hash['root_password'], $mysql_root_password) - -$allowed_hosts = [ hiera('node_name'), 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) -validate_string($database_vip) - -if $db_create { - class { 
'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'ironic::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['ironic::db::mysql'] -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server diff --git a/f2s/resources/ironic-db/meta.yaml b/f2s/resources/ironic-db/meta.yaml deleted file mode 100644 index f2e12eb0..00000000 --- a/f2s/resources/ironic-db/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: ironic-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - ironic: - value: null - mysql: - value: null - mysql_custom_setup_class: - value: null - node_name: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/ironic-keystone/actions/run.pp b/f2s/resources/ironic-keystone/actions/run.pp deleted file mode 100644 index da4f136b..00000000 --- a/f2s/resources/ironic-keystone/actions/run.pp +++ /dev/null @@ -1,39 +0,0 @@ -notice('MODULAR: ironic/keystone.pp') - -$ironic_hash = hiera_hash('ironic', {}) -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') -$public_ssl_hash = hiera('public_ssl') -$ironic_tenant = pick($ironic_hash['tenant'],'services') -$ironic_user = pick($ironic_hash['auth_name'],'ironic') -$ironic_user_password = pick($ironic_hash['user_password'],'ironic') -$configure_endpoint = pick($ironic_hash['configure_endpoint'], true) -$configure_user = pick($ironic_hash['configure_user'], true) -$configure_user_role = pick($ironic_hash['configure_user_role'], true) -$service_name = pick($ironic_hash['service_name'], 'ironic') - -$public_address 
= $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} - -$region = hiera('region', 'RegionOne') -$public_url = "${public_protocol}://${public_address}:6385" -$admin_url = "http://${management_vip}:6385" -$internal_url = "http://${management_vip}:6385" - -class { 'ironic::keystone::auth': - password => $ironic_user_password, - region => $region, - public_url => $public_url, - internal_url => $internal_url, - admin_url => $admin_url, - configure_endpoint => $configure_endpoint, - configure_user => $configure_user, - configure_user_role => $configure_user_role, - service_name => $service_name, -} diff --git a/f2s/resources/ironic-keystone/meta.yaml b/f2s/resources/ironic-keystone/meta.yaml deleted file mode 100644 index 54d62f52..00000000 --- a/f2s/resources/ironic-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: ironic-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - ironic: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null diff --git a/f2s/resources/keystone-db/actions/run.pp b/f2s/resources/keystone-db/actions/run.pp deleted file mode 100644 index f6d5947c..00000000 --- a/f2s/resources/keystone-db/actions/run.pp +++ /dev/null @@ -1,54 +0,0 @@ -notice('MODULAR: keystone/db.pp') - -$node_name = hiera('node_name') -$network_metadata = hiera_hash('network_metadata', {}) - -$keystone_hash = hiera_hash('keystone', {}) -$mysql_hash = hiera_hash('mysql', {}) -$database_vip = hiera('database_vip') - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$db_user = pick($keystone_hash['db_user'], 'keystone') 
-$db_name = pick($keystone_hash['db_name'], 'keystone') -$db_password = pick($keystone_hash['db_password'], $mysql_root_password) - -$db_host = pick($keystone_hash['db_host'], $database_vip) -$db_create = pick($keystone_hash['db_create'], $mysql_db_create) -$db_root_user = pick($keystone_hash['root_user'], $mysql_root_user) -$db_root_password = pick($keystone_hash['root_password'], $mysql_root_password) - -$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] - -if $db_create { - - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'keystone::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['keystone::db::mysql'] - - -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server diff --git a/f2s/resources/keystone-db/meta.yaml b/f2s/resources/keystone-db/meta.yaml deleted file mode 100644 index fb74c231..00000000 --- a/f2s/resources/keystone-db/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -id: keystone-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - keystone: - value: null - mysql: - value: null - mysql_custom_setup_class: - value: null - network_metadata: - value: null - node_name: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/keystone/actions/run.pp b/f2s/resources/keystone/actions/run.pp deleted file mode 100644 index 4582ec0c..00000000 --- a/f2s/resources/keystone/actions/run.pp +++ /dev/null @@ -1,247 +0,0 @@ -notice('MODULAR: keystone.pp') - -$network_scheme = hiera_hash('network_scheme', {}) -$network_metadata = 
hiera_hash('network_metadata', {}) -prepare_network_config($network_scheme) - -$node_name = hiera('node_name') - -$keystone_hash = hiera_hash('keystone', {}) -$verbose = pick($keystone_hash['verbose'], hiera('verbose', true)) -$debug = pick($keystone_hash['debug'], hiera('debug', false)) -$use_neutron = hiera('use_neutron', false) -$use_syslog = hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$access_hash = hiera_hash('access',{}) -$management_vip = hiera('management_vip') -$database_vip = hiera('database_vip') -$public_vip = hiera('public_vip') -$service_endpoint = hiera('service_endpoint') -$glance_hash = hiera_hash('glance', {}) -$nova_hash = hiera_hash('nova', {}) -$cinder_hash = hiera_hash('cinder', {}) -$ceilometer_hash = hiera_hash('ceilometer', {}) -$syslog_log_facility = hiera('syslog_log_facility_keystone') -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$neutron_user_password = hiera('neutron_user_password', false) -$service_workers = pick($keystone_hash['workers'], - min(max($::processorcount, 2), 16)) - -$db_type = 'mysql' -$db_host = pick($keystone_hash['db_host'], $database_vip) -$db_password = $keystone_hash['db_password'] -$db_name = pick($keystone_hash['db_name'], 'keystone') -$db_user = pick($keystone_hash['db_user'], 'keystone') - -$admin_token = $keystone_hash['admin_token'] -$admin_tenant = $access_hash['tenant'] -$admin_email = $access_hash['email'] -$admin_user = $access_hash['user'] -$admin_password = $access_hash['password'] -$region = hiera('region', 'RegionOne') - -$public_ssl_hash = hiera('public_ssl') -$public_service_endpoint = hiera('public_service_endpoint', $public_vip) -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_service_endpoint, -} -$public_cert = $public_ssl_hash['services']? 
{ - true => '/etc/pki/tls/certs/public_haproxy.pem', - default => undef, -} - -$admin_address = $service_endpoint -$local_address_for_bind = get_network_role_property('keystone/api', 'ipaddr') - -$memcache_server_port = hiera('memcache_server_port', '11211') -$memcache_pool_maxsize = '100' -$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) -$memcache_address_map = get_node_to_ipaddr_map_by_network_role($memcache_nodes, 'mgmt/memcache') - -$public_port = '5000' -$admin_port = '35357' -$internal_port = '5000' -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} - -$public_url = "${public_protocol}://${public_address}:${public_port}" -$admin_url = "http://${admin_address}:${admin_port}" -$internal_url = "http://${service_endpoint}:${internal_port}" - -$revoke_driver = 'keystone.contrib.revoke.backends.sql.Revoke' - -$enabled = true -$ssl = false - -$vhost_limit_request_field_size = 'LimitRequestFieldSize 81900' - -$rabbit_password = $rabbit_hash['password'] -$rabbit_user = $rabbit_hash['user'] -$rabbit_hosts = split(hiera('amqp_hosts',''), ',') -$rabbit_virtual_host = '/' - -$max_pool_size = hiera('max_pool_size') -$max_overflow = hiera('max_overflow') -$max_retries = '-1' -$database_idle_timeout = '3600' - -$murano_settings_hash = hiera('murano_settings', {}) -if has_key($murano_settings_hash, 'murano_repo_url') { - $murano_repo_url = $murano_settings_hash['murano_repo_url'] -} else { - $murano_repo_url = 'http://storage.apps.openstack.org' -} - -############################################################################### - -####### KEYSTONE ########### -class { 'openstack::keystone': - verbose => $verbose, - debug => $debug, - db_type => $db_type, - db_host => $db_host, - db_password => $db_password, - db_name => $db_name, - db_user => $db_user, - admin_token => $admin_token, - public_address => $public_address, - public_ssl => $public_ssl_hash['services'], - public_hostname => 
$public_ssl_hash['hostname'], - internal_address => $service_endpoint, - admin_address => $admin_address, - public_bind_host => $local_address_for_bind, - admin_bind_host => $local_address_for_bind, - enabled => $enabled, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - syslog_log_facility => $syslog_log_facility, - region => $region, - memcache_servers => values($memcache_address_map), - memcache_server_port => $memcache_server_port, - memcache_pool_maxsize => $memcache_pool_maxsize, - max_retries => $max_retries, - max_pool_size => $max_pool_size, - max_overflow => $max_overflow, - rabbit_password => $rabbit_password, - rabbit_userid => $rabbit_user, - rabbit_hosts => $rabbit_hosts, - rabbit_virtual_host => $rabbit_virtual_host, - database_idle_timeout => $database_idle_timeout, - revoke_driver => $revoke_driver, - public_url => $public_url, - admin_url => $admin_url, - internal_url => $internal_url, - ceilometer => $ceilometer_hash['enabled'], - service_workers => $service_workers, -} - -####### WSGI ########### - -class { 'osnailyfacter::apache': - listen_ports => hiera_array('apache_ports', ['80', '8888', '5000', '35357']), -} - -class { 'keystone::wsgi::apache': - priority => '05', - threads => 3, - workers => min($::processorcount, 6), - ssl => $ssl, - vhost_custom_fragment => $vhost_limit_request_field_size, - access_log_format => '%h %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"', - - wsgi_script_ensure => $::osfamily ? { - 'RedHat' => 'link', - default => 'file', - }, - wsgi_script_source => $::osfamily ? { - # TODO: (adidenko) use file from package for Debian, when - # https://bugs.launchpad.net/fuel/+bug/1476688 is fixed. 
- # 'Debian' => '/usr/share/keystone/wsgi.py', - 'RedHat' => '/usr/share/keystone/keystone.wsgi', - default => undef, - }, -} - -include ::tweaks::apache_wrappers - -############################################################################### - -class { 'keystone::roles::admin': - admin => $admin_user, - password => $admin_password, - email => $admin_email, - admin_tenant => $admin_tenant, -} - -class { 'openstack::auth_file': - admin_user => $admin_user, - admin_password => $admin_password, - admin_tenant => $admin_tenant, - region_name => $region, - controller_node => $service_endpoint, - murano_repo_url => $murano_repo_url, - cacert => $public_cert -} - -# Get paste.ini source -include keystone::params -$keystone_paste_ini = $::keystone::params::paste_config ? { - undef => '/etc/keystone/keystone-paste.ini', - default => $::keystone::params::paste_config, -} - -# Make sure admin token auth middleware is in place -exec { 'add_admin_token_auth_middleware': - path => ['/bin', '/usr/bin'], - command => "sed -i 's/\\( token_auth \\)/\\1admin_token_auth /' $keystone_paste_ini", - unless => "fgrep -q ' admin_token_auth' $keystone_paste_ini", - require => Package['keystone'], -} - -#Can't use openrc to create admin user -exec { 'purge_openrc': - path => '/bin:/usr/bin:/sbin:/usr/sbin', - command => 'rm -f /root/openrc', - onlyif => 'test -f /root/openrc', -} - -Exec <| title == 'keystone-manage db_sync' |> ~> -Exec <| title == 'purge_openrc' |> - -Exec <| title == 'add_admin_token_auth_middleware' |> -> -Exec <| title == 'keystone-manage db_sync' |> -> -Exec <| title == 'purge_openrc' |> -> -Class['keystone::roles::admin'] -> -Class['openstack::auth_file'] - -$haproxy_stats_url = "http://${service_endpoint}:10000/;csv" - -haproxy_backend_status { 'keystone-public' : - name => 'keystone-1', - url => $haproxy_stats_url, -} - -haproxy_backend_status { 'keystone-admin' : - name => 'keystone-2', - url => $haproxy_stats_url, -} - -Service['keystone'] -> 
Haproxy_backend_status<||> -Service<| title == 'httpd' |> -> Haproxy_backend_status<||> -Haproxy_backend_status<||> -> Class['keystone::roles::admin'] - -####### Disable upstart startup on install ####### -if ($::operatingsystem == 'Ubuntu') { - tweaks::ubuntu_service_override { 'keystone': - package_name => 'keystone', - } -} - -# Override confguration options -$override_configuration = hiera_hash('configuration', {}) -override_resources { 'keystone_config': - data => $override_configuration['keystone_config'] -} diff --git a/f2s/resources/keystone/meta.yaml b/f2s/resources/keystone/meta.yaml deleted file mode 100644 index 3d6630ce..00000000 --- a/f2s/resources/keystone/meta.yaml +++ /dev/null @@ -1,77 +0,0 @@ -id: keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access: - value: null - amqp_hosts: - value: null - apache_ports: - value: null - ceilometer: - value: null - cinder: - value: null - database_vip: - value: null - debug: - value: null - fqdn: - value: null - glance: - value: null - keystone: - value: null - management_vip: - value: null - max_overflow: - value: null - max_pool_size: - value: null - memcache_roles: - value: null - memcache_server_port: - value: null - murano_settings: - value: null - network_metadata: - value: null - network_scheme: - value: null - neutron_user_password: - value: null - node_name: - value: null - nodes: - value: null - nova: - value: null - public_service_endpoint: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - rabbit_hash: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - syslog_log_facility_keystone: - value: null - use_neutron: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/logging/actions/run.pp b/f2s/resources/logging/actions/run.pp deleted file mode 100644 index 44bbab58..00000000 --- 
a/f2s/resources/logging/actions/run.pp +++ /dev/null @@ -1,67 +0,0 @@ -notice('MODULAR: logging.pp') - -$base_syslog_hash = hiera('base_syslog_hash') -$syslog_hash = hiera('syslog_hash') -$use_syslog = hiera('use_syslog', true) -$debug = pick($syslog_hash['debug'], hiera('debug', false)) -$nodes_hash = hiera('nodes', {}) -$roles = node_roles($nodes_hash, hiera('uid')) - -################################################## - -$base_syslog_rserver = { - 'remote_type' => 'tcp', - 'server' => $base_syslog_hash['syslog_server'], - 'port' => $base_syslog_hash['syslog_port'] -} - -$syslog_rserver = { - 'remote_type' => $syslog_hash['syslog_transport'], - 'server' => $syslog_hash['syslog_server'], - 'port' => $syslog_hash['syslog_port'], -} - -if $syslog_hash['metadata']['enabled'] { - $rservers = [$base_syslog_rserver, $syslog_rserver] -} else { - $rservers = [$base_syslog_rserver] -} - -if $use_syslog { - if ($::operatingsystem == 'Ubuntu') { - # ensure the var log folder permissions are correct even if it's a mount - # LP#1489347 - file { '/var/log': - owner => 'root', - group => 'syslog', - mode => '0775', - } - } - - if member($roles, 'ironic') { - $ironic_collector = true - } - - class { '::openstack::logging': - role => 'client', - show_timezone => true, - # log both locally include auth, and remote - log_remote => true, - log_local => true, - log_auth_local => true, - # keep four weekly log rotations, - # force rotate if 300M size have exceeded - rotation => 'weekly', - keep => '4', - minsize => '10M', - maxsize => '100M', - # remote servers to send logs to - rservers => $rservers, - # should be true, if client is running at virtual node - virtual => str2bool($::is_virtual), - # Rabbit doesn't support syslog directly - rabbit_log_level => 'NOTICE', - debug => $debug, - ironic_collector => $ironic_collector, - } -} diff --git a/f2s/resources/logging/meta.yaml b/f2s/resources/logging/meta.yaml deleted file mode 100644 index 7ce66c81..00000000 --- 
a/f2s/resources/logging/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: logging -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - base_syslog_hash: - value: null - debug: - value: null - fqdn: - value: null - node_role: - value: null - nodes: - value: null - puppet_modules: - value: null - role: - value: null - syslog_hash: - value: null - uid: - value: null - use_syslog: - value: null diff --git a/f2s/resources/memcached/actions/run.pp b/f2s/resources/memcached/actions/run.pp deleted file mode 100644 index 877a381f..00000000 --- a/f2s/resources/memcached/actions/run.pp +++ /dev/null @@ -1,8 +0,0 @@ -notice('MODULAR: memcached.pp') - -prepare_network_config(hiera('network_scheme', {})) - -class { 'memcached': - listen_ip => get_network_role_property('mgmt/memcache', 'ipaddr'), - max_memory => '50%', -} diff --git a/f2s/resources/memcached/meta.yaml b/f2s/resources/memcached/meta.yaml deleted file mode 100644 index 0800af06..00000000 --- a/f2s/resources/memcached/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: memcached -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/murano-db/actions/run.pp b/f2s/resources/murano-db/actions/run.pp deleted file mode 100644 index ddc326ba..00000000 --- a/f2s/resources/murano-db/actions/run.pp +++ /dev/null @@ -1,57 +0,0 @@ -notice('MODULAR: murano/db.pp') - -$node_name = hiera('node_name') -$murano_hash = hiera_hash('murano_hash', {}) -$murano_enabled = pick($murano_hash['enabled'], false) -$mysql_hash = hiera_hash('mysql_hash', {}) -$management_vip = hiera('management_vip', undef) -$database_vip = hiera('database_vip') - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$db_user = pick($murano_hash['db_user'], 
'murano') -$db_name = pick($murano_hash['db_name'], 'murano') -$db_password = pick($murano_hash['db_password'], $mysql_root_password) - -$db_host = pick($murano_hash['db_host'], $database_vip) -$db_create = pick($murano_hash['db_create'], $mysql_db_create) -$db_root_user = pick($murano_hash['root_user'], $mysql_root_user) -$db_root_password = pick($murano_hash['root_password'], $mysql_root_password) - -$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) - -if $murano_enabled and $db_create { - - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'murano::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['murano::db::mysql'] - -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server -class murano::api {} -include murano::api diff --git a/f2s/resources/murano-db/meta.yaml b/f2s/resources/murano-db/meta.yaml deleted file mode 100644 index d0ef0ebe..00000000 --- a/f2s/resources/murano-db/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -id: murano-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - murano: - value: null - murano_hash: - value: null - mysql_hash: - value: null - node_name: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/murano-keystone/actions/run.pp b/f2s/resources/murano-keystone/actions/run.pp deleted file mode 100644 index 4d132d74..00000000 --- a/f2s/resources/murano-keystone/actions/run.pp +++ /dev/null @@ -1,36 +0,0 @@ -notice('MODULAR: murano/keystone.pp') - 
-$murano_hash = hiera_hash('murano_hash', {}) -$public_ip = hiera('public_vip') -$management_ip = hiera('management_vip') -$public_ssl = hiera('public_ssl') -$region = hiera('region', 'RegionOne') - - -$public_protocol = $public_ssl['services'] ? { - true => 'https', - default => 'http', -} - -$public_address = $public_ssl['services'] ? { - true => $public_ssl['hostname'], - default => $public_ip, -} - -$api_bind_port = '8082' - -$tenant = pick($murano_hash['tenant'], 'services') -$public_url = "${public_protocol}://${public_address}:${api_bind_port}" -$admin_url = "http://${management_ip}:${api_bind_port}" - -################################################################# - -class { 'murano::keystone::auth': - password => $murano_hash['user_password'], - service_type => 'application_catalog', - region => $region, - tenant => $tenant, - public_url => $public_url, - admin_url => $admin_url, - internal_url => $admin_url, -} diff --git a/f2s/resources/murano-keystone/meta.yaml b/f2s/resources/murano-keystone/meta.yaml deleted file mode 100644 index 47f2ecce..00000000 --- a/f2s/resources/murano-keystone/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -id: murano-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - murano_hash: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null diff --git a/f2s/resources/murano-rabbitmq/actions/run.pp b/f2s/resources/murano-rabbitmq/actions/run.pp deleted file mode 100644 index fa766e01..00000000 --- a/f2s/resources/murano-rabbitmq/actions/run.pp +++ /dev/null @@ -1,13 +0,0 @@ -notice('MODULAR: murano/rabbitmq.pp') - -$rabbit_hash = hiera_hash('rabbit_hash', {}) - -################################################################# - -rabbitmq_vhost { '/murano': } - -rabbitmq_user_permissions { 
"${rabbit_hash['user']}@/murano": - configure_permission => '.*', - read_permission => '.*', - write_permission => '.*', -} diff --git a/f2s/resources/murano-rabbitmq/meta.yaml b/f2s/resources/murano-rabbitmq/meta.yaml deleted file mode 100644 index 09230cf3..00000000 --- a/f2s/resources/murano-rabbitmq/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: murano-rabbitmq -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/murano/actions/run.pp b/f2s/resources/murano/actions/run.pp deleted file mode 100644 index b8ad753e..00000000 --- a/f2s/resources/murano/actions/run.pp +++ /dev/null @@ -1,151 +0,0 @@ -notice('MODULAR: murano.pp') - -prepare_network_config(hiera('network_scheme', {})) - -$murano_hash = hiera_hash('murano_hash', {}) -$murano_settings_hash = hiera_hash('murano_settings', {}) -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$neutron_config = hiera_hash('neutron_config', {}) -$node_role = hiera('node_role') -$public_ip = hiera('public_vip') -$database_ip = hiera('database_vip') -$management_ip = hiera('management_vip') -$region = hiera('region', 'RegionOne') -$use_neutron = hiera('use_neutron', false) -$service_endpoint = hiera('service_endpoint') -$syslog_log_facility_murano = hiera('syslog_log_facility_murano') -$debug = pick($murano_hash['debug'], hiera('debug', false)) -$verbose = pick($murano_hash['verbose'], hiera('verbose', true)) -$use_syslog = hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$rabbit_ha_queues = hiera('rabbit_ha_queues') -$amqp_port = hiera('amqp_port') -$amqp_hosts = hiera('amqp_hosts') -$public_ssl = hiera_hash('public_ssl', {}) - -################################################################# - -if $murano_hash['enabled'] { - $public_protocol = pick($public_ssl['services'], false) ? 
{ - true => 'https', - default => 'http', - } - - $public_address = pick($public_ssl['services'], false) ? { - true => pick($public_ssl['hostname']), - default => $public_ip, - } - - $firewall_rule = '202 murano-api' - - $api_bind_port = '8082' - $api_bind_host = get_network_role_property('murano/api', 'ipaddr') - - $murano_user = pick($murano_hash['user'], 'murano') - $tenant = pick($murano_hash['tenant'], 'services') - $internal_url = "http://${api_bind_host}:${api_bind_port}" - $db_user = pick($murano_hash['db_user'], 'murano') - $db_name = pick($murano_hash['db_name'], 'murano') - $db_password = pick($murano_hash['db_password']) - $db_host = pick($murano_hash['db_host'], $database_ip) - $read_timeout = '60' - $sql_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?read_timeout=${read_timeout}" - - $external_network = $use_neutron ? { - true => get_ext_net_name($neutron_config['predefined_networks']), - default => undef, - } - - $repository_url = has_key($murano_settings_hash, 'murano_repo_url') ? 
{ - true => $murano_settings_hash['murano_repo_url'], - default => 'http://storage.apps.openstack.org', - } - - ####### Disable upstart startup on install ####### - tweaks::ubuntu_service_override { ['murano-api', 'murano-engine']: - package_name => 'murano', - } - - firewall { $firewall_rule : - dport => $api_bind_port, - proto => 'tcp', - action => 'accept', - } - - class { 'murano' : - verbose => $verbose, - debug => $debug, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - log_facility => $syslog_log_facility_murano, - database_connection => $sql_connection, - auth_uri => "${public_protocol}://${public_address}:5000/v2.0/", - admin_user => $murano_user, - admin_password => $murano_hash['user_password'], - admin_tenant_name => $tenant, - identity_uri => "http://${service_endpoint}:35357/", - use_neutron => $use_neutron, - rabbit_os_user => $rabbit_hash['user'], - rabbit_os_password => $rabbit_hash['password'], - rabbit_os_port => $amqp_port, - rabbit_os_host => split($amqp_hosts, ','), - rabbit_ha_queues => $rabbit_ha_queues, - rabbit_own_host => $management_ip, - rabbit_own_port => $amqp_port, - rabbit_own_vhost => '/murano', - rabbit_own_user => $rabbit_hash['user'], - rabbit_own_password => $rabbit_hash['password'], - service_host => $api_bind_host, - service_port => $api_bind_port, - external_network => $external_network, - use_trusts => true, - } - - class { 'murano::api': - host => $api_bind_host, - port => $api_bind_port, - } - - class { 'murano::engine': } - - class { 'murano::client': } - - class { 'murano::dashboard': - api_url => $internal_url, - repo_url => $repository_url, - } - - $haproxy_stats_url = "http://${management_ip}:10000/;csv" - - haproxy_backend_status { 'murano-api' : - name => 'murano-api', - url => $haproxy_stats_url, - } - - if ($node_role == 'primary-controller') { - haproxy_backend_status { 'keystone-public' : - name => 'keystone-1', - url => $haproxy_stats_url, - } - - haproxy_backend_status { 'keystone-admin' : - name 
=> 'keystone-2', - url => $haproxy_stats_url, - } - - murano::application { 'io.murano' : } - - Haproxy_backend_status['keystone-admin'] -> Haproxy_backend_status['murano-api'] - Haproxy_backend_status['keystone-public'] -> Haproxy_backend_status['murano-api'] - Haproxy_backend_status['murano-api'] -> Murano::Application['io.murano'] - - Service['murano-api'] -> Murano::Application['io.murano'] - } - - Firewall[$firewall_rule] -> Class['murano::api'] - Service['murano-api'] -> Haproxy_backend_status['murano-api'] -} -######################### - -class openstack::firewall {} -include openstack::firewall diff --git a/f2s/resources/murano/meta.yaml b/f2s/resources/murano/meta.yaml deleted file mode 100644 index 3162a425..00000000 --- a/f2s/resources/murano/meta.yaml +++ /dev/null @@ -1,59 +0,0 @@ -id: murano -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - amqp_hosts: - value: null - amqp_port: - value: null - database_vip: - value: null - debug: - value: null - fqdn: - value: null - heat_hash: - value: null - management_vip: - value: null - murano: - value: null - murano_hash: - value: null - murano_settings: - value: null - network_scheme: - value: null - neutron_config: - value: null - node_role: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - rabbit_ha_queues: - value: null - rabbit_hash: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - syslog_log_facility_murano: - value: null - use_neutron: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/netconfig/actions/run.pp b/f2s/resources/netconfig/actions/run.pp deleted file mode 100644 index 7494a336..00000000 --- a/f2s/resources/netconfig/actions/run.pp +++ /dev/null @@ -1,106 +0,0 @@ -notice('MODULAR: netconfig.pp') - -$network_scheme = hiera('network_scheme') - -if ( $::l23_os =~ /(?i:centos6)/ and 
$::kernelmajversion == '3.10' ) { - $ovs_datapath_package_name = 'kmod-openvswitch-lt' -} - -class { 'l23network' : - use_ovs => hiera('use_ovs', false), - use_ovs_dkms_datapath_module => $::l23_os ? { - /(?i:redhat7|centos7)/ => false, - default => true - }, - ovs_datapath_package_name => $ovs_datapath_package_name, -} -prepare_network_config($network_scheme) -$sdn = generate_network_config() -notify {'SDN': message => $sdn } - -#Set arp_accept to 1 by default #lp1456272 -sysctl::value { 'net.ipv4.conf.all.arp_accept': value => '1' } -sysctl::value { 'net.ipv4.conf.default.arp_accept': value => '1' } - -# setting kernel reserved ports -# defaults are 49000,49001,35357,41055,41056,58882 -class { 'openstack::reserved_ports': } - -### TCP connections keepalives and failover related parameters ### -# configure TCP keepalive for host OS. -# Send 3 probes each 8 seconds, if the connection was idle -# for a 30 seconds. Consider it dead, if there was no responces -# during the check time frame, i.e. 30+3*8=54 seconds overall. -# (note: overall check time frame should be lower then -# nova_report_interval). 
-class { 'openstack::keepalive' : - tcpka_time => '30', - tcpka_probes => '8', - tcpka_intvl => '3', - tcp_retries2 => '5', -} - -# increase network backlog for performance on fast networks -sysctl::value { 'net.core.netdev_max_backlog': value => '261144' } - -L2_port<||> -> Sysfs_config_value<||> -L3_ifconfig<||> -> Sysfs_config_value<||> -L3_route<||> -> Sysfs_config_value<||> - -class { 'sysfs' :} - -if hiera('set_rps', true) { - sysfs_config_value { 'rps_cpus' : - ensure => 'present', - name => '/etc/sysfs.d/rps_cpus.conf', - value => cpu_affinity_hex($::processorcount), - sysfs => '/sys/class/net/*/queues/rx-*/rps_cpus', - exclude => '/sys/class/net/lo/*', - } -} - -if hiera('set_xps', true) { - sysfs_config_value { 'xps_cpus' : - ensure => 'present', - name => '/etc/sysfs.d/xps_cpus.conf', - value => cpu_affinity_hex($::processorcount), - sysfs => '/sys/class/net/*/queues/tx-*/xps_cpus', - exclude => '/sys/class/net/lo/*', - } -} - -if !defined(Package['irqbalance']) { - package { 'irqbalance': - ensure => installed, - } -} - -if !defined(Service['irqbalance']) { - service { 'irqbalance': - ensure => running, - require => Package['irqbalance'], - } -} - -# We need to wait at least 30 seconds for the bridges and other interfaces to -# come up after being created. This should allow for all interfaces to be up -# and ready for traffic before proceeding with further deploy steps. 
LP#1458954 -exec { 'wait-for-interfaces': - path => '/usr/bin:/bin', - command => 'sleep 32', -} - -# check that network was configured successfully -# and the default gateway is online -$default_gateway = hiera('default_gateway') - -ping_host { $default_gateway : - ensure => 'up', -} -L2_port<||> -> Ping_host[$default_gateway] -L2_bond<||> -> Ping_host[$default_gateway] -L3_ifconfig<||> -> Ping_host[$default_gateway] -L3_route<||> -> Ping_host[$default_gateway] - -Class['l23network'] -> -Exec['wait-for-interfaces'] diff --git a/f2s/resources/netconfig/meta.yaml b/f2s/resources/netconfig/meta.yaml deleted file mode 100644 index 5472a442..00000000 --- a/f2s/resources/netconfig/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: netconfig -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - default_gateway: - value: null - fqdn: - value: null - network_metadata: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null - set_rps: - value: null - set_xps: - value: null - use_neutron: - value: null - use_ovs: - value: null diff --git a/f2s/resources/neutron-db/actions/run.pp b/f2s/resources/neutron-db/actions/run.pp deleted file mode 100644 index 8cdbae15..00000000 --- a/f2s/resources/neutron-db/actions/run.pp +++ /dev/null @@ -1,59 +0,0 @@ -notice('MODULAR: openstack-network/db.pp') - -$node_name = hiera('node_name') -$use_neutron = hiera('use_neutron', false) -$neutron_hash = hiera_hash('quantum_settings', {}) -$mysql_hash = hiera_hash('mysql', {}) -$management_vip = hiera('management_vip', undef) -$database_vip = hiera('database_vip', undef) - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$neutron_db = merge($neutron_hash['database'], {}) - -$db_user = pick($neutron_db['db_user'], 'neutron') -$db_name = pick($neutron_db['db_name'], 'neutron') -$db_password = 
pick($neutron_db['passwd'], $mysql_root_password) - -$db_host = pick($neutron_db['db_host'], $database_vip) -$db_create = pick($neutron_db['db_create'], $mysql_db_create) -$db_root_user = pick($neutron_db['root_user'], $mysql_root_user) -$db_root_password = pick($neutron_db['root_password'], $mysql_root_password) - -$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) - -if $use_neutron and $db_create { - - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'neutron::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['neutron::db::mysql'] - -} - -# =========================================================================== - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server diff --git a/f2s/resources/neutron-db/meta.yaml b/f2s/resources/neutron-db/meta.yaml deleted file mode 100644 index 847541d7..00000000 --- a/f2s/resources/neutron-db/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -id: neutron-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - mysql: - value: null - mysql_custom_setup_class: - value: null - neutron_db_password: - value: null - node_name: - value: null - puppet_modules: - value: null - quantum_settings: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/neutron-keystone/actions/run.pp b/f2s/resources/neutron-keystone/actions/run.pp deleted file mode 100644 index 937b42b7..00000000 --- a/f2s/resources/neutron-keystone/actions/run.pp +++ /dev/null @@ -1,50 +0,0 @@ 
-notice('MODULAR: openstack-network/keystone.pp') - -$use_neutron = hiera('use_neutron', false) -$neutron_hash = hiera_hash('quantum_settings', {}) -$public_vip = hiera('public_vip') -$public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} -$admin_address = hiera('management_vip') -$admin_protocol = 'http' -$region = pick($neutron_hash['region'], hiera('region', 'RegionOne')) - -$password = $neutron_hash['keystone']['admin_password'] -$auth_name = pick($neutron_hash['auth_name'], 'neutron') -$configure_endpoint = pick($neutron_hash['configure_endpoint'], true) -$configure_user = pick($neutron_hash['configure_user'], true) -$configure_user_role = pick($neutron_hash['configure_user_role'], true) -$service_name = pick($neutron_hash['service_name'], 'neutron') -$tenant = pick($neutron_hash['tenant'], 'services') - -$port = '9696' - -$public_url = "${public_protocol}://${public_address}:${port}" -$internal_url = "${admin_protocol}://${admin_address}:${port}" -$admin_url = "${admin_protocol}://${admin_address}:${port}" - - -validate_string($public_address) -validate_string($password) - -if $use_neutron { - class { '::neutron::keystone::auth': - password => $password, - auth_name => $auth_name, - configure_endpoint => $configure_endpoint, - configure_user => $configure_user, - configure_user_role => $configure_user_role, - service_name => $service_name, - public_url => $public_url, - internal_url => $internal_url, - admin_url => $admin_url, - region => $region, - } -} diff --git a/f2s/resources/neutron-keystone/meta.yaml b/f2s/resources/neutron-keystone/meta.yaml deleted file mode 100644 index 44b19133..00000000 --- a/f2s/resources/neutron-keystone/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -id: neutron-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - 
update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - quantum_settings: - value: null - region: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/nova-db/actions/run.pp b/f2s/resources/nova-db/actions/run.pp deleted file mode 100644 index 88470175..00000000 --- a/f2s/resources/nova-db/actions/run.pp +++ /dev/null @@ -1,53 +0,0 @@ -notice('MODULAR: openstack-controller/db.pp') - -$nova_hash = hiera_hash('nova', {}) -$mysql_hash = hiera_hash('mysql', {}) -$management_vip = hiera('management_vip', undef) -$database_vip = hiera('database_vip', undef) - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password = $mysql_hash['root_password'] - -$db_user = pick($nova_hash['db_user'], 'nova') -$db_name = pick($nova_hash['db_name'], 'nova') -$db_password = pick($nova_hash['db_password'], $mysql_root_password) - -$db_host = pick($nova_hash['db_host'], $database_vip) -$db_create = pick($nova_hash['db_create'], $mysql_db_create) -$db_root_user = pick($nova_hash['root_user'], $mysql_root_user) -$db_root_password = pick($nova_hash['root_password'], $mysql_root_password) - -$allowed_hosts = [ $::hostname, 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) - -if $db_create { - - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'nova::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['nova::db::mysql'] - -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include 
mysql::server diff --git a/f2s/resources/nova-db/meta.yaml b/f2s/resources/nova-db/meta.yaml deleted file mode 100644 index 1164afa3..00000000 --- a/f2s/resources/nova-db/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: nova-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - mysql: - value: null - mysql_custom_setup_class: - value: null - nova: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/nova-keystone/actions/run.pp b/f2s/resources/nova-keystone/actions/run.pp deleted file mode 100644 index 0f5a4fc8..00000000 --- a/f2s/resources/nova-keystone/actions/run.pp +++ /dev/null @@ -1,56 +0,0 @@ -notice('MODULAR: openstack-controller/keystone.pp') - -$nova_hash = hiera_hash('nova', {}) -$public_vip = hiera('public_vip') -$public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? 
{ - true => 'https', - default => 'http', -} -$admin_protocol = 'http' -$admin_address = hiera('management_vip') -$region = pick($nova_hash['region'], hiera('region', 'RegionOne')) - -$password = $nova_hash['user_password'] -$auth_name = pick($nova_hash['auth_name'], 'nova') -$configure_endpoint = pick($nova_hash['configure_endpoint'], true) -$configure_user = pick($nova_hash['configure_user'], true) -$configure_user_role = pick($nova_hash['configure_user_role'], true) -$service_name = pick($nova_hash['service_name'], 'nova') -$tenant = pick($nova_hash['tenant'], 'services') - -$compute_port = '8774' -$public_base_url = "${public_protocol}://${public_address}:${compute_port}" -$admin_base_url = "${admin_protocol}://${admin_address}:${compute_port}" - -$ec2_port = '8773' -$ec2_public_url = "${public_protocol}://${public_address}:${ec2_port}/services/Cloud" -$ec2_internal_url = "${admin_protocol}://${admin_address}:${ec2_port}/services/Cloud" -$ec2_admin_url = "${admin_protocol}://${admin_address}:${ec2_port}/services/Admin" - -validate_string($public_address) -validate_string($password) - -class { '::nova::keystone::auth': - password => $password, - auth_name => $auth_name, - configure_endpoint => $configure_endpoint, - configure_endpoint_v3 => $configure_endpoint, - configure_user => $configure_user, - configure_user_role => $configure_user_role, - service_name => $service_name, - public_url => "${public_base_url}/v2/%(tenant_id)s", - public_url_v3 => "${public_base_url}/v3", - internal_url => "${admin_base_url}/v2/%(tenant_id)s", - internal_url_v3 => "${admin_base_url}/v3", - admin_url => "${admin_base_url}/v2/%(tenant_id)s", - admin_url_v3 => "${admin_base_url}/v3", - region => $region, - ec2_public_url => $ec2_public_url, - ec2_internal_url => $ec2_internal_url, - ec2_admin_url => $ec2_admin_url, -} diff --git a/f2s/resources/nova-keystone/meta.yaml b/f2s/resources/nova-keystone/meta.yaml deleted file mode 100644 index 5cba5bae..00000000 --- 
a/f2s/resources/nova-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: nova-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - nova: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null diff --git a/f2s/resources/ntp-check/actions/run.pp b/f2s/resources/ntp-check/actions/run.pp deleted file mode 100644 index 193e64f2..00000000 --- a/f2s/resources/ntp-check/actions/run.pp +++ /dev/null @@ -1,6 +0,0 @@ -notice('MODULAR: ntp-check.pp') -# get the ntp configuration from hiera -$ntp_servers = hiera('external_ntp') -# take the comma seperated list and turn it into an array of servers and then -# pass it to the ntp_available function to check that at least 1 server works -ntp_available(strip(split($ntp_servers['ntp_list'], ','))) diff --git a/f2s/resources/ntp-check/meta.yaml b/f2s/resources/ntp-check/meta.yaml deleted file mode 100644 index 8a753150..00000000 --- a/f2s/resources/ntp-check/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: ntp-check -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - external_ntp: - value: null - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/ntp-client/actions/run.pp b/f2s/resources/ntp-client/actions/run.pp deleted file mode 100644 index 38a37f2b..00000000 --- a/f2s/resources/ntp-client/actions/run.pp +++ /dev/null @@ -1,26 +0,0 @@ -notice('MODULAR: ntp-client.pp') - -$management_vrouter_vip = hiera('management_vrouter_vip') -$ntp_servers = hiera_array('ntp_servers', [$management_vrouter_vip]) -$nodes_hash = hiera('nodes', {}) -$roles = node_roles($nodes_hash, hiera('uid')) - -if !(member($roles, 'controller') or member($roles, 'primary-controller')) { - class { 'ntp': - servers => $ntp_servers, - service_ensure => 'running', - service_enable => true, - 
disable_monitor => true, - iburst_enable => true, - tinker => true, - panic => '0', - stepout => '5', - minpoll => '3', - } - - include ntp::params - tweaks::ubuntu_service_override { 'ntpd': - package_name => $ntp::params::package_name, - service_name => $ntp::params::service_name, - } -} diff --git a/f2s/resources/ntp-client/meta.yaml b/f2s/resources/ntp-client/meta.yaml deleted file mode 100644 index 20f6ae83..00000000 --- a/f2s/resources/ntp-client/meta.yaml +++ /dev/null @@ -1,21 +0,0 @@ -id: ntp-client -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vrouter_vip: - value: null - nodes: - value: null - ntp_servers: - value: null - puppet_modules: - value: null - role: - value: null - uid: - value: null diff --git a/f2s/resources/ntp-server/actions/run.pp b/f2s/resources/ntp-server/actions/run.pp deleted file mode 100644 index 6055f681..00000000 --- a/f2s/resources/ntp-server/actions/run.pp +++ /dev/null @@ -1,31 +0,0 @@ -notice('MODULAR: ntp-server.pp') - -$ntp_servers = hiera('external_ntp') - -class { 'ntp': - servers => strip(split($ntp_servers['ntp_list'], ',')), - service_enable => true, - service_ensure => 'running', - disable_monitor => true, - iburst_enable => true, - tinker => true, - panic => '0', - stepout => '5', - minpoll => '3', - restrict => [ - '-4 default kod nomodify notrap nopeer noquery', - '-6 default kod nomodify notrap nopeer noquery', - '127.0.0.1', - '::1', - ], -} - -class { 'cluster::ntp_ocf': } - -if $::operatingsystem == 'Ubuntu' { - include ntp::params - tweaks::ubuntu_service_override { 'ntpd': - package_name => $ntp::params::package_name, - service_name => $ntp::params::service_name, - } -} diff --git a/f2s/resources/ntp-server/meta.yaml b/f2s/resources/ntp-server/meta.yaml deleted file mode 100644 index 338e23dd..00000000 --- a/f2s/resources/ntp-server/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: ntp-server -handler: puppetv2 -version: '8.0' -actions: - run: 
run.pp - update: run.pp -input: - external_ntp: - value: null - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-cinder/actions/run.pp b/f2s/resources/openstack-cinder/actions/run.pp deleted file mode 100644 index 7ea72770..00000000 --- a/f2s/resources/openstack-cinder/actions/run.pp +++ /dev/null @@ -1,107 +0,0 @@ -notice('MODULAR: openstack-cinder.pp') - -#Network stuff -prepare_network_config(hiera('network_scheme', {})) -$cinder_hash = hiera_hash('cinder_hash', {}) -$management_vip = hiera('management_vip') -$queue_provider = hiera('queue_provider', 'rabbitmq') -$cinder_volume_group = hiera('cinder_volume_group', 'cinder') -$nodes_hash = hiera('nodes', {}) -$storage_hash = hiera_hash('storage', {}) -$ceilometer_hash = hiera_hash('ceilometer_hash',{}) -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$service_endpoint = hiera('service_endpoint') -$service_workers = pick($cinder_hash['workers'], - min(max($::processorcount, 2), 16)) - -$cinder_db_password = $cinder_hash[db_password] -$cinder_user_password = $cinder_hash[user_password] -$keystone_user = pick($cinder_hash['user'], 'cinder') -$keystone_tenant = pick($cinder_hash['tenant'], 'services') -$region = hiera('region', 'RegionOne') -$db_host = pick($cinder_hash['db_host'], hiera('database_vip')) -$cinder_db_user = pick($cinder_hash['db_user'], 'cinder') -$cinder_db_name = pick($cinder_hash['db_name'], 'cinder') -$roles = node_roles($nodes_hash, hiera('uid')) -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") - -# Determine who should get the volume service -if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { - $manage_volumes = 'iscsi' -} elsif ($storage_hash['volumes_ceph']) { - $manage_volumes = 'ceph' -} elsif member($roles, 'cinder-vmware') { - $manage_volumes = 'vmdk' -} else { - $manage_volumes = false -} - -# SQLAlchemy backend configuration -$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) 
-$max_overflow = min($::processorcount * 5 + 0, 60 + 0) -$max_retries = '-1' -$idle_timeout = '3600' - -$keystone_auth_protocol = 'http' -$keystone_auth_host = $service_endpoint -$service_port = '5000' -$auth_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" -$identity_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" - -$openstack_version = { - 'keystone' => 'installed', - 'glance' => 'installed', - 'horizon' => 'installed', - 'nova' => 'installed', - 'novncproxy' => 'installed', - 'cinder' => 'installed', -} - -######### Cinder Controller Services ######## -class {'openstack::cinder': - sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_name}?charset=utf8&read_timeout=60", - queue_provider => $queue_provider, - amqp_hosts => hiera('amqp_hosts',''), - amqp_user => $rabbit_hash['user'], - amqp_password => $rabbit_hash['password'], - rabbit_ha_queues => true, - volume_group => $cinder_volume_group, - physical_volume => undef, - manage_volumes => $manage_volumes, - enabled => true, - glance_api_servers => $glance_api_servers, - auth_host => $service_endpoint, - bind_host => get_network_role_property('cinder/api', 'ipaddr'), - iscsi_bind_host => get_network_role_property('cinder/iscsi', 'ipaddr'), - keystone_user => $keystone_user, - keystone_tenant => $keystone_tenant, - auth_uri => $auth_uri, - region => $region, - identity_uri => $identity_uri, - cinder_user_password => $cinder_user_password, - use_syslog => hiera('use_syslog', true), - use_stderr => hiera('use_stderr', false), - verbose => pick($cinder_hash['verbose'], hiera('verbose', true)), - debug => pick($cinder_hash['debug'], hiera('debug', true)), - syslog_log_facility => hiera('syslog_log_facility_cinder', 'LOG_LOCAL3'), - cinder_rate_limits => hiera('cinder_rate_limits'), - max_retries => $max_retries, - max_pool_size => $max_pool_size, - max_overflow => $max_overflow, - idle_timeout => $idle_timeout, - ceilometer => 
$ceilometer_hash[enabled], - service_workers => $service_workers, -} # end class - -####### Disable upstart startup on install ####### -if($::operatingsystem == 'Ubuntu') { - tweaks::ubuntu_service_override { 'cinder-api': - package_name => 'cinder-api', - } - tweaks::ubuntu_service_override { 'cinder-backup': - package_name => 'cinder-backup', - } - tweaks::ubuntu_service_override { 'cinder-scheduler': - package_name => 'cinder-scheduler', - } -} diff --git a/f2s/resources/openstack-cinder/meta.yaml b/f2s/resources/openstack-cinder/meta.yaml deleted file mode 100644 index 3e10bd60..00000000 --- a/f2s/resources/openstack-cinder/meta.yaml +++ /dev/null @@ -1,59 +0,0 @@ -id: openstack-cinder -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - amqp_hosts: - value: null - ceilometer_hash: - value: null - cinder: - value: null - cinder_hash: - value: null - cinder_rate_limits: - value: null - cinder_volume_group: - value: null - database_vip: - value: null - debug: - value: null - fqdn: - value: null - glance_api_servers: - value: null - management_vip: - value: null - network_scheme: - value: null - nodes: - value: null - puppet_modules: - value: null - queue_provider: - value: null - rabbit_ha_queues: - value: null - rabbit_hash: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - storage: - value: null - syslog_log_facility_cinder: - value: null - uid: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/openstack-controller/actions/run.pp b/f2s/resources/openstack-controller/actions/run.pp deleted file mode 100644 index 583e2568..00000000 --- a/f2s/resources/openstack-controller/actions/run.pp +++ /dev/null @@ -1,242 +0,0 @@ -notice('MODULAR: openstack-controller.pp') - -$network_scheme = hiera_hash('network_scheme', {}) -$network_metadata = hiera_hash('network_metadata', {}) -prepare_network_config($network_scheme) - 
-$nova_rate_limits = hiera('nova_rate_limits') -$primary_controller = hiera('primary_controller') -$use_neutron = hiera('use_neutron', false) -$nova_report_interval = hiera('nova_report_interval') -$nova_service_down_time = hiera('nova_service_down_time') -$use_syslog = hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') -$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') -$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') -$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') -$management_vip = hiera('management_vip') -$public_vip = hiera('public_vip') -$sahara_hash = hiera_hash('sahara', {}) -$nodes_hash = hiera('nodes', {}) -$mysql_hash = hiera_hash('mysql', {}) -$access_hash = hiera_hash('access', {}) -$keystone_hash = hiera_hash('keystone', {}) -$glance_hash = hiera_hash('glance', {}) -$storage_hash = hiera_hash('storage', {}) -$nova_hash = hiera_hash('nova', {}) -$nova_config_hash = hiera_hash('nova_config', {}) -$api_bind_address = get_network_role_property('nova/api', 'ipaddr') -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$ceilometer_hash = hiera_hash('ceilometer',{}) -$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') -$workloads_hash = hiera_hash('workloads_collector', {}) -$service_endpoint = hiera('service_endpoint') -$db_host = pick($nova_hash['db_host'], hiera('database_vip')) -$nova_db_user = pick($nova_hash['db_user'], 'nova') -$keystone_user = pick($nova_hash['user'], 'nova') -$keystone_tenant = pick($nova_hash['tenant'], 'services') -$glance_api_servers = hiera('glance_api_servers', "$management_vip:9292") -$region = hiera('region', 'RegionOne') -$service_workers = pick($nova_hash['workers'], - min(max($::processorcount, 2), 16)) -$ironic_hash = hiera_hash('ironic', {}) - -$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), 
hiera('memcache_roles')) -$memcache_ipaddrs = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes,'mgmt/memcache'))) -$roles = node_roles($nodes_hash, hiera('uid')) -$openstack_controller_hash = hiera_hash('openstack_controller', {}) - -$floating_hash = {} - -if $use_neutron { - $network_provider = 'neutron' - $novanetwork_params = {} - $neutron_config = hiera_hash('quantum_settings') - $neutron_db_password = $neutron_config['database']['passwd'] - $neutron_user_password = $neutron_config['keystone']['admin_password'] - $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] - $base_mac = $neutron_config['L2']['base_mac'] -} else { - $network_provider = 'nova' - $floating_ips_range = hiera('floating_network_range') - $neutron_config = {} - $novanetwork_params = hiera('novanetwork_parameters') -} - -# SQLAlchemy backend configuration -$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) -$max_overflow = min($::processorcount * 5 + 0, 60 + 0) -$max_retries = '-1' -$idle_timeout = '3600' - -# TODO: openstack_version is confusing, there's such string var in hiera and hardcoded hash -$hiera_openstack_version = hiera('openstack_version') -$openstack_version = { - 'keystone' => 'installed', - 'glance' => 'installed', - 'horizon' => 'installed', - 'nova' => 'installed', - 'novncproxy' => 'installed', - 'cinder' => 'installed', -} - -################################################################# -if hiera('use_vcenter', false) or hiera('libvirt_type') == 'vcenter' { - $multi_host = false -} else { - $multi_host = true -} - -class { '::openstack::controller': - private_interface => $use_neutron ? { true=>false, default=>hiera('private_int')}, - public_interface => hiera('public_int', undef), - public_address => $public_vip, # It is feature for HA mode. - internal_address => $management_vip, # All internal traffic goes - admin_address => $management_vip, # through load balancer. - floating_range => $use_neutron ? 
{ true =>$floating_hash, default =>false}, - fixed_range => $use_neutron ? { true =>false, default =>hiera('fixed_network_range')}, - multi_host => $multi_host, - network_config => hiera('network_config', {}), - num_networks => hiera('num_networks', undef), - network_size => hiera('network_size', undef), - network_manager => hiera('network_manager', undef), - network_provider => $network_provider, - verbose => pick($openstack_controller_hash['verbose'], true), - debug => pick($openstack_controller_hash['debug'], hiera('debug', true)), - auto_assign_floating_ip => hiera('auto_assign_floating_ip', false), - glance_api_servers => $glance_api_servers, - primary_controller => $primary_controller, - novnc_address => $api_bind_address, - nova_db_user => $nova_db_user, - nova_db_password => $nova_hash[db_password], - nova_user => $keystone_user, - nova_user_password => $nova_hash[user_password], - nova_user_tenant => $keystone_tenant, - nova_hash => $nova_hash, - queue_provider => 'rabbitmq', - amqp_hosts => hiera('amqp_hosts',''), - amqp_user => $rabbit_hash['user'], - amqp_password => $rabbit_hash['password'], - rabbit_ha_queues => true, - cache_server_ip => $memcache_ipaddrs, - api_bind_address => $api_bind_address, - db_host => $db_host, - service_endpoint => $service_endpoint, - neutron_metadata_proxy_secret => $neutron_metadata_proxy_secret, - cinder => true, - ceilometer => $ceilometer_hash[enabled], - service_workers => $service_workers, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - syslog_log_facility_nova => $syslog_log_facility_nova, - nova_rate_limits => $nova_rate_limits, - nova_report_interval => $nova_report_interval, - nova_service_down_time => $nova_service_down_time, - ha_mode => true, - # SQLALchemy backend - max_retries => $max_retries, - max_pool_size => $max_pool_size, - max_overflow => $max_overflow, - idle_timeout => $idle_timeout, -} - -#TODO: PUT this configuration stanza into nova class -nova_config { 'DEFAULT/use_cow_images': value 
=> hiera('use_cow_images')} - -if $primary_controller { - - $haproxy_stats_url = "http://${management_vip}:10000/;csv" - - haproxy_backend_status { 'nova-api' : - name => 'nova-api-2', - url => $haproxy_stats_url, - } - - Openstack::Ha::Haproxy_service <| |> -> Haproxy_backend_status <| |> - - Class['nova::api'] -> Haproxy_backend_status['nova-api'] - - exec { 'create-m1.micro-flavor' : - path => '/sbin:/usr/sbin:/bin:/usr/bin', - environment => [ - "OS_TENANT_NAME=${keystone_tenant}", - "OS_USERNAME=${keystone_user}", - "OS_PASSWORD=${nova_hash['user_password']}", - "OS_AUTH_URL=http://${service_endpoint}:5000/v2.0/", - 'OS_ENDPOINT_TYPE=internalURL', - "OS_REGION_NAME=${region}", - "NOVA_ENDPOINT_TYPE=internalURL", - ], - command => 'bash -c "nova flavor-create --is-public true m1.micro auto 64 0 1"', - #FIXME(mattymo): Upstream bug PUP-2299 for retries in unless/onlyif - # Retry nova-flavor list until it exits 0, then exit with grep status, - # finally exit 1 if tries exceeded - # lint:ignore:single_quote_string_with_variables - unless => 'bash -c \'for tries in {1..10}; do - nova flavor-list | grep m1.micro; - status=("${PIPESTATUS[@]}"); - (( ! status[0] )) && exit "${status[1]}"; - sleep 2; - done; exit 1\'', - # lint:endignore - tries => 10, - try_sleep => 2, - require => Class['nova'], - } - - Haproxy_backend_status <| |> -> Exec<| title == 'create-m1.micro-flavor' |> - - if ! 
$use_neutron { - nova_floating_range { $floating_ips_range: - ensure => 'present', - pool => 'nova', - username => $access_hash[user], - api_key => $access_hash[password], - auth_method => 'password', - auth_url => "http://${service_endpoint}:5000/v2.0/", - authtenant_name => $access_hash[tenant], - api_retries => 10, - } - Haproxy_backend_status['nova-api'] -> Nova_floating_range <| |> - } -} - -nova_config { - 'DEFAULT/teardown_unused_network_gateway': value => 'True' -} - -if $sahara_hash['enabled'] { - $nova_scheduler_default_filters = [ 'DifferentHostFilter' ] - if $storage_hash['volumes_lvm'] { - $cinder_scheduler_filters = [ 'InstanceLocalityFilter' ] - } else { - $cinder_scheduler_filters = [] - } -} else { - $nova_scheduler_default_filters = [] - $cinder_scheduler_filters = [] -} - -if $ironic_hash['enabled'] { - $scheduler_host_manager = 'nova.scheduler.ironic_host_manager.IronicHostManager' -} - -class { '::nova::scheduler::filter': - cpu_allocation_ratio => pick($nova_hash['cpu_allocation_ratio'], '8.0'), - disk_allocation_ratio => pick($nova_hash['disk_allocation_ratio'], '1.0'), - ram_allocation_ratio => pick($nova_hash['ram_allocation_ratio'], '1.0'), - scheduler_host_subset_size => pick($nova_hash['scheduler_host_subset_size'], '30'), - scheduler_default_filters => concat($nova_scheduler_default_filters, pick($nova_config_hash['default_filters'], [ 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'CoreFilter', 'DiskFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter' ])), - scheduler_host_manager => $scheduler_host_manager, -} - -class { 'cinder::scheduler::filter': - scheduler_default_filters => concat($cinder_scheduler_filters, [ 'AvailabilityZoneFilter', 'CapacityFilter', 'CapabilitiesFilter' ]) -} - -# From logasy filter.pp -nova_config { - 'DEFAULT/ram_weight_multiplier': value => '1.0' -} - diff --git a/f2s/resources/openstack-controller/meta.yaml 
b/f2s/resources/openstack-controller/meta.yaml deleted file mode 100644 index 632aafbc..00000000 --- a/f2s/resources/openstack-controller/meta.yaml +++ /dev/null @@ -1,113 +0,0 @@ -id: openstack-controller -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access: - value: null - amqp_hosts: - value: null - auto_assign_floating_ip: - value: null - ceilometer: - value: null - database_vip: - value: null - debug: - value: null - fqdn: - value: null - glance: - value: null - glance_api_servers: - value: null - ironic: - value: null - keystone: - value: null - libvirt_type: - value: null - management_vip: - value: null - memcache_roles: - value: null - mysql: - value: null - network_config: - value: null - network_manager: - value: null - network_metadata: - value: null - network_scheme: - value: null - network_size: - value: null - nodes: - value: null - nova: - value: null - nova_config: - value: null - nova_quota: - value: null - nova_rate_limits: - value: null - nova_report_interval: - value: null - nova_service_down_time: - value: null - num_networks: - value: null - openstack_controller: - value: null - openstack_version: - value: null - primary_controller: - value: null - public_int: - value: null - public_vip: - value: null - puppet_modules: - value: null - quantum_settings: - value: null - rabbit_hash: - value: null - region: - value: null - role: - value: null - sahara: - value: null - service_endpoint: - value: null - storage: - value: null - syslog_log_facility_ceph: - value: null - syslog_log_facility_glance: - value: null - syslog_log_facility_keystone: - value: null - syslog_log_facility_neutron: - value: null - syslog_log_facility_nova: - value: null - uid: - value: null - use_cow_images: - value: null - use_neutron: - value: null - use_stderr: - value: null - use_syslog: - value: null - use_vcenter: - value: null - workloads_collector: - value: null diff --git 
a/f2s/resources/openstack-haproxy-ceilometer/actions/run.pp b/f2s/resources/openstack-haproxy-ceilometer/actions/run.pp deleted file mode 100644 index 74edc62e..00000000 --- a/f2s/resources/openstack-haproxy-ceilometer/actions/run.pp +++ /dev/null @@ -1,23 +0,0 @@ -notice('MODULAR: openstack-haproxy-ceilometer.pp') - -$ceilometer_hash = hiera_hash('ceilometer',{}) -# NOT enabled by default -$use_ceilometer = pick($ceilometer_hash['enabled'], false) -$public_ssl_hash = hiera('public_ssl') -$ceilometer_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceilometer_nodes'), 'ceilometer/api') - -if ($use_ceilometer) { - $server_names = hiera_array('ceilometer_names', keys($ceilometer_address_map)) - $ipaddresses = hiera_array('ceilometer_ipaddresses', values($ceilometer_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - # configure ceilometer ha proxy - class { '::openstack::ha::ceilometer': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git a/f2s/resources/openstack-haproxy-ceilometer/meta.yaml b/f2s/resources/openstack-haproxy-ceilometer/meta.yaml deleted file mode 100644 index 82b06c8c..00000000 --- a/f2s/resources/openstack-haproxy-ceilometer/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -id: openstack-haproxy-ceilometer -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceilometer: - value: null - ceilometer_nodes: - value: null - fqdn: - value: null - public_ssl: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-cinder/actions/run.pp b/f2s/resources/openstack-haproxy-cinder/actions/run.pp deleted file mode 100644 index 238e0ecd..00000000 --- a/f2s/resources/openstack-haproxy-cinder/actions/run.pp +++ /dev/null @@ -1,24 +0,0 @@ 
-notice('MODULAR: openstack-haproxy-cinder.pp') - -$network_metadata = hiera_hash('network_metadata') -$cinder_hash = hiera_hash('cinder_hash', {}) -# enabled by default -$use_cinder = pick($cinder_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') - -$cinder_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('cinder_nodes'), 'cinder/api') -if ($use_cinder) { - $server_names = hiera_array('cinder_names', keys($cinder_address_map)) - $ipaddresses = hiera_array('cinder_ipaddresses', values($cinder_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - # configure cinder ha proxy - class { '::openstack::ha::cinder': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git a/f2s/resources/openstack-haproxy-cinder/meta.yaml b/f2s/resources/openstack-haproxy-cinder/meta.yaml deleted file mode 100644 index c16a2b9c..00000000 --- a/f2s/resources/openstack-haproxy-cinder/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -id: openstack-haproxy-cinder -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - cinder_hash: - value: null - cinder_ipaddresses: - value: null - cinder_names: - value: null - cinder_nodes: - value: null - fqdn: - value: null - management_vip: - value: null - network_metadata: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-glance/actions/run.pp b/f2s/resources/openstack-haproxy-glance/actions/run.pp deleted file mode 100644 index 14dec201..00000000 --- a/f2s/resources/openstack-haproxy-glance/actions/run.pp +++ /dev/null @@ -1,26 +0,0 @@ -notice('MODULAR: openstack-haproxy-glance.pp') - -$network_metadata = hiera_hash('network_metadata') -$glance_hash = hiera_hash('glance', 
{}) -# enabled by default -$use_glance = pick($glance_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') - - -#todo(sv): change to 'glance' as soon as glance as node-role was ready -$glances_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']), 'glance/api') - -if ($use_glance) { - $server_names = hiera_array('glance_names', keys($glances_address_map)) - $ipaddresses = hiera_array('glance_ipaddresses', values($glances_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - class { '::openstack::ha::glance': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git a/f2s/resources/openstack-haproxy-glance/meta.yaml b/f2s/resources/openstack-haproxy-glance/meta.yaml deleted file mode 100644 index 3726b277..00000000 --- a/f2s/resources/openstack-haproxy-glance/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: openstack-haproxy-glance -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - glance: - value: null - glance_ipaddresses: - value: null - glance_names: - value: null - management_vip: - value: null - network_metadata: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-heat/actions/run.pp b/f2s/resources/openstack-haproxy-heat/actions/run.pp deleted file mode 100644 index 4bbf4336..00000000 --- a/f2s/resources/openstack-haproxy-heat/actions/run.pp +++ /dev/null @@ -1,24 +0,0 @@ -notice('MODULAR: openstack-haproxy-heat.pp') - -$heat_hash = hiera_hash('heat', {}) -# enabled by default -$use_heat = pick($heat_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') -$network_metadata = 
hiera_hash('network_metadata') -$heat_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('heat_roles')), 'heat/api') - -if ($use_heat) { - $server_names = hiera_array('heat_names',keys($heat_address_map)) - $ipaddresses = hiera_array('heat_ipaddresses', values($heat_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - -# configure heat ha proxy - class { '::openstack::ha::heat': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git a/f2s/resources/openstack-haproxy-heat/meta.yaml b/f2s/resources/openstack-haproxy-heat/meta.yaml deleted file mode 100644 index 3570bb5f..00000000 --- a/f2s/resources/openstack-haproxy-heat/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -id: openstack-haproxy-heat -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - heat: - value: null - heat_ipaddresses: - value: null - heat_names: - value: null - heat_roles: - value: null - management_vip: - value: null - network_metadata: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-horizon/actions/run.pp b/f2s/resources/openstack-haproxy-horizon/actions/run.pp deleted file mode 100644 index a491245a..00000000 --- a/f2s/resources/openstack-haproxy-horizon/actions/run.pp +++ /dev/null @@ -1,24 +0,0 @@ -notice('MODULAR: openstack-haproxy-horizon.pp') - -$network_metadata = hiera_hash('network_metadata') -$horizon_hash = hiera_hash('horizon', {}) -# enabled by default -$use_horizon = pick($horizon_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') - -$horizon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('horizon_nodes'), 'horizon') -if 
($use_horizon) { - $server_names = hiera_array('horizon_names', keys($horizon_address_map)) - $ipaddresses = hiera_array('horizon_ipaddresses', values($horizon_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - # configure horizon ha proxy - class { '::openstack::ha::horizon': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - use_ssl => $public_ssl_hash['horizon'], - } -} diff --git a/f2s/resources/openstack-haproxy-horizon/meta.yaml b/f2s/resources/openstack-haproxy-horizon/meta.yaml deleted file mode 100644 index eb66d189..00000000 --- a/f2s/resources/openstack-haproxy-horizon/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -id: openstack-haproxy-horizon -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - horizon: - value: null - horizon_ipaddresses: - value: null - horizon_names: - value: null - horizon_nodes: - value: null - management_vip: - value: null - network_metadata: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-ironic/actions/run.pp b/f2s/resources/openstack-haproxy-ironic/actions/run.pp deleted file mode 100644 index df256fc6..00000000 --- a/f2s/resources/openstack-haproxy-ironic/actions/run.pp +++ /dev/null @@ -1,22 +0,0 @@ -notice('MODULAR: openstack-haproxy-ironic.pp') - -$network_metadata = hiera_hash('network_metadata') -$public_ssl_hash = hiera('public_ssl') -$ironic_hash = hiera_hash('ironic', {}) - -$ironic_address_map = get_node_to_ipaddr_map_by_network_role(hiera('ironic_api_nodes'), 'ironic/api') - -$server_names = hiera_array('ironic_server_names', keys($ironic_address_map)) -$ipaddresses = hiera_array('ironic_ipaddresses', values($ironic_address_map)) -$public_virtual_ip = hiera('public_vip') 
-$internal_virtual_ip = hiera('management_vip') -$baremetal_virtual_ip = $network_metadata['vips']['baremetal']['ipaddr'] - -class { '::openstack::ha::ironic': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - baremetal_virtual_ip => $baremetal_virtual_ip, -} diff --git a/f2s/resources/openstack-haproxy-ironic/meta.yaml b/f2s/resources/openstack-haproxy-ironic/meta.yaml deleted file mode 100644 index 0ff64180..00000000 --- a/f2s/resources/openstack-haproxy-ironic/meta.yaml +++ /dev/null @@ -1,11 +0,0 @@ -id: openstack-haproxy-ironic -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ironic: - value: null - puppet_modules: - value: null diff --git a/f2s/resources/openstack-haproxy-keystone/actions/run.pp b/f2s/resources/openstack-haproxy-keystone/actions/run.pp deleted file mode 100644 index 8772ac4c..00000000 --- a/f2s/resources/openstack-haproxy-keystone/actions/run.pp +++ /dev/null @@ -1,29 +0,0 @@ -notice('MODULAR: openstack-haproxy-keystone.pp') - -$network_metadata = hiera_hash('network_metadata') -$keystone_hash = hiera_hash('keystone', {}) -# enabled by default -$use_keystone = pick($keystone_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') - -#todo(sv): change to 'keystone' as soon as keystone as node-role was ready -$keystones_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']), 'keystone/api') - -if ($use_keystone) { - $server_names = pick(hiera_array('keystone_names', undef), - keys($keystones_address_map)) - $ipaddresses = pick(hiera_array('keystone_ipaddresses', undef), - values($keystones_address_map)) - $public_virtual_ip = pick(hiera('public_service_endpoint', undef), hiera('public_vip')) - $internal_virtual_ip = pick(hiera('service_endpoint', undef), 
hiera('management_vip')) - - - # configure keystone ha proxy - class { '::openstack::ha::keystone': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git a/f2s/resources/openstack-haproxy-keystone/meta.yaml b/f2s/resources/openstack-haproxy-keystone/meta.yaml deleted file mode 100644 index 63528a46..00000000 --- a/f2s/resources/openstack-haproxy-keystone/meta.yaml +++ /dev/null @@ -1,31 +0,0 @@ -id: openstack-haproxy-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - keystone: - value: null - keystone_ipaddresses: - value: null - keystone_names: - value: null - management_vip: - value: null - network_metadata: - value: null - public_service_endpoint: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - service_endpoint: - value: null diff --git a/f2s/resources/openstack-haproxy-murano/actions/run.pp b/f2s/resources/openstack-haproxy-murano/actions/run.pp deleted file mode 100644 index 5224a2e0..00000000 --- a/f2s/resources/openstack-haproxy-murano/actions/run.pp +++ /dev/null @@ -1,24 +0,0 @@ -notice('MODULAR: openstack-haproxy-murano.pp') - -$murano_hash = hiera_hash('murano_hash',{}) -# NOT enabled by default -$use_murano = pick($murano_hash['enabled'], false) -$public_ssl_hash = hiera('public_ssl') -$network_metadata = hiera_hash('network_metadata') -$murano_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('murano_roles')), 'murano/api') - -if ($use_murano) { - $server_names = hiera_array('murano_names',keys($murano_address_map)) - $ipaddresses = hiera_array('murano_ipaddresses', values($murano_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - # 
configure murano ha proxy - class { '::openstack::ha::murano': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git a/f2s/resources/openstack-haproxy-murano/meta.yaml b/f2s/resources/openstack-haproxy-murano/meta.yaml deleted file mode 100644 index 9533f803..00000000 --- a/f2s/resources/openstack-haproxy-murano/meta.yaml +++ /dev/null @@ -1,21 +0,0 @@ -id: openstack-haproxy-murano -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - murano_hash: - value: null - murano_roles: - value: null - network_metadata: - value: null - public_ssl: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-mysqld/actions/run.pp b/f2s/resources/openstack-haproxy-mysqld/actions/run.pp deleted file mode 100644 index d2ba97c7..00000000 --- a/f2s/resources/openstack-haproxy-mysqld/actions/run.pp +++ /dev/null @@ -1,31 +0,0 @@ -notice('MODULAR: openstack-haproxy-mysqld.pp') - -$network_metadata = hiera_hash('network_metadata') -$mysql_hash = hiera_hash('mysql', {}) -# enabled by default -$use_mysql = pick($mysql_hash['enabled'], true) - -$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera') -$public_ssl_hash = hiera('public_ssl') - -$database_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('database_nodes'), 'mgmt/database') - -# only do this if mysql is enabled and we are using one of the galera/percona classes -if $use_mysql and ($custom_mysql_setup_class in ['galera', 'percona', 'percona_packages']) { - $server_names = hiera_array('mysqld_names', keys($database_address_map)) - $ipaddresses = hiera_array('mysqld_ipaddresses', values($database_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = pick(hiera('database_vip', undef), 
hiera('management_vip')) - - $primary_controller = hiera('primary_controller') - - - # configure mysql ha proxy - class { '::openstack::ha::mysqld': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - is_primary_controller => $primary_controller, - } -} diff --git a/f2s/resources/openstack-haproxy-mysqld/meta.yaml b/f2s/resources/openstack-haproxy-mysqld/meta.yaml deleted file mode 100644 index 584ece90..00000000 --- a/f2s/resources/openstack-haproxy-mysqld/meta.yaml +++ /dev/null @@ -1,35 +0,0 @@ -id: openstack-haproxy-mysqld -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - custom_mysql_setup_class: - value: null - database_nodes: - value: null - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - mysql: - value: null - mysqld_ipaddresses: - value: null - mysqld_names: - value: null - network_metadata: - value: null - primary_controller: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-neutron/actions/run.pp b/f2s/resources/openstack-haproxy-neutron/actions/run.pp deleted file mode 100644 index 738ccfde..00000000 --- a/f2s/resources/openstack-haproxy-neutron/actions/run.pp +++ /dev/null @@ -1,22 +0,0 @@ -notice('MODULAR: openstack-haproxy-neutron.pp') - -# NOT enabled by default -$use_neutron = hiera('use_neutron', false) -$public_ssl_hash = hiera('public_ssl') - -$neutron_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('neutron_nodes'), 'neutron/api') -if ($use_neutron) { - $server_names = hiera_array('neutron_names', keys($neutron_address_map)) - $ipaddresses = hiera_array('neutron_ipaddresses', values($neutron_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - # configure neutron ha proxy - class { 
'::openstack::ha::neutron': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git a/f2s/resources/openstack-haproxy-neutron/meta.yaml b/f2s/resources/openstack-haproxy-neutron/meta.yaml deleted file mode 100644 index 17f37f85..00000000 --- a/f2s/resources/openstack-haproxy-neutron/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: openstack-haproxy-neutron -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - neutron_ipaddresses: - value: null - neutron_names: - value: null - neutron_nodes: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-haproxy-nova/actions/run.pp b/f2s/resources/openstack-haproxy-nova/actions/run.pp deleted file mode 100644 index 060d9db7..00000000 --- a/f2s/resources/openstack-haproxy-nova/actions/run.pp +++ /dev/null @@ -1,25 +0,0 @@ -notice('MODULAR: openstack-haproxy-nova.pp') - -$nova_hash = hiera_hash('nova', {}) -# enabled by default -$use_nova = pick($nova_hash['enabled'], true) -$public_ssl_hash = hiera('public_ssl') - -$nova_api_address_map = get_node_to_ipaddr_map_by_network_role(hiera('nova_api_nodes'), 'nova/api') - -if ($use_nova) { - $server_names = hiera_array('nova_names', keys($nova_api_address_map)) - $ipaddresses = hiera_array('nova_ipaddresses', values($nova_api_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - - # configure nova ha proxy - class { '::openstack::ha::nova': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git 
a/f2s/resources/openstack-haproxy-nova/meta.yaml b/f2s/resources/openstack-haproxy-nova/meta.yaml deleted file mode 100644 index 031ce49b..00000000 --- a/f2s/resources/openstack-haproxy-nova/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: openstack-haproxy-nova -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - nova: - value: null - nova_api_nodes: - value: null - nova_ipaddresses: - value: null - nova_names: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-haproxy-radosgw/actions/run.pp b/f2s/resources/openstack-haproxy-radosgw/actions/run.pp deleted file mode 100644 index 81473365..00000000 --- a/f2s/resources/openstack-haproxy-radosgw/actions/run.pp +++ /dev/null @@ -1,39 +0,0 @@ -notice('MODULAR: openstack-haproxy-radosgw.pp') - -$network_metadata = hiera_hash('network_metadata') -$storage_hash = hiera_hash('storage', {}) -$public_ssl_hash = hiera('public_ssl') -$ironic_hash = hiera_hash('ironic', {}) - -if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { - $use_swift = true -} else { - $use_swift = false -} -if !($use_swift) and ($storage_hash['objects_ceph']) { - $use_radosgw = true -} else { - $use_radosgw = false -} - -if $use_radosgw { - $rgw_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_rgw_nodes'), 'ceph/radosgw') - $server_names = hiera_array('radosgw_server_names', keys($rgw_address_map)) - $ipaddresses = hiera_array('radosgw_ipaddresses', values($rgw_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - if $ironic_hash['enabled'] { - $baremetal_virtual_ip = $network_metadata['vips']['baremetal']['ipaddr'] - } - - # configure radosgw ha proxy - class { '::openstack::ha::radosgw': - internal_virtual_ip => 
$internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - baremetal_virtual_ip => $baremetal_virtual_ip, - } -} diff --git a/f2s/resources/openstack-haproxy-radosgw/meta.yaml b/f2s/resources/openstack-haproxy-radosgw/meta.yaml deleted file mode 100644 index a0baa0b2..00000000 --- a/f2s/resources/openstack-haproxy-radosgw/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -id: openstack-haproxy-radosgw -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - network_metadata: - value: null - public_ssl: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null diff --git a/f2s/resources/openstack-haproxy-sahara/actions/run.pp b/f2s/resources/openstack-haproxy-sahara/actions/run.pp deleted file mode 100644 index 4f76a2f2..00000000 --- a/f2s/resources/openstack-haproxy-sahara/actions/run.pp +++ /dev/null @@ -1,24 +0,0 @@ -notice('MODULAR: openstack-haproxy-sahara.pp') - -$sahara_hash = hiera_hash('sahara_hash',{}) -# NOT enabled by default -$use_sahara = pick($sahara_hash['enabled'], false) -$public_ssl_hash = hiera('public_ssl') -$network_metadata = hiera_hash('network_metadata') -$sahara_address_map = get_node_to_ipaddr_map_by_network_role(get_nodes_hash_by_roles($network_metadata, hiera('sahara_roles')), 'sahara/api') - -if ($use_sahara) { - $server_names = hiera_array('sahara_names',keys($sahara_address_map)) - $ipaddresses = hiera_array('sahara_ipaddresses', values($sahara_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - # configure sahara ha proxy - class { '::openstack::ha::sahara': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - } -} diff --git 
a/f2s/resources/openstack-haproxy-sahara/meta.yaml b/f2s/resources/openstack-haproxy-sahara/meta.yaml deleted file mode 100644 index 86fcd072..00000000 --- a/f2s/resources/openstack-haproxy-sahara/meta.yaml +++ /dev/null @@ -1,21 +0,0 @@ -id: openstack-haproxy-sahara -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - network_metadata: - value: null - public_ssl: - value: null - puppet_modules: - value: null - role: - value: null - sahara_hash: - value: null - sahara_roles: - value: null diff --git a/f2s/resources/openstack-haproxy-stats/actions/run.pp b/f2s/resources/openstack-haproxy-stats/actions/run.pp deleted file mode 100644 index cfcf71d6..00000000 --- a/f2s/resources/openstack-haproxy-stats/actions/run.pp +++ /dev/null @@ -1,7 +0,0 @@ -notice('MODULAR: openstack-haproxy-stats.pp') - -$internal_virtual_ip = unique([hiera('management_vip'), hiera('database_vip'), hiera('service_endpoint')]) - -class { '::openstack::ha::stats': - internal_virtual_ip => $internal_virtual_ip, -} diff --git a/f2s/resources/openstack-haproxy-stats/meta.yaml b/f2s/resources/openstack-haproxy-stats/meta.yaml deleted file mode 100644 index 0001493b..00000000 --- a/f2s/resources/openstack-haproxy-stats/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -id: openstack-haproxy-stats -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - puppet_modules: - value: null - role: - value: null - service_endpoint: - value: null diff --git a/f2s/resources/openstack-haproxy-swift/actions/run.pp b/f2s/resources/openstack-haproxy-swift/actions/run.pp deleted file mode 100644 index 01819d46..00000000 --- a/f2s/resources/openstack-haproxy-swift/actions/run.pp +++ /dev/null @@ -1,37 +0,0 @@ -notice('MODULAR: openstack-haproxy-swift.pp') - -$network_metadata = hiera_hash('network_metadata') -$storage_hash = hiera_hash('storage', {}) 
-$swift_proxies = hiera_hash('swift_proxies', undef) -$public_ssl_hash = hiera('public_ssl') -$ironic_hash = hiera_hash('ironic', {}) - -if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { - $use_swift = true -} else { - $use_swift = false -} - -$swift_proxies_address_map = get_node_to_ipaddr_map_by_network_role($swift_proxies, 'swift/api') - -if ($use_swift) { - - $server_names = hiera_array('swift_server_names', keys($swift_proxies_address_map)) - $ipaddresses = hiera_array('swift_ipaddresses', values($swift_proxies_address_map)) - $public_virtual_ip = hiera('public_vip') - $internal_virtual_ip = hiera('management_vip') - - if $ironic_hash['enabled'] { - $baremetal_virtual_ip = $network_metadata['vips']['baremetal']['ipaddr'] - } - - # configure swift ha proxy - class { '::openstack::ha::swift': - internal_virtual_ip => $internal_virtual_ip, - ipaddresses => $ipaddresses, - public_virtual_ip => $public_virtual_ip, - server_names => $server_names, - public_ssl => $public_ssl_hash['services'], - baremetal_virtual_ip => $baremetal_virtual_ip, - } -} diff --git a/f2s/resources/openstack-haproxy-swift/meta.yaml b/f2s/resources/openstack-haproxy-swift/meta.yaml deleted file mode 100644 index 627fbce4..00000000 --- a/f2s/resources/openstack-haproxy-swift/meta.yaml +++ /dev/null @@ -1,31 +0,0 @@ -id: openstack-haproxy-swift -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - ironic: - value: null - management_vip: - value: null - network_metadata: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null - swift_ipaddresses: - value: null - swift_proxies: - value: null - swift_server_names: - value: null diff --git a/f2s/resources/openstack-haproxy/actions/run.pp b/f2s/resources/openstack-haproxy/actions/run.pp deleted file mode 100644 index c38ce7dc..00000000 --- 
a/f2s/resources/openstack-haproxy/actions/run.pp +++ /dev/null @@ -1,3 +0,0 @@ -notice('MODULAR: openstack-haproxy.pp') -# This is a placeholder task that is used to tie all the haproxy tasks together. -# Any haproxy configurations should go in a openstack-haproxy- task diff --git a/f2s/resources/openstack-haproxy/meta.yaml b/f2s/resources/openstack-haproxy/meta.yaml deleted file mode 100644 index 4a049542..00000000 --- a/f2s/resources/openstack-haproxy/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: openstack-haproxy -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/openstack-network-agents-dhcp/actions/run.pp b/f2s/resources/openstack-network-agents-dhcp/actions/run.pp deleted file mode 100644 index 21968843..00000000 --- a/f2s/resources/openstack-network-agents-dhcp/actions/run.pp +++ /dev/null @@ -1,39 +0,0 @@ -notice('MODULAR: openstack-network/agents/dhcp.pp') - -$use_neutron = hiera('use_neutron', false) - -class neutron {} -class { 'neutron' :} - -if $use_neutron { - - $debug = hiera('debug', true) - $resync_interval = '30' - $isolated_metadata = try_get_value($neutron_config, 'metadata/isolated_metadata', true) - - $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) - $ha_agent = try_get_value($neutron_advanced_config, 'dhcp_agent_ha', true) - - class { 'neutron::agents::dhcp': - debug => $debug, - resync_interval => $resync_interval, - manage_service => true, - enable_isolated_metadata => $isolated_metadata, - dhcp_delete_namespaces => true, - enabled => true, - } - - if $ha_agent { - $primary_controller = hiera('primary_controller') - class { 'cluster::neutron::dhcp' : - primary => $primary_controller, - } - } - - # stub package for 'neutron::agents::dhcp' class - package { 'neutron': - name => 'binutils', - ensure => 'installed', - } - -} diff --git 
a/f2s/resources/openstack-network-agents-dhcp/meta.yaml b/f2s/resources/openstack-network-agents-dhcp/meta.yaml deleted file mode 100644 index 3745593b..00000000 --- a/f2s/resources/openstack-network-agents-dhcp/meta.yaml +++ /dev/null @@ -1,21 +0,0 @@ -id: openstack-network-agents-dhcp -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - debug: - value: null - fqdn: - value: null - neutron_advanced_configuration: - value: null - primary_controller: - value: null - puppet_modules: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-agents-l3/actions/run.pp b/f2s/resources/openstack-network-agents-l3/actions/run.pp deleted file mode 100644 index 889c1868..00000000 --- a/f2s/resources/openstack-network-agents-l3/actions/run.pp +++ /dev/null @@ -1,59 +0,0 @@ -notice('MODULAR: openstack-network/agents/l3.pp') - -$use_neutron = hiera('use_neutron', false) - -class neutron {} -class { 'neutron' :} - -$neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) -$dvr = pick($neutron_advanced_config['neutron_dvr'], false) - -$role = hiera('role') -$controller = $role in ['controller', 'primary-controller'] -$compute = $role in ['compute'] - -if $use_neutron and ($controller or ($dvr and $compute)) { - $debug = hiera('debug', true) - $metadata_port = '8775' - $network_scheme = hiera('network_scheme', {}) - - if $controller { - if $dvr { - $agent_mode = 'dvr-snat' - } else { - $agent_mode = 'legacy' - } - } else { - # works on copute nodes only if dvr is enabled - $agent_mode = 'dvr' - } - - prepare_network_config($network_scheme) - - $ha_agent = try_get_value($neutron_advanced_config, 'l3_agent_ha', true) - $external_network_bridge = get_network_role_property('neutron/floating', 'interface') - - class { 'neutron::agents::l3': - debug => $debug, - metadata_port => $metadata_port, - external_network_bridge => $external_network_bridge, - manage_service => true, - enabled 
=> true, - router_delete_namespaces => true, - agent_mode => $agent_mode, - } - - if $ha_agent { - $primary_controller = hiera('primary_controller') - cluster::neutron::l3 { 'default-l3' : - primary => $primary_controller, - } - } - - # stub package for 'neutron::agents::l3' class - package { 'neutron': - name => 'binutils', - ensure => 'installed', - } - -} diff --git a/f2s/resources/openstack-network-agents-l3/meta.yaml b/f2s/resources/openstack-network-agents-l3/meta.yaml deleted file mode 100644 index 523cf75d..00000000 --- a/f2s/resources/openstack-network-agents-l3/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: openstack-network-agents-l3 -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - debug: - value: null - fqdn: - value: null - network_scheme: - value: null - neutron_advanced_configuration: - value: null - primary_controller: - value: null - puppet_modules: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-agents-metadata/actions/run.pp b/f2s/resources/openstack-network-agents-metadata/actions/run.pp deleted file mode 100644 index b4b2f9b7..00000000 --- a/f2s/resources/openstack-network-agents-metadata/actions/run.pp +++ /dev/null @@ -1,56 +0,0 @@ -notice('MODULAR: openstack-network/agents/metadata.pp') - -$use_neutron = hiera('use_neutron', false) - -class neutron {} -class { 'neutron' :} - -if $use_neutron { - $debug = hiera('debug', true) - $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) - $ha_agent = try_get_value($neutron_advanced_config, 'metadata_agent_ha', true) - - $auth_region = hiera('region', 'RegionOne') - $service_endpoint = hiera('service_endpoint') - $auth_api_version = 'v2.0' - $admin_identity_uri = "http://${service_endpoint}:35357" - $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" - - $neutron_config = hiera_hash('neutron_config') - - $keystone_user = try_get_value($neutron_config, 
'keystone/admin_user', 'neutron') - $keystone_tenant = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') - $neutron_user_password = try_get_value($neutron_config, 'keystone/admin_password') - - $shared_secret = try_get_value($neutron_config, 'metadata/metadata_proxy_shared_secret') - - $management_vip = hiera('management_vip') - $nova_endpoint = hiera('nova_endpoint', $management_vip) - - class { 'neutron::agents::metadata': - debug => $debug, - auth_region => $auth_region, - auth_url => $admin_auth_url, - auth_user => $keystone_user, - auth_tenant => $keystone_tenant, - auth_password => $neutron_user_password, - shared_secret => $shared_secret, - metadata_ip => $nova_endpoint, - manage_service => true, - enabled => true, - } - - if $ha_agent { - $primary_controller = hiera('primary_controller') - class { 'cluster::neutron::metadata' : - primary => $primary_controller, - } - } - - # stub package for 'neutron::agents::metadata' class - package { 'neutron': - name => 'binutils', - ensure => 'installed', - } - -} diff --git a/f2s/resources/openstack-network-agents-metadata/meta.yaml b/f2s/resources/openstack-network-agents-metadata/meta.yaml deleted file mode 100644 index 89d18b40..00000000 --- a/f2s/resources/openstack-network-agents-metadata/meta.yaml +++ /dev/null @@ -1,31 +0,0 @@ -id: openstack-network-agents-metadata -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - debug: - value: null - fqdn: - value: null - management_vip: - value: null - neutron_advanced_configuration: - value: null - neutron_config: - value: null - nova_endpoint: - value: null - primary_controller: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-common-config/actions/run.pp b/f2s/resources/openstack-network-common-config/actions/run.pp deleted file mode 100644 index 66b49530..00000000 --- 
a/f2s/resources/openstack-network-common-config/actions/run.pp +++ /dev/null @@ -1,110 +0,0 @@ -notice('MODULAR: openstack-network/common-config.pp') - -$use_neutron = hiera('use_neutron', false) - -if $use_neutron { - - $openstack_network_hash = hiera_hash('openstack_network', { }) - $neutron_config = hiera_hash('neutron_config') - - $core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' - $service_plugins = [ - 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin', - 'neutron.services.metering.metering_plugin.MeteringPlugin', - ] - - $rabbit_hash = hiera_hash('rabbit_hash', { }) - $ceilometer_hash = hiera_hash('ceilometer', { }) - $network_scheme = hiera_hash('network_scheme') - - $verbose = pick($openstack_network_hash['verbose'], hiera('verbose', true)) - $debug = pick($openstack_network_hash['debug'], hiera('debug', true)) - $use_syslog = hiera('use_syslog', true) - $use_stderr = hiera('use_stderr', false) - $log_facility = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') - - prepare_network_config($network_scheme) - $bind_host = get_network_role_property('neutron/api', 'ipaddr') - - $base_mac = $neutron_config['L2']['base_mac'] - $use_ceilometer = $ceilometer_hash['enabled'] - $amqp_hosts = split(hiera('amqp_hosts', ''), ',') - $amqp_user = $rabbit_hash['user'] - $amqp_password = $rabbit_hash['password'] - - $segmentation_type = try_get_value($neutron_config, 'L2/segmentation_type') - - $nets = $neutron_config['predefined_networks'] - - if $segmentation_type == 'vlan' { - $net_role_property = 'neutron/private' - $iface = get_network_role_property($net_role_property, 'phys_dev') - $mtu_for_virt_network = pick(get_transformation_property('mtu', $iface[0]), '1500') - $overlay_net_mtu = $mtu_for_virt_network - } else { - $net_role_property = 'neutron/mesh' - $iface = get_network_role_property($net_role_property, 'phys_dev') - $physical_net_mtu = pick(get_transformation_property('mtu', $iface[0]), '1500') - - if $segmentation_type == 'gre' { - $mtu_offset 
= '42' - } else { - # vxlan is the default segmentation type for non-vlan cases - $mtu_offset = '50' - } - - if $physical_net_mtu { - $overlay_net_mtu = $physical_net_mtu - $mtu_offset - } else { - $overlay_net_mtu = '1500' - $mtu_offset - } - - } - - class { 'neutron' : - verbose => $verbose, - debug => $debug, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - log_facility => $log_facility, - bind_host => $bind_host, - base_mac => $base_mac, - core_plugin => $core_plugin, - service_plugins => $service_plugins, - allow_overlapping_ips => true, - mac_generation_retries => '32', - dhcp_lease_duration => '600', - dhcp_agents_per_network => '2', - report_interval => '10', - rabbit_user => $amqp_user, - rabbit_hosts => $amqp_hosts, - rabbit_password => $amqp_password, - kombu_reconnect_delay => '5.0', - network_device_mtu => $overlay_net_mtu, - advertise_mtu => true, - } - - if $use_syslog { - neutron_config { 'DEFAULT/use_syslog_rfc_format': value => true; } - } - - if $use_ceilometer { - neutron_config { 'DEFAULT/notification_driver': value => 'messaging' } - } - -} - -### SYSCTL ### - -# All nodes with network functions should have net forwarding. -# Its a requirement for network namespaces to function. 
-sysctl::value { 'net.ipv4.ip_forward': value => '1' } - -# All nodes with network functions should have these thresholds -# to avoid "Neighbour table overflow" problem -sysctl::value { 'net.ipv4.neigh.default.gc_thresh1': value => '4096' } -sysctl::value { 'net.ipv4.neigh.default.gc_thresh2': value => '8192' } -sysctl::value { 'net.ipv4.neigh.default.gc_thresh3': value => '16384' } - -Sysctl::Value <| |> -> Nova_config <||> -Sysctl::Value <| |> -> Neutron_config <||> diff --git a/f2s/resources/openstack-network-common-config/meta.yaml b/f2s/resources/openstack-network-common-config/meta.yaml deleted file mode 100644 index 8d649584..00000000 --- a/f2s/resources/openstack-network-common-config/meta.yaml +++ /dev/null @@ -1,37 +0,0 @@ -id: openstack-network-common-config -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - amqp_hosts: - value: null - ceilometer: - value: null - debug: - value: null - fqdn: - value: null - network_scheme: - value: null - neutron_config: - value: null - openstack_network: - value: null - puppet_modules: - value: null - rabbit_hash: - value: null - role: - value: null - syslog_log_facility_neutron: - value: null - use_neutron: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/openstack-network-compute-nova/actions/run.pp b/f2s/resources/openstack-network-compute-nova/actions/run.pp deleted file mode 100644 index 69c2cfd0..00000000 --- a/f2s/resources/openstack-network-compute-nova/actions/run.pp +++ /dev/null @@ -1,267 +0,0 @@ -notice('MODULAR: openstack-network/compute-nova.pp') - -$use_neutron = hiera('use_neutron', false) - -if $use_neutron { - include nova::params - $neutron_config = hiera_hash('neutron_config') - $neutron_integration_bridge = 'br-int' - $nova_hash = hiera_hash('nova') - $libvirt_vif_driver = pick($nova_hash['libvirt_vif_driver'], 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver') - - $management_vip = 
hiera('management_vip') - $service_endpoint = hiera('service_endpoint', $management_vip) - $neutron_endpoint = hiera('neutron_endpoint', $management_vip) - $admin_password = try_get_value($neutron_config, 'keystone/admin_password') - $admin_tenant_name = try_get_value($neutron_config, 'keystone/admin_tenant', 'services') - $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') - $region_name = hiera('region', 'RegionOne') - $auth_api_version = 'v2.0' - $admin_identity_uri = "http://${service_endpoint}:35357" - $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" - $neutron_url = "http://${neutron_endpoint}:9696" - - service { 'libvirt' : - ensure => 'running', - enable => true, - # Workaround for bug LP #1469308 - # also service name for Ubuntu and Centos is the same. - name => 'libvirtd', - provider => $nova::params::special_service_provider, - } - - exec { 'destroy_libvirt_default_network': - command => 'virsh net-destroy default', - onlyif => 'virsh net-info default | grep -qE "Active:.* yes"', - path => [ '/bin', '/sbin', '/usr/bin', '/usr/sbin' ], - tries => 3, - require => Service['libvirt'], - } - - exec { 'undefine_libvirt_default_network': - command => 'virsh net-undefine default', - onlyif => 'virsh net-info default 2>&1 > /dev/null', - path => [ '/bin', '/sbin', '/usr/bin', '/usr/sbin' ], - tries => 3, - require => Exec['destroy_libvirt_default_network'], - } - - Service['libvirt'] ~> Exec['destroy_libvirt_default_network'] - - # script called by qemu needs to manipulate the tap device - file_line { 'clear_emulator_capabilities': - path => '/etc/libvirt/qemu.conf', - line => 'clear_emulator_capabilities = 0', - notify => Service['libvirt'] - } - - file_line { 'no_qemu_selinux': - path => '/etc/libvirt/qemu.conf', - line => 'security_driver = "none"', - notify => Service['libvirt'] - } - - class { 'nova::compute::neutron': - libvirt_vif_driver => $libvirt_vif_driver, - } - - nova_config { - 
'DEFAULT/linuxnet_interface_driver': value => 'nova.network.linux_net.LinuxOVSInterfaceDriver'; - 'DEFAULT/linuxnet_ovs_integration_bridge': value => $neutron_integration_bridge; - 'DEFAULT/network_device_mtu': value => '65000'; - } - - class { 'nova::network::neutron' : - neutron_admin_password => $admin_password, - neutron_admin_tenant_name => $admin_tenant_name, - neutron_region_name => $region_name, - neutron_admin_username => $admin_username, - neutron_admin_auth_url => $admin_auth_url, - neutron_url => $neutron_url, - neutron_ovs_bridge => $neutron_integration_bridge, - } - - augeas { 'sysctl-net.bridge.bridge-nf-call-arptables': - context => '/files/etc/sysctl.conf', - changes => "set net.bridge.bridge-nf-call-arptables '1'", - before => Service['libvirt'], - } - augeas { 'sysctl-net.bridge.bridge-nf-call-iptables': - context => '/files/etc/sysctl.conf', - changes => "set net.bridge.bridge-nf-call-iptables '1'", - before => Service['libvirt'], - } - augeas { 'sysctl-net.bridge.bridge-nf-call-ip6tables': - context => '/files/etc/sysctl.conf', - changes => "set net.bridge.bridge-nf-call-ip6tables '1'", - before => Service['libvirt'], - } - - # We need to restart nova-compute service in orderto apply new settings - # nova-compute must not be restarted until integration bridge is created by - # Neutron L2 agent. 
- # The reason is described here https://bugs.launchpad.net/fuel/+bug/1477475 - exec { 'wait-for-int-br': - command => "ovs-vsctl br-exists ${neutron_integration_bridge}", - path => [ '/sbin', '/bin', '/usr/bin', '/usr/sbin' ], - try_sleep => 6, - tries => 10, - } - Exec['wait-for-int-br'] -> Service['nova-compute'] - service { 'nova-compute': - ensure => 'running', - name => $::nova::params::compute_service_name, - } - Nova_config<| |> ~> Service['nova-compute'] - - if($::operatingsystem == 'Ubuntu') { - tweaks::ubuntu_service_override { 'nova-network': - package_name => 'nova-network', - } - } - -} else { - - $network_scheme = hiera('network_scheme', { }) - prepare_network_config($network_scheme) - - $nova_hash = hiera_hash('nova_hash', { }) - $bind_address = get_network_role_property('nova/api', 'ipaddr') - $public_int = get_network_role_property('public/vip', 'interface') - $private_interface = get_network_role_property('nova/private', 'interface') - $public_interface = $public_int ? { undef=>'', default => $public_int } - $auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) - $nova_rate_limits = hiera('nova_rate_limits') - $network_size = hiera('network_size', undef) - $network_manager = hiera('network_manager', undef) - $network_config = hiera('network_config', { }) - $create_networks = true - $num_networks = hiera('num_networks', '1') - $novanetwork_params = hiera('novanetwork_parameters') - $fixed_range = hiera('fixed_network_range') - $use_vcenter = hiera('use_vcenter', false) - $enabled_apis = 'metadata' - $dns_nameservers = hiera_array('dns_nameservers', []) - - if ! 
$fixed_range { - fail('Must specify the fixed range when using nova-networks') - } - - if $use_vcenter { - $enable_nova_net = false - nova_config { - 'DEFAULT/multi_host': value => 'False'; - 'DEFAULT/send_arp_for_ha': value => 'False'; - } - } else { - include keystone::python - - Nova_config<| |> -> Service['nova-network'] - - case $::osfamily { - 'RedHat': { - $pymemcache_package_name = 'python-memcached' - } - 'Debian': { - $pymemcache_package_name = 'python-memcache' - } - default: { - fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem},\ - module ${module_name} only support osfamily RedHat and Debian") - } - } - - if !defined(Package[$pymemcache_package_name]) { - package { $pymemcache_package_name: - ensure => 'present', - } -> - Nova::Generic_service <| title == 'api' |> - } - - class { 'nova::api': - ensure_package => 'installed', - enabled => true, - admin_tenant_name => $admin_tenant_name, - admin_user => 'nova', - admin_password => $nova_hash['user_password'], - enabled_apis => $enabled_apis, - api_bind_address => $bind_address, - ratelimits => $nova_rate_limits, - # NOTE(bogdando) 1 api worker for compute node is enough - osapi_compute_workers => '1', - } - - if $::operatingsystem == 'Ubuntu' { - tweaks::ubuntu_service_override { 'nova-api': - package_name => 'nova-api', - } - } - - nova_config { - 'DEFAULT/multi_host' : value => 'True'; - 'DEFAULT/send_arp_for_ha' : value => 'True'; - 'DEFAULT/metadata_host' : value => $bind_address; - } - - if ! 
$public_interface { - fail('public_interface must be defined for multi host compute nodes') - } - - $enable_nova_net = true - - if $auto_assign_floating_ip { - nova_config { 'DEFAULT/auto_assign_floating_ip': value => 'True' } - } - } - -# Stub for networking-refresh that is needed by Nova::Network/Nova::Generic_service[network] -# We do not need it due to l23network is doing all stuff -# BTW '/sbin/ifdown -a ; /sbin/ifup -a' does not work on CentOS - exec { 'networking-refresh': - command => '/bin/echo "networking-refresh has been refreshed"', - refreshonly => true, - } - -# Stubs for nova_paste_api_ini - exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', - refreshonly => true, - } - -# Stubs for nova_network - file { '/etc/nova/nova.conf': - ensure => 'present', - } - -# Stubs for nova-api - package { 'nova-common': - name => 'binutils', - ensure => 'installed', - } - - if $::operatingsystem == 'Ubuntu' { - tweaks::ubuntu_service_override { 'nova-network': - package_name => 'nova-network', - } - } - - class { 'nova::network': - ensure_package => 'installed', - private_interface => $private_interface, - public_interface => $public_interface, - fixed_range => $fixed_range, - floating_range => false, - network_manager => $network_manager, - config_overrides => $network_config, - create_networks => $create_networks, - num_networks => $num_networks, - network_size => $network_size, - dns1 => $dns_nameservers[0], - dns2 => $dns_nameservers[1], - enabled => $enable_nova_net, - install_service => $enable_nova_net, - } -#NOTE(aglarendil): lp/1381164 - nova_config { 'DEFAULT/force_snat_range': value => '0.0.0.0/0' } - -} diff --git a/f2s/resources/openstack-network-compute-nova/meta.yaml b/f2s/resources/openstack-network-compute-nova/meta.yaml deleted file mode 100644 index a1e8fb20..00000000 --- a/f2s/resources/openstack-network-compute-nova/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: openstack-network-compute-nova -handler: puppetv2 
-version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - neutron_config: - value: null - neutron_endpoint: - value: null - nova: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-end/meta.yaml b/f2s/resources/openstack-network-end/meta.yaml deleted file mode 100644 index 3fd30ae7..00000000 --- a/f2s/resources/openstack-network-end/meta.yaml +++ /dev/null @@ -1,4 +0,0 @@ -id: openstack-network-end -handler: none -version: '8.0' -inputs: {} diff --git a/f2s/resources/openstack-network-networks/actions/run.pp b/f2s/resources/openstack-network-networks/actions/run.pp deleted file mode 100644 index a61d775b..00000000 --- a/f2s/resources/openstack-network-networks/actions/run.pp +++ /dev/null @@ -1,106 +0,0 @@ -notice('MODULAR: openstack-network/networks.pp') - -if hiera('use_neutron', false) { - $access_hash = hiera('access', { }) - $keystone_admin_tenant = $access_hash['tenant'] - $neutron_config = hiera_hash('neutron_config') - $floating_net = try_get_value($neutron_config, 'default_floating_net', 'net04_ext') - $private_net = try_get_value($neutron_config, 'default_private_net', 'net04') - $default_router = try_get_value($neutron_config, 'default_router', 'router04') - $segmentation_type = try_get_value($neutron_config, 'L2/segmentation_type') - $nets = $neutron_config['predefined_networks'] - - if $segmentation_type == 'vlan' { - $network_type = 'vlan' - $segmentation_id_range = split(try_get_value($neutron_config, 'L2/phys_nets/physnet2/vlan_range', ''), ':') - } elsif $segmentation_type == 'gre' { - $network_type = 'gre' - $segmentation_id_range = split(try_get_value($neutron_config, 'L2/tunnel_id_ranges', ''), ':') - } else { - $network_type = 'vxlan' - $segmentation_id_range = split(try_get_value($neutron_config, 'L2/tunnel_id_ranges', ''), ':') - } - 
- $fallback_segment_id = $segmentation_id_range[0] - $private_net_segment_id = try_get_value($nets, "${private_net}/L2/segment_id", $fallback_segment_id) - $private_net_physnet = try_get_value($nets, "${private_net}/L2/physnet", false) - $private_net_shared = try_get_value($nets, "${private_net}/shared", false) - $private_net_router_external = false - $floating_net_physnet = try_get_value($nets, "${floating_net}/L2/physnet", false) - $floating_net_router_external = try_get_value($nets, "${floating_net}/L2/router_ext") - $floating_net_floating_range = try_get_value($nets, "${floating_net}/L3/floating", '') - $floating_net_shared = try_get_value($nets, "${floating_net}/shared", false) - - if !empty($floating_net_floating_range) { - $floating_net_allocation_pool = format_allocation_pools($floating_net_floating_range) - } - - $tenant_name = try_get_value($access_hash, 'tenant', 'admin') - - neutron_network { $floating_net : - ensure => 'present', - provider_physical_network => $floating_net_physnet, - provider_network_type => 'local', - router_external => $floating_net_router_external, - tenant_name => $tenant_name, - shared => $floating_net_shared - } - - neutron_subnet { "${floating_net}__subnet" : - ensure => 'present', - cidr => try_get_value($nets, "${floating_net}/L3/subnet"), - network_name => $floating_net, - tenant_name => $tenant_name, - gateway_ip => try_get_value($nets, "${floating_net}/L3/gateway"), - enable_dhcp => false, - allocation_pools => $floating_net_allocation_pool, - } - - neutron_network { $private_net : - ensure => 'present', - provider_physical_network => $private_net_physnet, - provider_network_type => $network_type, - provider_segmentation_id => $private_net_segment_id, - router_external => $private_net_router_external, - tenant_name => $tenant_name, - shared => $private_net_shared - } - - neutron_subnet { "${private_net}__subnet" : - ensure => 'present', - cidr => try_get_value($nets, "${private_net}/L3/subnet"), - network_name => 
$private_net, - tenant_name => $tenant_name, - gateway_ip => try_get_value($nets, "${private_net}/L3/gateway"), - enable_dhcp => true, - dns_nameservers => try_get_value($nets, "${private_net}/L3/nameservers"), - } - - if has_key($nets, 'baremetal') { - $baremetal_physnet = try_get_value($nets, 'baremetal/L2/physnet', false) - $baremetal_segment_id = try_get_value($nets, 'baremetal/L2/segment_id') - $baremetal_router_external = try_get_value($nets, 'baremetal/L2/router_ext') - $baremetal_shared = try_get_value($nets, 'baremetal/shared', false) - - neutron_network { 'baremetal' : - ensure => 'present', - provider_physical_network => $baremetal_physnet, - provider_network_type => 'flat', - provider_segmentation_id => $baremetal_segment_id, - router_external => $baremetal_router_external, - tenant_name => $tenant_name, - shared => $baremetal_shared - } - - neutron_subnet { 'baremetal__subnet' : - ensure => 'present', - cidr => try_get_value($nets, 'baremetal/L3/subnet'), - network_name => 'baremetal', - tenant_name => $tenant_name, - gateway_ip => try_get_value($nets, 'baremetal/L3/gateway'), - enable_dhcp => true, - dns_nameservers => try_get_value($nets, 'baremetal/L3/nameservers'), - } - } - -} diff --git a/f2s/resources/openstack-network-networks/meta.yaml b/f2s/resources/openstack-network-networks/meta.yaml deleted file mode 100644 index a1568945..00000000 --- a/f2s/resources/openstack-network-networks/meta.yaml +++ /dev/null @@ -1,21 +0,0 @@ -id: openstack-network-networks -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access: - value: null - fqdn: - value: null - neutron_config: - value: null - primary_controller: - value: null - puppet_modules: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-plugins-l2/actions/run.pp b/f2s/resources/openstack-network-plugins-l2/actions/run.pp deleted file mode 100644 index 60300365..00000000 --- 
a/f2s/resources/openstack-network-plugins-l2/actions/run.pp +++ /dev/null @@ -1,170 +0,0 @@ -notice('MODULAR: openstack-network/plugins/ml2.pp') - -$use_neutron = hiera('use_neutron', false) - -class neutron {} -class { 'neutron' :} - -if $use_neutron { - include ::neutron::params - - $role = hiera('role') - $controller = $role in ['controller', 'primary-controller'] - $primary_controller = $role in ['primary-controller'] - $compute = $role in ['compute'] - - $neutron_config = hiera_hash('neutron_config') - $neutron_server_enable = pick($neutron_config['neutron_server_enable'], true) - - $management_vip = hiera('management_vip') - $service_endpoint = hiera('service_endpoint', $management_vip) - $auth_api_version = 'v2.0' - $identity_uri = "http://${service_endpoint}:5000" - $auth_url = "${identity_uri}/${auth_api_version}" - $auth_password = $neutron_config['keystone']['admin_password'] - $auth_user = pick($neutron_config['keystone']['admin_user'], 'neutron') - $auth_tenant = pick($neutron_config['keystone']['admin_tenant'], 'services') - $auth_region = hiera('region', 'RegionOne') - $auth_endpoint_type = 'internalURL' - - $network_scheme = hiera_hash('network_scheme') - prepare_network_config($network_scheme) - - $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) - $l2_population = try_get_value($neutron_advanced_config, 'neutron_l2_pop', false) - $dvr = try_get_value($neutron_advanced_config, 'neutron_dvr', false) - $segmentation_type = try_get_value($neutron_config, 'L2/segmentation_type') - - if $segmentation_type == 'vlan' { - $net_role_property = 'neutron/private' - $iface = get_network_role_property($net_role_property, 'phys_dev') - $physical_net_mtu = pick(get_transformation_property('mtu', $iface[0]), '1500') - $overlay_net_mtu = $physical_net_mtu - $enable_tunneling = false - $network_vlan_ranges_physnet2 = try_get_value($neutron_config, 'L2/phys_nets/physnet2/vlan_range') - $network_vlan_ranges = 
["physnet2:${network_vlan_ranges_physnet2}"] - $physnet2_bridge = try_get_value($neutron_config, 'L2/phys_nets/physnet2/bridge') - $physnet2 = "physnet2:${physnet2_bridge}" - $physnet_ironic_bridge = try_get_value($neutron_config, 'L2/phys_nets/physnet-ironic/bridge', false) - - if $physnet_ironic_bridge { - $bridge_mappings = [$physnet2, "physnet-ironic:${physnet_ironic_bridge}"] - } else { - $bridge_mappings = [$physnet2] - } - - $physical_network_mtus = ["physnet2:${physical_net_mtu}"] - $tunnel_id_ranges = [] - $network_type = 'vlan' - $tunnel_types = [] - } else { - $net_role_property = 'neutron/mesh' - $tunneling_ip = get_network_role_property($net_role_property, 'ipaddr') - $iface = get_network_role_property($net_role_property, 'phys_dev') - $physical_net_mtu = pick(get_transformation_property('mtu', $iface[0]), '1500') - $tunnel_id_ranges = [try_get_value($neutron_config, 'L2/tunnel_id_ranges')] - $network_vlan_ranges = [] - $physical_network_mtus = [] - - if $segmentation_type == 'gre' { - $mtu_offset = '42' - $network_type = 'gre' - } else { - # vxlan is the default segmentation type for non-vlan cases - $mtu_offset = '50' - $network_type = 'vxlan' - } - $tunnel_types = [$network_type] - - if $physical_net_mtu { - $overlay_net_mtu = $physical_net_mtu - $mtu_offset - } else { - $overlay_net_mtu = '1500' - $mtu_offset - } - - $enable_tunneling = true - } - - $type_drivers = ['local', 'flat', 'vlan', 'gre', 'vxlan'] - $tenant_network_types = ['flat', $network_type] - $mechanism_drivers = split(try_get_value($neutron_config, 'L2/mechanism_drivers', 'openvswitch,l2population'), ',') - $flat_networks = ['*'] - $vxlan_group = '224.0.0.1' - - class { 'neutron::plugins::ml2': - type_drivers => $type_drivers, - tenant_network_types => $tenant_network_types, - mechanism_drivers => $mechanism_drivers, - flat_networks => $flat_networks, - network_vlan_ranges => $network_vlan_ranges, - tunnel_id_ranges => $tunnel_id_ranges, - vxlan_group => $vxlan_group, - vni_ranges 
=> $tunnel_id_ranges, - physical_network_mtus => $physical_network_mtus, - path_mtu => $overlay_net_mtu, - } - - class { 'neutron::agents::ml2::ovs': - bridge_mappings => $bridge_mappings, - enable_tunneling => $enable_tunneling, - local_ip => $tunneling_ip, - tunnel_types => $tunnel_types, - enable_distributed_routing => $dvr, - l2_population => $l2_population, - arp_responder => $l2_population, - manage_vswitch => false, - manage_service => true, - enabled => true, - } - - # Synchronize database after plugin was configured - if $primary_controller { - include ::neutron::db::sync - } - - if ! $compute { - if $neutron_server_enable { - $service_ensure = 'running' - } else { - $service_ensure = 'stopped' - } - service { 'neutron-server': - name => $::neutron::params::server_service, - enable => $neutron_server_enable, - ensure => $service_ensure, - hasstatus => true, - hasrestart => true, - tag => 'neutron-service', - } -> - exec { 'waiting-for-neutron-api': - environment => [ - "OS_TENANT_NAME=${auth_tenant}", - "OS_USERNAME=${auth_user}", - "OS_PASSWORD=${auth_password}", - "OS_AUTH_URL=${auth_url}", - "OS_REGION_NAME=${auth_region}", - "OS_ENDPOINT_TYPE=${auth_endpoint_type}", - ], - path => '/usr/sbin:/usr/bin:/sbin:/bin', - tries => '30', - try_sleep => '4', - command => 'neutron net-list --http-timeout=4 2>&1 > /dev/null', - provider => 'shell' - } - - $ha_agent = try_get_value($neutron_advanced_config, 'l2_agent_ha', true) - if $ha_agent { - #Exec<| title == 'waiting-for-neutron-api' |> -> - class { 'cluster::neutron::ovs' : - primary => $primary_controller, - } - } - } - - # Stub for upstream neutron manifests - package { 'neutron': - name => 'binutils', - ensure => 'installed', - } - -} diff --git a/f2s/resources/openstack-network-plugins-l2/meta.yaml b/f2s/resources/openstack-network-plugins-l2/meta.yaml deleted file mode 100644 index d3c6a937..00000000 --- a/f2s/resources/openstack-network-plugins-l2/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -id: 
openstack-network-plugins-l2 -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - network_scheme: - value: null - neutron_advanced_configuration: - value: null - neutron_config: - value: null - puppet_modules: - value: null - quantum_settings: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-routers/actions/run.pp b/f2s/resources/openstack-network-routers/actions/run.pp deleted file mode 100644 index 7d2eb7cb..00000000 --- a/f2s/resources/openstack-network-routers/actions/run.pp +++ /dev/null @@ -1,32 +0,0 @@ -notice('MODULAR: openstack-network/routers.pp') - -$use_neutron = hiera('use_neutron', false) - -if $use_neutron { - - $access_hash = hiera('access', { }) - $keystone_admin_tenant = pick($access_hash['tenant'], 'admin') - $neutron_config = hiera_hash('neutron_config') - $floating_net = try_get_value($neutron_config, 'default_floating_net', 'net04_ext') - $private_net = try_get_value($neutron_config, 'default_private_net', 'net04') - $default_router = try_get_value($neutron_config, 'default_router', 'router04') - $nets = $neutron_config['predefined_networks'] - - neutron_router { $default_router: - ensure => 'present', - gateway_network_name => $floating_net, - name => $default_router, - tenant_name => $keystone_admin_tenant, - } -> - - neutron_router_interface { "${default_router}:${private_net}__subnet": - ensure => 'present', - } - - if has_key($nets, 'baremetal') { - neutron_router_interface { "${default_router}:baremetal__subnet": - ensure => 'present', - require => Neutron_router[$default_router] - } - } -} diff --git a/f2s/resources/openstack-network-routers/meta.yaml b/f2s/resources/openstack-network-routers/meta.yaml deleted file mode 100644 index c46d43ca..00000000 --- a/f2s/resources/openstack-network-routers/meta.yaml +++ /dev/null @@ -1,21 +0,0 
@@ -id: openstack-network-routers -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access: - value: null - fqdn: - value: null - neutron_config: - value: null - primary_controller: - value: null - puppet_modules: - value: null - role: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-server-config/actions/run.pp b/f2s/resources/openstack-network-server-config/actions/run.pp deleted file mode 100644 index ad088e61..00000000 --- a/f2s/resources/openstack-network-server-config/actions/run.pp +++ /dev/null @@ -1,95 +0,0 @@ -notice('MODULAR: openstack-network/server-config.pp') - -$use_neutron = hiera('use_neutron', false) - -class neutron { } -class { 'neutron' : } - -if $use_neutron { - - $neutron_config = hiera_hash('neutron_config') - $neutron_server_enable = pick($neutron_config['neutron_server_enable'], true) - $database_vip = hiera('database_vip') - $management_vip = hiera('management_vip') - $service_endpoint = hiera('service_endpoint', $management_vip) - $nova_endpoint = hiera('nova_endpoint', $management_vip) - $nova_hash = hiera_hash('nova', { }) - $primary_controller = hiera('primary_controller', false) - - $neutron_db_password = $neutron_config['database']['passwd'] - $neutron_db_user = try_get_value($neutron_config, 'database/user', 'neutron') - $neutron_db_name = try_get_value($neutron_config, 'database/name', 'neutron') - $neutron_db_host = try_get_value($neutron_config, 'database/host', $database_vip) - - $neutron_db_uri = "mysql://${neutron_db_user}:${neutron_db_password}@${neutron_db_host}/${neutron_db_name}?&read_timeout=60" - - $auth_password = $neutron_config['keystone']['admin_password'] - $auth_user = pick($neutron_config['keystone']['admin_user'], 'neutron') - $auth_tenant = pick($neutron_config['keystone']['admin_tenant'], 'services') - $auth_region = hiera('region', 'RegionOne') - $auth_endpoint_type = 'internalURL' - - $auth_api_version = 'v2.0' - $identity_uri = 
"http://${service_endpoint}:5000/" - #$auth_url = "${identity_uri}${auth_api_version}" - $nova_admin_auth_url = "http://${service_endpoint}:35357/" - $nova_url = "http://${nova_endpoint}:8774/v2" - - $service_workers = pick($neutron_config['workers'], min(max($::processorcount, 2), 16)) - - $neutron_advanced_config = hiera_hash('neutron_advanced_configuration', { }) - $dvr = pick($neutron_advanced_config['neutron_dvr'], false) - - $nova_auth_user = pick($nova_hash['user'], 'nova') - $nova_auth_password = $nova_hash['user_password'] - $nova_auth_tenant = pick($nova_hash['tenant'], 'services') - - class { 'neutron::server': - sync_db => false, - - auth_password => $auth_password, - auth_tenant => $auth_tenant, - auth_region => $auth_region, - auth_user => $auth_user, - identity_uri => $identity_uri, - auth_uri => $identity_uri, - - database_retry_interval => '2', - database_connection => $neutron_db_uri, - database_max_retries => '-1', - - agent_down_time => '30', - allow_automatic_l3agent_failover => true, - - api_workers => $service_workers, - rpc_workers => $service_workers, - - router_distributed => $dvr, - enabled => false, #$neutron_server_enable, - manage_service => true, - } - - include neutron::params - tweaks::ubuntu_service_override { "$::neutron::params::server_service": - package_name => $neutron::params::server_package ? 
{ - false => $neutron::params::package_name, - default => $neutron::params::server_package - } - } - - class { 'neutron::server::notifications': - nova_url => $nova_url, - auth_url => $nova_admin_auth_url, - username => $nova_auth_user, - tenant_name => $nova_auth_tenant, - password => $nova_auth_password, - region_name => $auth_region, - } - - # Stub for Nuetron package - package { 'neutron': - name => 'binutils', - ensure => 'installed', - } - -} diff --git a/f2s/resources/openstack-network-server-config/meta.yaml b/f2s/resources/openstack-network-server-config/meta.yaml deleted file mode 100644 index 711bd549..00000000 --- a/f2s/resources/openstack-network-server-config/meta.yaml +++ /dev/null @@ -1,33 +0,0 @@ -id: openstack-network-server-config -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - neutron_advanced_configuration: - value: null - neutron_config: - value: null - nova: - value: null - nova_endpoint: - value: null - primary_controller: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-server-nova/actions/run.pp b/f2s/resources/openstack-network-server-nova/actions/run.pp deleted file mode 100644 index 721a8c2f..00000000 --- a/f2s/resources/openstack-network-server-nova/actions/run.pp +++ /dev/null @@ -1,98 +0,0 @@ -notice('MODULAR: openstack-network/server-nova.pp') - -$use_neutron = hiera('use_neutron', false) - -if $use_neutron { - $neutron_config = hiera_hash('neutron_config') - $management_vip = hiera('management_vip') - $service_endpoint = hiera('service_endpoint', $management_vip) - $neutron_endpoint = hiera('neutron_endpoint', $management_vip) - $admin_password = try_get_value($neutron_config, 'keystone/admin_password') - $admin_tenant_name = try_get_value($neutron_config, 
'keystone/admin_tenant', 'services') - $admin_username = try_get_value($neutron_config, 'keystone/admin_user', 'neutron') - $region_name = hiera('region', 'RegionOne') - $auth_api_version = 'v2.0' - $admin_identity_uri = "http://${service_endpoint}:35357" - $admin_auth_url = "${admin_identity_uri}/${auth_api_version}" - $neutron_url = "http://${neutron_endpoint}:9696" - $neutron_ovs_bridge = 'br-int' - $conf_nova = pick($neutron_config['conf_nova'], true) - $floating_net = pick($neutron_config['default_floating_net'], 'net04_ext') - - class { 'nova::network::neutron' : - neutron_admin_password => $admin_password, - neutron_admin_tenant_name => $admin_tenant_name, - neutron_region_name => $region_name, - neutron_admin_username => $admin_username, - neutron_admin_auth_url => $admin_auth_url, - neutron_url => $neutron_url, - neutron_ovs_bridge => $neutron_ovs_bridge, - } - - if $conf_nova { - include nova::params - service { 'nova-api': - ensure => 'running', - name => $nova::params::api_service_name, - } - - nova_config { 'DEFAULT/default_floating_pool': value => $floating_net } - Nova_config<| |> ~> Service['nova-api'] - } - -} else { - - $ensure_package = 'installed' - $private_interface = hiera('private_int', undef) - $public_interface = hiera('public_int', undef) - $fixed_range = hiera('fixed_network_range', undef) - $network_manager = hiera('network_manager', undef) - $network_config = hiera('network_config', { }) - $num_networks = hiera('num_networks', undef) - $network_size = hiera('network_size', undef) - $nameservers = hiera('dns_nameservers', undef) - $enable_nova_net = false - #NOTE(degorenko): lp/1501767 - if $nameservers { - if count($nameservers) >= 2 { - $dns_opts = "--dns1 ${nameservers[0]} --dns2 ${nameservers[1]}" - } else { - $dns_opts = "--dns1 ${nameservers[0]}" - } - } else { - $dns_opts = "" - } - - class { 'nova::network' : - ensure_package => $ensure_package, - private_interface => $private_interface, - public_interface => $public_interface, 
- fixed_range => $fixed_range, - floating_range => false, - network_manager => $network_manager, - config_overrides => $network_config, - create_networks => false, # lp/1501767 - num_networks => $num_networks, - network_size => $network_size, - dns1 => $nameservers[0], - dns2 => $nameservers[1], - enabled => $enable_nova_net, - install_service => false, # because controller - } - - #NOTE(degorenko): lp/1501767 - $primary_controller = hiera('primary_controller') - if $primary_controller { - exec { 'create_private_nova_network': - path => '/usr/bin', - command => "nova-manage network create novanetwork ${fixed_range} ${num_networks} ${network_size} ${dns_opts}", - } - } - - # NOTE(aglarendil): lp/1381164 - nova_config { 'DEFAULT/force_snat_range' : value => '0.0.0.0/0' } - - # stub resource for 'nova::network' class - file { '/etc/nova/nova.conf' : ensure => 'present' } - -} diff --git a/f2s/resources/openstack-network-server-nova/meta.yaml b/f2s/resources/openstack-network-server-nova/meta.yaml deleted file mode 100644 index 9e74caf1..00000000 --- a/f2s/resources/openstack-network-server-nova/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -id: openstack-network-server-nova -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - neutron_config: - value: null - neutron_endpoint: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - use_neutron: - value: null diff --git a/f2s/resources/openstack-network-start/meta.yaml b/f2s/resources/openstack-network-start/meta.yaml deleted file mode 100644 index 2f533df8..00000000 --- a/f2s/resources/openstack-network-start/meta.yaml +++ /dev/null @@ -1,4 +0,0 @@ -id: openstack-network-start -handler: none -version: '8.0' -inputs: {} diff --git a/f2s/resources/pre_hiera_config/actions/run.pp b/f2s/resources/pre_hiera_config/actions/run.pp deleted file mode 100644 index 
e23a1cb7..00000000 --- a/f2s/resources/pre_hiera_config/actions/run.pp +++ /dev/null @@ -1,75 +0,0 @@ -notice('MODULAR: hiera.pp') - -$deep_merge_package_name = $::osfamily ? { - /RedHat/ => 'rubygem-deep_merge', - /Debian/ => 'ruby-deep-merge', -} - -$data_dir = '/etc/hiera' -$data = [ - 'override/node/%{::fqdn}', - 'override/class/%{calling_class}', - 'override/module/%{calling_module}', - 'override/plugins', - 'override/common', - 'class/%{calling_class}', - 'module/%{calling_module}', - 'nodes', - 'globals', - 'astute' -] -$astute_data_file = '/etc/astute.yaml' -$hiera_main_config = '/etc/hiera.yaml' -$hiera_puppet_config = '/etc/puppet/hiera.yaml' -$hiera_data_file = "${data_dir}/astute.yaml" - -File { - owner => 'root', - group => 'root', - mode => '0644', -} - -$hiera_config_content = inline_template(' ---- -:backends: - - yaml - -:hierarchy: -<% @data.each do |name| -%> - - <%= name %> -<% end -%> - -:yaml: - :datadir: <%= @data_dir %> -:merge_behavior: deeper -:logger: noop -') - -file { 'hiera_data_dir' : - ensure => 'directory', - path => $data_dir, -} - -file { 'hiera_config' : - ensure => 'present', - path => $hiera_main_config, - content => $hiera_config_content, -} - -file { 'hiera_data_astute' : - ensure => 'symlink', - path => $hiera_data_file, - target => $astute_data_file, -} - -file { 'hiera_puppet_config' : - ensure => 'symlink', - path => $hiera_puppet_config, - target => $hiera_main_config, -} - -# needed to support the 'deeper' merge_behavior setting for hiera -package { 'rubygem-deep_merge': - ensure => present, - name => $deep_merge_package_name, -} diff --git a/f2s/resources/pre_hiera_config/meta.yaml b/f2s/resources/pre_hiera_config/meta.yaml deleted file mode 100644 index d55b2fbf..00000000 --- a/f2s/resources/pre_hiera_config/meta.yaml +++ /dev/null @@ -1,11 +0,0 @@ -id: pre_hiera_config -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null diff --git 
a/f2s/resources/public_vip_ping/actions/run.pp b/f2s/resources/public_vip_ping/actions/run.pp deleted file mode 100644 index d8f2eaea..00000000 --- a/f2s/resources/public_vip_ping/actions/run.pp +++ /dev/null @@ -1,17 +0,0 @@ -notice('MODULAR: public_vip_ping.pp') - -prepare_network_config(hiera('network_scheme', {})) -$run_ping_checker = hiera('run_ping_checker', true) -$network_scheme = hiera('network_scheme') -$public_iface = get_network_role_property('public/vip', 'interface') -$ping_host_list = $network_scheme['endpoints'][$public_iface]['gateway'] - -if $run_ping_checker { - $vip = 'vip__public' - - cluster::virtual_ip_ping { $vip : - host_list => $ping_host_list, - } - -} - diff --git a/f2s/resources/public_vip_ping/meta.yaml b/f2s/resources/public_vip_ping/meta.yaml deleted file mode 100644 index 90d9f79c..00000000 --- a/f2s/resources/public_vip_ping/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: public_vip_ping -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null - run_ping_checker: - value: null diff --git a/f2s/resources/rabbitmq/actions/run.pp b/f2s/resources/rabbitmq/actions/run.pp deleted file mode 100644 index 4276965d..00000000 --- a/f2s/resources/rabbitmq/actions/run.pp +++ /dev/null @@ -1,174 +0,0 @@ -notice('MODULAR: rabbitmq.pp') - -$network_scheme = hiera_hash('network_scheme', {}) -prepare_network_config($network_scheme) - -$queue_provider = hiera('queue_provider', 'rabbitmq') - -if $queue_provider == 'rabbitmq' { - $erlang_cookie = hiera('erlang_cookie', 'EOKOWXQREETZSHFNTPEY') - $version = hiera('rabbit_version', '3.3.5') - $deployment_mode = hiera('deployment_mode', 'ha_compact') - $amqp_port = hiera('amqp_port', '5673') - $rabbit_hash = hiera_hash('rabbit_hash', - { - 'user' => false, - 'password' => false, - } - ) - $debug = pick($rabbit_hash['debug'], hiera('debug', false)) - $enabled = 
pick($rabbit_hash['enabled'], true) - $use_pacemaker = pick($rabbit_hash['pacemaker'], true) - - case $::osfamily { - 'RedHat': { - $command_timeout = "'-s KILL'" - $package_provider = 'yum' - } - 'Debian': { - $command_timeout = "'--signal=KILL'" - $package_provider = 'apt' - } - default: { - fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem},\ - module ${module_name} only support osfamily RedHat and Debian") - } - } - - if ($debug) { - # FIXME(aschultz): debug wasn't introduced until v3.5.0, when we upgrade - # we should change info to debug. Also don't forget to fix tests! - $rabbit_levels = '[{connection,info}]' - } else { - $rabbit_levels = '[{connection,info}]' - } - - $cluster_partition_handling = hiera('rabbit_cluster_partition_handling', 'autoheal') - $mnesia_table_loading_timeout = hiera('mnesia_table_loading_timeout', '10000') - $rabbitmq_bind_ip_address = pick(get_network_role_property('mgmt/messaging', 'ipaddr'), 'UNSET') - $management_bind_ip_address = hiera('management_bind_ip_address', '127.0.0.1') - - # NOTE(mattymo) UNSET is a puppet ref, but would break real configs - if $rabbitmq_bind_ip_address == 'UNSET' { - $epmd_bind_ip_address = '0.0.0.0' - } else { - $epmd_bind_ip_address = $rabbitmq_bind_ip_address - } - - # NOTE(bogdando) not a hash. 
Keep an indentation as is - $rabbit_tcp_listen_options = hiera('rabbit_tcp_listen_options', - '[ - binary, - {packet, raw}, - {reuseaddr, true}, - {backlog, 128}, - {nodelay, true}, - {exit_on_close, false}, - {keepalive, true} - ]' - ) - $config_kernel_variables = hiera('rabbit_config_kernel_variables', - { - 'inet_dist_listen_min' => '41055', - 'inet_dist_listen_max' => '41055', - 'inet_default_connect_options' => '[{nodelay,true}]', - 'net_ticktime' => '10', - } - ) - $config_variables = hiera('rabbit_config_variables', - { - 'log_levels' => $rabbit_levels, - 'default_vhost' => "<<\"/\">>", - 'default_permissions' => '[<<".*">>, <<".*">>, <<".*">>]', - 'tcp_listen_options' => $rabbit_tcp_listen_options, - 'cluster_partition_handling' => $cluster_partition_handling, - 'mnesia_table_loading_timeout' => $mnesia_table_loading_timeout, - 'collect_statistics_interval' => '30000', - 'disk_free_limit' => '5000000', # Corosync checks for disk space, reduce rabbitmq check to 5M see LP#1493520 comment #15 - } - ) - $config_rabbitmq_management_variables = hiera('rabbit_config_management_variables', - { - 'rates_mode' => 'none', - 'listener' => "[{port, 15672}, {ip,\"${management_bind_ip_address}\"}]", - } - ) - - $thread_pool_calc = min(100,max(12*$physicalprocessorcount,30)) - - if $deployment_mode == 'ha_compact' { - $rabbit_pid_file = '/var/run/rabbitmq/p_pid' - } else { - $rabbit_pid_file = '/var/run/rabbitmq/pid' - } - $environment_variables = hiera('rabbit_environment_variables', - { - 'SERVER_ERL_ARGS' => "\"+K true +A${thread_pool_calc} +P 1048576\"", - 'ERL_EPMD_ADDRESS' => $epmd_bind_ip_address, - 'PID_FILE' => $rabbit_pid_file, - } - ) - - if ($enabled) { - class { '::rabbitmq': - admin_enable => true, - repos_ensure => false, - package_provider => $package_provider, - package_source => undef, - service_ensure => 'running', - service_manage => true, - port => $amqp_port, - delete_guest_user => true, - default_user => $rabbit_hash['user'], - default_pass => 
$rabbit_hash['password'], - # NOTE(bogdando) set to true and uncomment the lines below, if puppet should create a cluster - # We don't want it as far as OCF script creates the cluster - config_cluster => false, - #erlang_cookie => $erlang_cookie, - #wipe_db_on_cookie_change => true, - #cluster_nodes => $rabbitmq_cluster_nodes, - #cluster_node_type => 'disc', - #cluster_partition_handling => $cluster_partition_handling, - version => $version, - node_ip_address => $rabbitmq_bind_ip_address, - config_kernel_variables => $config_kernel_variables, - config_rabbitmq_management_variables => $config_rabbitmq_management_variables, - config_variables => $config_variables, - environment_variables => $environment_variables, - } - - if ($use_pacemaker) { - # Install rabbit-fence daemon - class { 'cluster::rabbitmq_fence': - enabled => $enabled, - require => Class['::rabbitmq'] - } - } - - class { 'nova::rabbitmq': - enabled => $enabled, - userid => $rabbit_hash['user'], - password => $rabbit_hash['password'], - require => Class['::rabbitmq'], - } - - if ($use_pacemaker) { - class { 'pacemaker_wrappers::rabbitmq': - command_timeout => $command_timeout, - debug => $debug, - erlang_cookie => $erlang_cookie, - admin_user => $rabbit_hash['user'], - admin_pass => $rabbit_hash['password'], - host_ip => $rabbitmq_bind_ip_address, - before => Class['nova::rabbitmq'], - } - } - - include rabbitmq::params - tweaks::ubuntu_service_override { 'rabbitmq-server': - package_name => $rabbitmq::params::package_name, - service_name => $rabbitmq::params::service_name, - } - } - -} diff --git a/f2s/resources/rabbitmq/meta.yaml b/f2s/resources/rabbitmq/meta.yaml deleted file mode 100644 index 3339b286..00000000 --- a/f2s/resources/rabbitmq/meta.yaml +++ /dev/null @@ -1,43 +0,0 @@ -id: rabbitmq -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - amqp_port: - value: null - debug: - value: null - deployment_mode: - value: null - erlang_cookie: - value: null - fqdn: - 
value: null - mnesia_table_loading_timeout: - value: null - network_scheme: - value: null - puppet_modules: - value: null - queue_provider: - value: null - rabbit_cluster_partition_handling: - value: null - rabbit_config_kernel_variables: - value: null - rabbit_config_management_variables: - value: null - rabbit_config_variables: - value: null - rabbit_environment_variables: - value: null - rabbit_hash: - value: null - rabbit_tcp_listen_options: - value: null - rabbit_version: - value: null - role: - value: null diff --git a/f2s/resources/role_data/managers/from_file.py b/f2s/resources/role_data/managers/from_file.py deleted file mode 100755 index cc3e62a1..00000000 --- a/f2s/resources/role_data/managers/from_file.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -import yaml -import json - -CURDIR = os.path.dirname(os.path.realpath(__file__)) - -with open(os.path.join(CURDIR, 'test_sample.yaml')) as f: - ARGS = yaml.safe_load(f) - -sys.stdout.write(json.dumps(ARGS)) diff --git a/f2s/resources/role_data/managers/from_nailgun.py b/f2s/resources/role_data/managers/from_nailgun.py deleted file mode 100755 index 507ec0d1..00000000 --- a/f2s/resources/role_data/managers/from_nailgun.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python - -import sys -import json - -from fuelclient.objects.environment import Environment - -ARGS = json.loads(sys.stdin.read()) - -env = Environment(ARGS['env']) -facts = env.get_default_facts('deployment', [ARGS['uid']]) - -sys.stdout.write(json.dumps(facts[0])) diff --git a/f2s/resources/role_data/managers/globals.pp b/f2s/resources/role_data/managers/globals.pp deleted file mode 100644 index 8d1de5a3..00000000 --- a/f2s/resources/role_data/managers/globals.pp +++ /dev/null @@ -1,294 +0,0 @@ -notice('MODULAR: globals.pp') - -$service_token_off = false -$identity = hiera('uid') -$globals_yaml_file = "/etc/puppet/${identity}globals.yaml" - -# remove cached globals values before anything else 
-remove_file($globals_yaml_file) - -$network_scheme = hiera_hash('network_scheme', {}) -if empty($network_scheme) { - fail("Network_scheme not given in the astute.yaml") -} -$network_metadata = hiera_hash('network_metadata', {}) -if empty($network_metadata) { - fail("Network_metadata not given in the astute.yaml") -} - -$node_name = regsubst(hiera('fqdn', $::hostname), '\..*$', '') -$node = $network_metadata['nodes'][$node_name] -if empty($node) { - fail("Node hostname is not defined in the astute.yaml") -} - -prepare_network_config($network_scheme) - -# DEPRICATED -$nodes_hash = hiera('nodes', {}) - -$deployment_mode = hiera('deployment_mode', 'ha_compact') -$roles = $node['node_roles'] -$storage_hash = hiera('storage', {}) -$syslog_hash = hiera('syslog', {}) -$base_syslog_hash = hiera('base_syslog', {}) -$sahara_hash = hiera('sahara', {}) -$murano_hash = hiera('murano', {}) -$heat_hash = hiera_hash('heat', {}) -$vcenter_hash = hiera('vcenter', {}) -$nova_hash = hiera_hash('nova', {}) -$mysql_hash = hiera('mysql', {}) -$rabbit_hash = hiera_hash('rabbit', {}) -$glance_hash = hiera_hash('glance', {}) -$swift_hash = hiera('swift', {}) -$cinder_hash = hiera_hash('cinder', {}) -$ceilometer_hash = hiera('ceilometer',{}) -$access_hash = hiera_hash('access', {}) -$mp_hash = hiera('mp', {}) -$keystone_hash = merge({'service_token_off' => $service_token_off}, - hiera_hash('keystone', {})) - -$node_role = hiera('role') -$dns_nameservers = hiera('dns_nameservers', []) -$use_ceilometer = $ceilometer_hash['enabled'] -$use_neutron = hiera('quantum', false) -$use_ovs = hiera('use_ovs', $use_neutron) -$verbose = true -$debug = hiera('debug', false) -$use_monit = false -$master_ip = hiera('master_ip') -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') -$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3') -$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') 
-$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') -$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') -$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') -$syslog_log_facility_heat = hiera('syslog_log_facility_heat','LOG_LOCAL0') -$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0') -$syslog_log_facility_ceilometer = hiera('syslog_log_facility_ceilometer','LOG_LOCAL0') -$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') - -$nova_report_interval = hiera('nova_report_interval', 60) -$nova_service_down_time = hiera('nova_service_down_time', 180) -$apache_ports = hiera_array('apache_ports', ['80', '8888', '5000', '35357']) - -$openstack_version = hiera('openstack_version', - { - 'keystone' => 'installed', - 'glance' => 'installed', - 'horizon' => 'installed', - 'nova' => 'installed', - 'novncproxy' => 'installed', - 'cinder' => 'installed', - } -) - -$nova_rate_limits = hiera('nova_rate_limits', - { - 'POST' => 100000, - 'POST_SERVERS' => 100000, - 'PUT' => 1000, - 'GET' => 100000, - 'DELETE' => 100000 - } -) - -$cinder_rate_limits = hiera('cinder_rate_limits', - { - 'POST' => 100000, - 'POST_SERVERS' => 100000, - 'PUT' => 100000, - 'GET' => 100000, - 'DELETE' => 100000 - } -) - -$default_gateway = get_default_gateways() -$public_vip = $network_metadata['vips']['public']['ipaddr'] -$management_vip = $network_metadata['vips']['management']['ipaddr'] -$public_vrouter_vip = $network_metadata['vips']['vrouter_pub']['ipaddr'] -$management_vrouter_vip = $network_metadata['vips']['vrouter']['ipaddr'] - -$database_vip = is_hash($network_metadata['vips']['database']) ? { - true => pick($network_metadata['vips']['database']['ipaddr'], $management_vip), - default => $management_vip -} -$service_endpoint = is_hash($network_metadata['vips']['service_endpoint']) ? 
{ - true => pick($network_metadata['vips']['service_endpoint']['ipaddr'], $management_vip), - default => $management_vip -} - -if $use_neutron { - $novanetwork_params = {} - $neutron_config = hiera_hash('quantum_settings') - $network_provider = 'neutron' - $neutron_db_password = $neutron_config['database']['passwd'] - $neutron_user_password = $neutron_config['keystone']['admin_password'] - $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] - $base_mac = $neutron_config['L2']['base_mac'] - $management_network_range = get_network_role_property('mgmt/vip', 'network') -} else { - $neutron_config = {} - $novanetwork_params = hiera('novanetwork_parameters') - $network_size = $novanetwork_params['network_size'] - $num_networks = $novanetwork_params['num_networks'] - $network_provider = 'nova' - if ( $novanetwork_params['network_manager'] == 'FlatDHCPManager') { - $private_int = get_network_role_property('novanetwork/fixed', 'interface') - } else { - $private_int = get_network_role_property('novanetwork/vlan', 'interface') - $vlan_start = $novanetwork_params['vlan_start'] - $network_config = { - 'vlan_start' => $vlan_start, - } - } - $network_manager = "nova.network.manager.${novanetwork_params['network_manager']}" - $management_network_range = hiera('management_network_range') -} - -if $node_role == 'primary-controller' { - $primary_controller = true -} else { - $primary_controller = false -} - -$controllers_hash = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$mountpoints = filter_hash($mp_hash,'point') - -# AMQP configuration -$queue_provider = hiera('queue_provider','rabbitmq') -$rabbit_ha_queues = true - -if !$rabbit_hash['user'] { - $rabbit_hash['user'] = 'nova' -} - -$amqp_port = hiera('amqp_ports', '5673') -if hiera('amqp_hosts', false) { - # using pre-defined in astute.yaml RabbitMQ servers - $amqp_hosts = hiera('amqp_hosts') -} else { - # using RabbitMQ servers on controllers - # 
todo(sv): switch from 'controller' nodes to 'rmq' nodes as soon as it was implemented as additional node-role - $controllers_with_amqp_server = get_node_to_ipaddr_map_by_network_role($controllers_hash, 'mgmt/messaging') - $amqp_nodes = ipsort(values($controllers_with_amqp_server)) - # amqp_hosts() randomize order of RMQ endpoints and put local one first - $amqp_hosts = amqp_hosts($amqp_nodes, $amqp_port, get_network_role_property('mgmt/messaging', 'ipaddr')) -} - -# MySQL and SQLAlchemy backend configuration -$custom_mysql_setup_class = hiera('custom_mysql_setup_class', 'galera') -$max_pool_size = hiera('max_pool_size', min($::processorcount * 5 + 0, 30 + 0)) -$max_overflow = hiera('max_overflow', min($::processorcount * 5 + 0, 60 + 0)) -$max_retries = hiera('max_retries', '-1') -$idle_timeout = hiera('idle_timeout','3600') -$nova_db_password = $nova_hash['db_password'] -$sql_connection = "mysql://nova:${nova_db_password}@${database_vip}/nova?read_timeout = 6 0" -$mirror_type = hiera('mirror_type', 'external') -$multi_host = hiera('multi_host', true) - -# Determine who should get the volume service -if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { - $manage_volumes = 'iscsi' -} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { - $manage_volumes = 'vmdk' -} elsif ($storage_hash['volumes_ceph']) { - $manage_volumes = 'ceph' -} else { - $manage_volumes = false -} - -# Define ceph-related variables -$ceph_primary_monitor_node = get_nodes_hash_by_roles($network_metadata, ['primary-controller']) -$ceph_monitor_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$ceph_rgw_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -#Determine who should be the default backend -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' - $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] -} elsif ($storage_hash['images_vcenter']) { - 
$glance_backend = 'vmware' - $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] -} else { - $glance_backend = 'file' - $glance_known_stores = false -} - -# Define ceilometer-related variables: -# todo: use special node-roles instead controllers in the future -$ceilometer_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define memcached-related variables: -$memcache_roles = hiera('memcache_roles', ['primary-controller', 'controller']) - -# Define node roles, that will carry corosync/pacemaker -$corosync_roles = hiera('corosync_roles', ['primary-controller', 'controller']) - -# Define cinder-related variables -# todo: use special node-roles instead controllers in the future -$cinder_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define horizon-related variables: -# todo: use special node-roles instead controllers in the future -$horizon_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define swift-related variables -# todo(sv): use special node-roles instead controllers in the future -$swift_master_role = 'primary-controller' -$swift_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$swift_proxies = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) -$swift_proxy_caches = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) # memcache for swift -$is_primary_swift_proxy = $primary_controller - -# Define murano-related variables -$murano_roles = ['primary-controller', 'controller'] - -# Define heat-related variables: -$heat_roles = ['primary-controller', 'controller'] - -# Define sahara-related variable -$sahara_roles = ['primary-controller', 'controller'] - -# Define ceilometer-releated parameters -if !$ceilometer_hash['event_time_to_live'] { $ceilometer_hash['event_time_to_live'] = '604800'} -if 
!$ceilometer_hash['metering_time_to_live'] { $ceilometer_hash['metering_time_to_live'] = '604800' } -if !$ceilometer_hash['http_timeout'] { $ceilometer_hash['http_timeout'] = '600' } - -# Define database-related variables: -# todo: use special node-roles instead controllers in the future -$database_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define Nova-API variables: -# todo: use special node-roles instead controllers in the future -$nova_api_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -# Define mongo-related variables -$mongo_roles = ['primary-mongo', 'mongo'] - -# Define neutron-related variables: -# todo: use special node-roles instead controllers in the future -$neutron_nodes = get_nodes_hash_by_roles($network_metadata, ['primary-controller', 'controller']) - -#Define Ironic-related variables: -$ironic_api_nodes = $controllers_hash - -# Change nova_hash to add vnc port to it -# TODO(sbog): change this when we will get rid of global hashes -$public_ssl_hash = hiera('public_ssl') -if $public_ssl_hash['services'] { - $nova_hash['vncproxy_protocol'] = 'https' -} else { - $nova_hash['vncproxy_protocol'] = 'http' -} - -# save all these global variables into hiera yaml file for later use -# by other manifests with hiera function -file { $globals_yaml_file : - ensure => 'present', - mode => '0644', - owner => 'root', - group => 'root', - content => template('osnailyfacter/globals_yaml.erb') -} diff --git a/f2s/resources/role_data/managers/globals.py b/f2s/resources/role_data/managers/globals.py deleted file mode 100755 index 50ee4a39..00000000 --- a/f2s/resources/role_data/managers/globals.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python - -import os -import sys -from subprocess import Popen, PIPE -import yaml -import json - -CURDIR = os.path.dirname(os.path.realpath(__file__)) - -ARGS = json.loads(sys.stdin.read()) - -def execute(command, **env_vars): - env = 
os.environ.copy() - for var in env_vars: - env[var] = env_vars[var] - popen = Popen(command, stdin=PIPE, stdout=PIPE, env=env) - return popen.communicate() - -def prepare_hiera(): - hiera_conf = """:backends: - - yaml -:yaml: - :datadir: /etc/puppet/hieradata -:hierarchy: - - {} -""".format(ARGS['uid']) - with open('/etc/puppet/' + ARGS['uid'] + 'globals.yaml', 'w') as f: - f.write('') - - with open('/etc/puppet/hiera.yaml', 'w') as f: - f.write(hiera_conf) - # dont dump null values - sanitized = {key:ARGS[key] for key in ARGS if ARGS.get(key)} - with open('/etc/puppet/hieradata/{}.yaml'.format(ARGS['uid']), 'w') as f: - f.write(yaml.safe_dump(sanitized)) - -def run_command(): - cmd = [ - 'puppet', 'apply', '--hiera_config=/etc/puppet/hiera.yaml', - '--modulepath={}'.format(ARGS['puppet_modules']), - os.path.join(CURDIR, 'globals.pp')] - return execute(cmd) - -def collect_results(): - path = '/etc/puppet/' + ARGS['uid'] + 'globals.yaml' - with open(path) as f: - return yaml.safe_load(f) - -def main(): - prepare_hiera() - run_command() - rst = collect_results() - sys.stdout.write(json.dumps(rst)) - -if __name__ == '__main__': - main() diff --git a/f2s/resources/role_data/managers/test_sample.yaml b/f2s/resources/role_data/managers/test_sample.yaml deleted file mode 100644 index d1907e45..00000000 --- a/f2s/resources/role_data/managers/test_sample.yaml +++ /dev/null @@ -1,695 +0,0 @@ -access: - email: admin@localhost - metadata: - label: Access - weight: 10 - password: admin - tenant: admin - user: admin -auth_key: '' -auto_assign_floating_ip: false -base_syslog: - syslog_port: '514' - syslog_server: 10.108.0.2 -ceilometer: - db_password: ZcffCIm5 - enabled: false - metering_secret: 7aqxzabx - user_password: FQUfTQ6a -cinder: - db_password: 71kNkN9U - fixed_key: 0ded0202e2a355df942df2bacbaba992658a0345f68f2db6e1bdb6dbb8f682cf - user_password: O2st17AP -cobbler: - profile: ubuntu_1404_x86_64 -corosync: - group: 226.94.1.1 - metadata: - label: Corosync - restrictions: 
- - action: hide - condition: 'true' - weight: 50 - port: '12000' - verified: false -debug: false -deployment_id: 38 -deployment_mode: ha_compact -external_dns: - dns_list: 8.8.8.8, 8.8.4.4 - metadata: - label: Upstream DNS - weight: 90 -external_mongo: - hosts_ip: '' - metadata: - label: External MongoDB - restrictions: - - action: hide - condition: settings:additional_components.mongo.value == false - weight: 20 - mongo_db_name: ceilometer - mongo_password: ceilometer - mongo_replset: '' - mongo_user: ceilometer -external_ntp: - metadata: - label: Upstream NTP - weight: 100 - ntp_list: 0.pool.ntp.org, 1.pool.ntp.org -public_ssl: - metadata: - label: Public TLS - weight: 110 - horizon: true - services: true - cert_source: self_signed - cert_data: - content: 'somedataaboutyourkeypair' - hostname: public.fuel.local -fail_if_error: false -fqdn: node-118.test.domain.local -fuel_version: '6.1' -glance: - db_password: 0UYCFNfc - image_cache_max_size: '5368709120' - user_password: 94lWbeNn -heat: - auth_encryption_key: 8edb899a7e81e56abe51639880aa32dd - db_password: AuaPc3Yq - enabled: true - rabbit_password: Nmn2wr9S - user_password: EWJfBLJ9 -kernel_params: - kernel: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 - nomodeset - metadata: - label: Kernel parameters - weight: 40 -keystone: - admin_token: 0be9G8hj - db_password: 32TWl29R -last_controller: node-131 -libvirt_type: qemu -management_network_range: 192.168.0.0/24 -management_vip: 192.168.0.6 -management_vrouter_vip: 192.168.0.7 -master_ip: 10.108.0.2 -metadata: - label: Common - weight: 30 -mongo: - enabled: false -mp: -- point: '1' - weight: '1' -- point: '2' - weight: '2' -murano: - db_password: R3SuvZbh - enabled: true - rabbit_password: ZNdTAgF3 - user_password: xP8WtHQw -murano_settings: - metadata: - label: Murano Settings - restrictions: - - action: hide - condition: settings:additional_components.murano.value == false - weight: 20 - murano_repo_url: 
http://catalog.openstack.org/ -mysql: - root_password: Lz18BpbQ - wsrep_password: JrlrVOHu -network_metadata: - nodes: - node-118: - swift_zone: '1' - uid: '118' - fqdn: node-118.test.domain.local - network_roles: - keystone/api: 192.168.0.1 - neutron/api: 192.168.0.1 - mgmt/database: 192.168.0.1 - sahara/api: 192.168.0.1 - heat/api: 192.168.0.1 - ceilometer/api: 192.168.0.1 - ex: - ceph/public: 192.168.0.1 - ceph/radosgw: - management: 192.168.0.1 - swift/api: 192.168.0.1 - mgmt/api: 192.168.0.1 - storage: 192.168.1.1 - mgmt/corosync: 192.168.0.1 - cinder/api: 192.168.0.1 - public/vip: - swift/replication: 192.168.1.1 - mgmt/messaging: 192.168.0.1 - neutron/mesh: 192.168.0.1 - admin/pxe: 10.109.0.9 - mongo/db: 192.168.0.1 - neutron/private: - neutron/floating: - fw-admin: 10.109.0.9 - glance/api: 192.168.0.1 - mgmt/vip: 192.168.0.1 - murano/api: 192.168.0.1 - nova/api: 192.168.0.1 - horizon: 192.168.0.1 - mgmt/memcache: 192.168.0.1 - cinder/iscsi: 192.168.1.1 - ceph/replication: 192.168.1.1 - user_node_name: Untitled (6a:e7) - node_roles: - - cinder - name: node-118 - node-128: - swift_zone: '1' - uid: '128' - fqdn: node-128.test.domain.local - network_roles: - keystone/api: 192.168.0.2 - neutron/api: 192.168.0.2 - mgmt/database: 192.168.0.2 - sahara/api: 192.168.0.2 - heat/api: 192.168.0.2 - ceilometer/api: 192.168.0.2 - ex: 172.16.0.2 - ceph/public: 192.168.0.2 - ceph/radosgw: 172.16.0.2 - management: 192.168.0.2 - swift/api: 192.168.0.2 - mgmt/api: 192.168.0.2 - storage: 192.168.1.2 - mgmt/corosync: 192.168.0.2 - cinder/api: 192.168.0.2 - public/vip: 172.16.0.2 - swift/replication: 192.168.1.2 - mgmt/messaging: 192.168.0.2 - neutron/mesh: 192.168.0.2 - admin/pxe: 10.108.0.3 - mongo/db: 192.168.0.2 - neutron/private: - neutron/floating: - fw-admin: 10.108.0.3 - glance/api: 192.168.0.2 - mgmt/vip: 192.168.0.2 - murano/api: 192.168.0.2 - nova/api: 192.168.0.2 - horizon: 192.168.0.2 - mgmt/memcache: 192.168.0.2 - cinder/iscsi: 192.168.1.2 - ceph/replication: 
192.168.1.2 - user_node_name: Untitled (6a:e7) - node_roles: - - primary-controller - name: node-128 - node-129: - swift_zone: '1' - uid: '129' - fqdn: node-129.test.domain.local - network_roles: - keystone/api: 192.168.0.3 - neutron/api: 192.168.0.3 - mgmt/database: 192.168.0.3 - sahara/api: 192.168.0.3 - heat/api: 192.168.0.3 - ceilometer/api: 192.168.0.3 - ex: 172.16.0.3 - ceph/public: 192.168.0.3 - ceph/radosgw: 172.16.0.3 - management: 192.168.0.3 - swift/api: 192.168.0.3 - mgmt/api: 192.168.0.3 - storage: 192.168.1.3 - mgmt/corosync: 192.168.0.3 - cinder/api: 192.168.0.3 - public/vip: 172.16.0.3 - swift/replication: 192.168.1.3 - mgmt/messaging: 192.168.0.3 - neutron/mesh: 192.168.0.3 - admin/pxe: 10.108.0.6 - mongo/db: 192.168.0.3 - neutron/private: - neutron/floating: - fw-admin: 10.108.0.6 - glance/api: 192.168.0.3 - mgmt/vip: 192.168.0.3 - murano/api: 192.168.0.3 - nova/api: 192.168.0.3 - horizon: 192.168.0.3 - mgmt/memcache: 192.168.0.3 - cinder/iscsi: 192.168.1.3 - ceph/replication: 192.168.1.3 - user_node_name: Untitled (6a:e7) - node_roles: - - controller - name: node-129 - node-131: - swift_zone: '1' - uid: '131' - fqdn: node-131.test.domain.local - network_roles: - keystone/api: 192.168.0.4 - neutron/api: 192.168.0.4 - mgmt/database: 192.168.0.4 - sahara/api: 192.168.0.4 - heat/api: 192.168.0.4 - ceilometer/api: 192.168.0.4 - ex: 172.16.0.4 - ceph/public: 192.168.0.4 - ceph/radosgw: 172.16.0.4 - management: 192.168.0.4 - swift/api: 192.168.0.4 - mgmt/api: 192.168.0.4 - storage: 192.168.1.4 - mgmt/corosync: 192.168.0.4 - cinder/api: 192.168.0.4 - public/vip: 172.16.0.4 - swift/replication: 192.168.1.4 - mgmt/messaging: 192.168.0.4 - neutron/mesh: 192.168.0.4 - admin/pxe: 10.109.0.9 - mongo/db: 192.168.0.4 - neutron/private: - neutron/floating: - fw-admin: 10.109.0.9 - glance/api: 192.168.0.4 - mgmt/vip: 192.168.0.4 - murano/api: 192.168.0.4 - nova/api: 192.168.0.4 - horizon: 192.168.0.4 - mgmt/memcache: 192.168.0.4 - cinder/iscsi: 192.168.1.4 - 
ceph/replication: 192.168.1.4 - user_node_name: Untitled (6a:e7) - node_roles: - - controller - name: node-131 - node-132: - swift_zone: '1' - uid: '132' - fqdn: node-132.test.domain.local - network_roles: - keystone/api: 192.168.0.5 - neutron/api: 192.168.0.5 - mgmt/database: 192.168.0.5 - sahara/api: 192.168.0.5 - heat/api: 192.168.0.5 - ceilometer/api: 192.168.0.5 - ex: - ceph/public: 192.168.0.5 - ceph/radosgw: - management: 192.168.0.5 - swift/api: 192.168.0.5 - mgmt/api: 192.168.0.5 - storage: 192.168.1.5 - mgmt/corosync: 192.168.0.5 - cinder/api: 192.168.0.5 - public/vip: - swift/replication: 192.168.1.5 - mgmt/messaging: 192.168.0.5 - neutron/mesh: 192.168.0.5 - admin/pxe: 10.108.0.4 - mongo/db: 192.168.0.5 - neutron/private: - neutron/floating: - fw-admin: 10.108.0.4 - glance/api: 192.168.0.5 - mgmt/vip: 192.168.0.5 - murano/api: 192.168.0.5 - nova/api: 192.168.0.5 - horizon: 192.168.0.5 - mgmt/memcache: 192.168.0.5 - cinder/iscsi: 192.168.1.5 - ceph/replication: 192.168.1.5 - user_node_name: Untitled (6a:e7) - node_roles: - - compute - name: node-132 - vips: - vrouter: - ipaddr: 192.168.0.3 - management: - ipaddr: 192.168.0.2 - public: - ipaddr: 10.109.1.2 - vrouter_pub: - ipaddr: 10.109.1.3 -network_scheme: - endpoints: - br-fw-admin: - IP: - - 10.108.0.7/24 - br-mgmt: - IP: - - 192.168.0.1/24 - gateway: 192.168.0.7 - vendor_specific: - phy_interfaces: - - eth0 - vlans: 101 - br-storage: - IP: - - 192.168.1.1/24 - vendor_specific: - phy_interfaces: - - eth0 - vlans: 102 - interfaces: - eth0: - vendor_specific: - bus_info: '0000:00:03.0' - driver: e1000 - eth1: - vendor_specific: - bus_info: '0000:00:04.0' - driver: e1000 - eth2: - vendor_specific: - bus_info: '0000:00:05.0' - driver: e1000 - eth3: - vendor_specific: - bus_info: '0000:00:06.0' - driver: e1000 - eth4: - vendor_specific: - bus_info: '0000:00:07.0' - driver: e1000 - provider: lnx - roles: - ex: br-ex - public/vip: br-ex - neutron/floating: br-floating - storage: br-storage - keystone/api: 
br-mgmt - neutron/api: br-mgmt - mgmt/database: br-mgmt - sahara/api: br-mgmt - ceilometer/api: br-mgmt - mgmt/vip: br-mgmt - ceph/public: br-mgmt - mgmt/messaging: br-mgmt - management: br-mgmt - swift/api: br-mgmt - mgmt/api: br-mgmt - storage: br-storage - mgmt/corosync: br-mgmt - cinder/api: br-mgmt - swift/replication: br-storage - neutron/mesh: br-mgmt - admin/pxe: br-fw-admin - mongo/db: br-mgmt - neutron/private: br-prv - fw-admin: br-fw-admin - glance/api: br-mgmt - heat/api: br-mgmt - murano/api: br-mgmt - nova/api: br-mgmt - horizon: br-mgmt - mgmt/memcache: br-mgmt - cinder/iscsi: br-storage - ceph/replication: br-storage - neutron/mesh: br-mgmt - transformations: - - action: add-br - name: br-fw-admin - - action: add-br - name: br-mgmt - - action: add-br - name: br-storage - - action: add-port - bridge: br-fw-admin - name: eth0 - - action: add-port - bridge: br-storage - name: eth0.102 - - action: add-port - bridge: br-mgmt - name: eth0.101 - version: '1.1' -neutron_mellanox: - metadata: - enabled: true - label: Mellanox Neutron components - toggleable: false - weight: 50 - plugin: disabled - vf_num: '16' -nodes: -- fqdn: node-118.test.domain.local - internal_address: 192.168.0.1 - internal_netmask: 255.255.255.0 - name: node-118 - role: cinder - storage_address: 192.168.1.1 - storage_netmask: 255.255.255.0 - swift_zone: '118' - uid: '118' - user_node_name: Untitled (1d:4b) -- fqdn: node-128.test.domain.local - internal_address: 192.168.0.2 - internal_netmask: 255.255.255.0 - name: node-128 - public_address: 172.16.0.2 - public_netmask: 255.255.255.0 - role: primary-controller - storage_address: 192.168.1.2 - storage_netmask: 255.255.255.0 - swift_zone: '128' - uid: '128' - user_node_name: Untitled (6f:9d) -- fqdn: node-129.test.domain.local - internal_address: 192.168.0.3 - internal_netmask: 255.255.255.0 - name: node-129 - public_address: 172.16.0.3 - public_netmask: 255.255.255.0 - role: controller - storage_address: 192.168.1.3 - storage_netmask: 
255.255.255.0 - swift_zone: '129' - uid: '129' - user_node_name: Untitled (74:27) -- fqdn: node-131.test.domain.local - internal_address: 192.168.0.4 - internal_netmask: 255.255.255.0 - name: node-131 - public_address: 172.16.0.4 - public_netmask: 255.255.255.0 - role: controller - storage_address: 192.168.1.4 - storage_netmask: 255.255.255.0 - swift_zone: '131' - uid: '131' - user_node_name: Untitled (34:45) -- fqdn: node-132.test.domain.local - internal_address: 192.168.0.5 - internal_netmask: 255.255.255.0 - name: node-132 - role: compute - storage_address: 192.168.1.5 - storage_netmask: 255.255.255.0 - swift_zone: '132' - uid: '132' - user_node_name: Untitled (18:c9) -nova: - db_password: mqnsUMgC - state_path: /var/lib/nova - user_password: fj4wVCEs -nova_quota: false -online: true -openstack_version: 2014.2-6.1 -openstack_version_prev: null -priority: 300 -provision: - codename: trusty - image_data: - /: - container: gzip - format: ext4 - uri: http://10.108.0.2:8080/targetimages/env_38_ubuntu_1404_amd64.img.gz - /boot: - container: gzip - format: ext2 - uri: http://10.108.0.2:8080/targetimages/env_38_ubuntu_1404_amd64-boot.img.gz - metadata: - label: Provision - weight: 80 - method: image -public_network_assignment: - assign_to_all_nodes: false - metadata: - label: Public network assignment - restrictions: - - action: hide - condition: cluster:net_provider != 'neutron' - weight: 50 -neutron_advanced_configuration: - neutron_dvr: true - neutron_l2_pop: true -public_vip: 172.16.0.5 -public_vrouter_vip: 172.16.0.6 -puppet: - manifests: rsync://10.108.0.2:/puppet/2014.2-6.1/manifests/ - modules: rsync://10.108.0.2:/puppet/2014.2-6.1/modules/ -puppet_debug: true -puppet_modules: /vagrant/f2s/fuel-library/deployment/puppet -quantum: true -quantum_settings: - L2: - base_mac: fa:16:3e:00:00:00 - phys_nets: {} - segmentation_type: tun - tunnel_id_ranges: 2:65535 - L3: - use_namespaces: true - database: - passwd: QRpCfPk8 - keystone: - admin_password: oT56DSZF - 
metadata: - metadata_proxy_shared_secret: fp618p5V - predefined_networks: - net04: - L2: - network_type: gre - physnet: null - router_ext: false - segment_id: null - L3: - enable_dhcp: true - floating: null - gateway: 192.168.111.1 - nameservers: - - 8.8.4.4 - - 8.8.8.8 - subnet: 192.168.111.0/24 - shared: false - tenant: admin - net04_ext: - L2: - network_type: local - physnet: null - router_ext: true - segment_id: null - L3: - enable_dhcp: false - floating: 172.16.0.130:172.16.0.254 - gateway: 172.16.0.1 - nameservers: [] - subnet: 172.16.0.0/24 - shared: false - tenant: admin -rabbit: - password: c7fQJeSe -repo_setup: - installer_initrd: - local: /var/www/nailgun/ubuntu/x86_64/images/initrd.gz - remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz - installer_kernel: - local: /var/www/nailgun/ubuntu/x86_64/images/linux - remote_relative: dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux - metadata: - label: Repositories - weight: 50 - repos: - - name: ubuntu - priority: null - section: main universe multiverse - suite: trusty - type: deb - uri: http://archive.ubuntu.com/ubuntu/ - - name: ubuntu-updates - priority: null - section: main universe multiverse - suite: trusty-updates - type: deb - uri: http://archive.ubuntu.com/ubuntu/ - - name: ubuntu-security - priority: null - section: main universe multiverse - suite: trusty-security - type: deb - uri: http://archive.ubuntu.com/ubuntu/ - - name: mos - priority: 1050 - section: main restricted - suite: mos6.1 - type: deb - uri: http://mirror.fuel-infra.org/mos/ubuntu/ - - name: mos-updates - priority: 1050 - section: main restricted - suite: mos6.1-updates - type: deb - uri: http://mirror.fuel-infra.org/mos/ubuntu/ - - name: mos-security - priority: 1050 - section: main restricted - suite: mos6.1-security - type: deb - uri: http://mirror.fuel-infra.org/mos/ubuntu/ - - name: mos-holdback - priority: 1100 - section: main 
restricted - suite: mos6.1-holdback - type: deb - uri: http://mirror.fuel-infra.org/mos/ubuntu/ -resume_guests_state_on_host_boot: true -role: cinder -sahara: - db_password: f0jl4v47 - enabled: true - user_password: pJc2zAOx -status: discover -storage: - ephemeral_ceph: false - images_ceph: false - images_vcenter: false - iser: false - metadata: - label: Storage - weight: 60 - objects_ceph: false - osd_pool_size: '2' - pg_num: 128 - volumes_ceph: false - volumes_lvm: true -storage_network_range: 192.168.1.0/24 -swift: - user_password: BP92J6tg -syslog: - metadata: - label: Syslog - weight: 50 - syslog_port: '514' - syslog_server: '' - syslog_transport: tcp -test_vm_image: - container_format: bare - disk_format: qcow2 - glance_properties: '' - img_name: TestVM - img_path: /usr/share/cirros-testvm/cirros-x86_64-disk.img - min_ram: 64 - os_name: cirros - public: 'true' -uid: '118' -use_cinder: true -use_cow_images: true -use_vcenter: false -user_node_name: Untitled (1d:4b) -workloads_collector: - enabled: true - metadata: - label: Workloads Collector User - restrictions: - - action: hide - condition: 'true' - weight: 10 - password: 1r3ROjcQ - tenant: services - username: workloads_collector diff --git a/f2s/resources/role_data/meta.yaml b/f2s/resources/role_data/meta.yaml deleted file mode 100644 index 6268f2c5..00000000 --- a/f2s/resources/role_data/meta.yaml +++ /dev/null @@ -1,359 +0,0 @@ -# data container resource that will fetch data from nailgun -id: role_data -handler: none -version: 0.0.1 -managers: - - managers/from_nailgun.py - - managers/globals.py -input: - puppet_modules: - type: str! - value: /etc/puppet/modules - uid: - type: str! - value: - env: - type: str! 
- value: - tasks: - value: - vms_conf: - value: - horizon: - value: - ironic: - value: - access: - value: null - access_hash: - value: null - amqp_hosts: - value: null - amqp_port: - value: null - apache_ports: - value: null - auth_key: - value: null - auto_assign_floating_ip: - value: null - base_mac: - value: null - base_syslog: - value: null - base_syslog_hash: - value: null - ceilometer: - value: null - ceilometer_hash: - value: null - ceilometer_nodes: - value: null - ceph_monitor_nodes: - value: null - ceph_primary_monitor_node: - value: null - ceph_rgw_nodes: - value: null - cinder: - value: null - cinder_hash: - value: null - cinder_nodes: - value: null - cinder_rate_limits: - value: null - cobbler: - value: null - corosync: - value: null - corosync_roles: - value: null - custom_mysql_setup_class: - value: null - database_nodes: - value: null - database_vip: - value: null - debug: - value: null - default_gateway: - value: null - deployment_id: - value: null - deployment_mode: - value: null - dns_nameservers: - value: null - external_dns: - value: null - external_mongo: - value: null - external_ntp: - value: null - fail_if_error: - value: null - fqdn: - value: null - fuel_version: - value: null - glance: - value: null - glance_backend: - value: null - glance_hash: - value: null - glance_known_stores: - value: null - heat: - value: null - heat_hash: - value: null - heat_roles: - value: null - horizon_nodes: - value: null - idle_timeout: - value: null - ironic_api_nodes: - value: null - is_primary_swift_proxy: - value: null - kernel_params: - value: null - keystone: - value: null - keystone_hash: - value: null - last_controller: - value: null - libvirt_type: - value: null - manage_volumes: - value: null - management_network_range: - value: null - management_vip: - value: null - management_vrouter_vip: - value: null - master_ip: - value: null - max_overflow: - value: null - max_pool_size: - value: null - max_retries: - value: null - memcache_roles: - value: 
null - metadata: - value: null - mirror_type: - value: null - mongo: - value: null - mongo_roles: - value: null - mountpoints: - value: null - mp: - value: null - multi_host: - value: null - murano: - value: null - murano_hash: - value: null - murano_roles: - value: null - murano_settings: - value: null - mysql: - value: null - mysql_hash: - value: null - network_config: - value: null - network_manager: - value: null - network_metadata: - value: null - network_scheme: - value: null - network_size: - value: null - neutron_advanced_configuration: - value: null - neutron_config: - value: null - neutron_db_password: - value: null - neutron_mellanox: - value: null - neutron_metadata_proxy_secret: - value: null - neutron_nodes: - value: null - neutron_user_password: - value: null - node: - value: null - node_name: - value: null - node_role: - value: null - nodes: - value: null - nodes_hash: - value: null - nova: - value: null - nova_api_nodes: - value: null - nova_db_password: - value: null - nova_hash: - value: null - nova_quota: - value: null - nova_rate_limits: - value: null - nova_report_interval: - value: null - nova_service_down_time: - value: null - novanetwork_params: - value: null - num_networks: - value: null - online: - value: null - openstack_version: - value: null - openstack_version_prev: - value: null - primary_controller: - value: null - priority: - value: null - private_int: - value: null - provision: - value: null - public_network_assignment: - value: null - public_ssl: - value: null - public_vip: - value: null - public_vrouter_vip: - value: null - puppet: - value: null - puppet_debug: - value: null - puppet_modules: - value: null - quantum: - value: null - quantum_settings: - value: null - queue_provider: - value: null - rabbit: - value: null - rabbit_ha_queues: - value: null - rabbit_hash: - value: null - repo_setup: - value: null - resume_guests_state_on_host_boot: - value: null - role: - value: null - roles: - value: null - sahara: - value: null - 
sahara_hash: - value: null - sahara_roles: - value: null - service_endpoint: - value: null - sql_connection: - value: null - status: - value: null - storage: - value: null - storage_hash: - value: null - storage_network_range: - value: null - swift: - value: null - swift_hash: - value: null - swift_master_role: - value: null - swift_nodes: - value: null - swift_proxies: - value: null - swift_proxy_caches: - value: null - syslog: - value: null - syslog_hash: - value: null - syslog_log_facility_ceilometer: - value: null - syslog_log_facility_ceph: - value: null - syslog_log_facility_cinder: - value: null - syslog_log_facility_glance: - value: null - syslog_log_facility_heat: - value: null - syslog_log_facility_keystone: - value: null - syslog_log_facility_murano: - value: null - syslog_log_facility_neutron: - value: null - syslog_log_facility_nova: - value: null - syslog_log_facility_sahara: - value: null - test_vm_image: - value: null - use_ceilometer: - value: null - use_cinder: - value: null - use_cow_images: - value: null - use_monit: - value: null - use_neutron: - value: null - use_ovs: - value: null - use_syslog: - value: null - use_vcenter: - value: null - user_node_name: - value: null - vcenter_hash: - value: null - verbose: - value: null - vlan_start: - value: null - workloads_collector: - value: null diff --git a/f2s/resources/sahara-db/actions/run.pp b/f2s/resources/sahara-db/actions/run.pp deleted file mode 100644 index 68501ffc..00000000 --- a/f2s/resources/sahara-db/actions/run.pp +++ /dev/null @@ -1,57 +0,0 @@ -notice('MODULAR: sahara/db.pp') - -$node_name = hiera('node_name') -$sahara_hash = hiera_hash('sahara_hash', {}) -$sahara_enabled = pick($sahara_hash['enabled'], false) -$mysql_hash = hiera_hash('mysql_hash', {}) -$management_vip = hiera('management_vip', undef) -$database_vip = hiera('database_vip', undef) - -$mysql_root_user = pick($mysql_hash['root_user'], 'root') -$mysql_db_create = pick($mysql_hash['db_create'], true) -$mysql_root_password 
= $mysql_hash['root_password'] - -$db_user = pick($sahara_hash['db_user'], 'sahara') -$db_name = pick($sahara_hash['db_name'], 'sahara') -$db_password = pick($sahara_hash['db_password'], $mysql_root_password) - -$db_host = pick($sahara_hash['db_host'], $database_vip) -$db_create = pick($sahara_hash['db_create'], $mysql_db_create) -$db_root_user = pick($sahara_hash['root_user'], $mysql_root_user) -$db_root_password = pick($sahara_hash['root_password'], $mysql_root_password) - -$allowed_hosts = [ $node_name, 'localhost', '127.0.0.1', '%' ] - -validate_string($mysql_root_user) - -if $sahara_enabled and $db_create { - - class { 'galera::client': - custom_setup_class => hiera('mysql_custom_setup_class', 'galera'), - } - - class { 'sahara::db::mysql': - user => $db_user, - password => $db_password, - dbname => $db_name, - allowed_hosts => $allowed_hosts, - } - - class { 'osnailyfacter::mysql_access': - db_host => $db_host, - db_user => $db_root_user, - db_password => $db_root_password, - } - - Class['galera::client'] -> - Class['osnailyfacter::mysql_access'] -> - Class['sahara::db::mysql'] - -} - -class mysql::config {} -include mysql::config -class mysql::server {} -include mysql::server -class sahara::api {} -include sahara::api diff --git a/f2s/resources/sahara-db/meta.yaml b/f2s/resources/sahara-db/meta.yaml deleted file mode 100644 index 4f4542b0..00000000 --- a/f2s/resources/sahara-db/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -id: sahara-db -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - database_vip: - value: null - fqdn: - value: null - management_vip: - value: null - mysql_hash: - value: null - node_name: - value: null - puppet_modules: - value: null - role: - value: null - sahara: - value: null - sahara_hash: - value: null diff --git a/f2s/resources/sahara-keystone/actions/run.pp b/f2s/resources/sahara-keystone/actions/run.pp deleted file mode 100644 index 546018a0..00000000 --- a/f2s/resources/sahara-keystone/actions/run.pp 
+++ /dev/null @@ -1,34 +0,0 @@ -notice('MODULAR: sahara/keystone.pp') - -$sahara_hash = hiera_hash('sahara_hash', {}) -$public_ssl_hash = hiera('public_ssl') -$public_vip = hiera('public_vip') -$admin_address = hiera('management_vip') -$api_bind_port = '8386' -$sahara_user = pick($sahara_hash['user'], 'sahara') -$sahara_password = pick($sahara_hash['user_password']) -$tenant = pick($sahara_hash['tenant'], 'services') -$region = pick($sahara_hash['region'], hiera('region', 'RegionOne')) -$service_name = pick($sahara_hash['service_name'], 'sahara') -$public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} -$public_url = "${public_protocol}://${public_address}:${api_bind_port}/v1.1/%(tenant_id)s" -$admin_url = "http://${admin_address}:${api_bind_port}/v1.1/%(tenant_id)s" - -class { 'sahara::keystone::auth': - auth_name => $sahara_user, - password => $sahara_password, - service_type => 'data_processing', - service_name => $service_name, - region => $region, - tenant => $tenant, - public_url => $public_url, - admin_url => $admin_url, - internal_url => $admin_url, -} diff --git a/f2s/resources/sahara-keystone/meta.yaml b/f2s/resources/sahara-keystone/meta.yaml deleted file mode 100644 index 75eff871..00000000 --- a/f2s/resources/sahara-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: sahara-keystone -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - sahara_hash: - value: null diff --git a/f2s/resources/sahara/actions/run.pp b/f2s/resources/sahara/actions/run.pp deleted file mode 100644 index 82b1c42b..00000000 --- a/f2s/resources/sahara/actions/run.pp +++ /dev/null @@ -1,156 +0,0 @@ 
-notice('MODULAR: sahara.pp') - -prepare_network_config(hiera('network_scheme', {})) - -$access_admin = hiera_hash('access_hash', {}) -$sahara_hash = hiera_hash('sahara_hash', {}) -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$public_ssl_hash = hiera('public_ssl') -$ceilometer_hash = hiera_hash('ceilometer_hash', {}) -$primary_controller = hiera('primary_controller') -$public_vip = hiera('public_vip') -$database_vip = hiera('database_vip', undef) -$management_vip = hiera('management_vip') -$use_neutron = hiera('use_neutron', false) -$service_endpoint = hiera('service_endpoint') -$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara') -$debug = pick($sahara_hash['debug'], hiera('debug', false)) -$verbose = pick($sahara_hash['verbose'], hiera('verbose', true)) -$use_syslog = hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$rabbit_ha_queues = hiera('rabbit_ha_queues') -$amqp_port = hiera('amqp_port') -$amqp_hosts = hiera('amqp_hosts') - -################################################################# - -if $sahara_hash['enabled'] { - $firewall_rule = '201 sahara-api' - $api_bind_port = '8386' - $api_bind_host = get_network_role_property('sahara/api', 'ipaddr') - $public_address = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, - } - $public_protocol = $public_ssl_hash['services'] ? 
{ - true => 'https', - default => 'http', - } - $sahara_user = pick($sahara_hash['user'], 'sahara') - $sahara_password = pick($sahara_hash['user_password']) - $tenant = pick($sahara_hash['tenant'], 'services') - $db_user = pick($sahara_hash['db_user'], 'sahara') - $db_name = pick($sahara_hash['db_name'], 'sahara') - $db_password = pick($sahara_hash['db_password']) - $db_host = pick($sahara_hash['db_host'], $database_vip) - $max_pool_size = min($::processorcount * 5 + 0, 30 + 0) - $max_overflow = min($::processorcount * 5 + 0, 60 + 0) - $max_retries = '-1' - $idle_timeout = '3600' - $read_timeout = '60' - $sql_connection = "mysql://${db_user}:${db_password}@${db_host}/${db_name}?read_timeout=${read_timeout}" - - ####### Disable upstart startup on install ####### - tweaks::ubuntu_service_override { 'sahara-api': - package_name => 'sahara', - } - - firewall { $firewall_rule : - dport => $api_bind_port, - proto => 'tcp', - action => 'accept', - } - - class { 'sahara' : - host => $api_bind_host, - port => $api_bind_port, - verbose => $verbose, - debug => $debug, - use_syslog => $use_syslog, - use_stderr => $use_stderr, - plugins => [ 'ambari', 'cdh', 'mapr', 'spark', 'vanilla' ], - log_facility => $syslog_log_facility_sahara, - database_connection => $sql_connection, - database_max_pool_size => $max_pool_size, - database_max_overflow => $max_overflow, - database_max_retries => $max_retries, - database_idle_timeout => $idle_timeout, - auth_uri => "http://${service_endpoint}:5000/v2.0/", - identity_uri => "http://${service_endpoint}:35357/", - rpc_backend => 'rabbit', - use_neutron => $use_neutron, - admin_user => $sahara_user, - admin_password => $sahara_password, - admin_tenant_name => $tenant, - rabbit_userid => $rabbit_hash['user'], - rabbit_password => $rabbit_hash['password'], - rabbit_ha_queues => $rabbit_ha_queues, - rabbit_port => $amqp_port, - rabbit_hosts => split($amqp_hosts, ',') - } - - if $public_ssl_hash['services'] { - file { '/etc/pki/tls/certs': - mode 
=> 755, - } - - file { '/etc/pki/tls/certs/public_haproxy.pem': - mode => 644, - } - - sahara_config { - 'object_store_access/public_identity_ca_file': value => '/etc/pki/tls/certs/public_haproxy.pem'; - 'object_store_access/public_object_store_ca_file': value => '/etc/pki/tls/certs/public_haproxy.pem'; - } - } - - class { 'sahara::service::api': } - - class { 'sahara::service::engine': } - - class { 'sahara::client': } - - if $ceilometer_hash['enabled'] { - class { '::sahara::notify': - enable_notifications => true, - } - } - - $haproxy_stats_url = "http://${management_vip}:10000/;csv" - - haproxy_backend_status { 'sahara' : - name => 'sahara', - url => $haproxy_stats_url, - } - - if $primary_controller { - haproxy_backend_status { 'keystone-public' : - name => 'keystone-1', - url => $haproxy_stats_url, - } - - haproxy_backend_status { 'keystone-admin' : - name => 'keystone-2', - url => $haproxy_stats_url, - } - - class { 'sahara_templates::create_templates' : - use_neutron => $use_neutron, - auth_user => $access_admin['user'], - auth_password => $access_admin['password'], - auth_tenant => $access_admin['tenant'], - auth_uri => "${public_protocol}://${public_address}:5000/v2.0/", - } - - Haproxy_backend_status['keystone-admin'] -> Haproxy_backend_status['sahara'] - Haproxy_backend_status['keystone-public'] -> Haproxy_backend_status['sahara'] - Haproxy_backend_status['sahara'] -> Class['sahara_templates::create_templates'] - } - - Firewall[$firewall_rule] -> Class['sahara::service::api'] - Service['sahara-api'] -> Haproxy_backend_status['sahara'] -} -######################### - -class openstack::firewall {} -include openstack::firewall diff --git a/f2s/resources/sahara/meta.yaml b/f2s/resources/sahara/meta.yaml deleted file mode 100644 index 953d8c83..00000000 --- a/f2s/resources/sahara/meta.yaml +++ /dev/null @@ -1,55 +0,0 @@ -id: sahara -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access_hash: - value: null - amqp_hosts: - 
value: null - amqp_port: - value: null - ceilometer_hash: - value: null - database_vip: - value: null - debug: - value: null - fqdn: - value: null - management_vip: - value: null - network_scheme: - value: null - primary_controller: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - rabbit_ha_queues: - value: null - rabbit_hash: - value: null - role: - value: null - sahara: - value: null - sahara_hash: - value: null - service_endpoint: - value: null - syslog_log_facility_sahara: - value: null - use_neutron: - value: null - use_stderr: - value: null - use_syslog: - value: null - verbose: - value: null diff --git a/f2s/resources/ssl-add-trust-chain/actions/run.pp b/f2s/resources/ssl-add-trust-chain/actions/run.pp deleted file mode 100644 index 231088d0..00000000 --- a/f2s/resources/ssl-add-trust-chain/actions/run.pp +++ /dev/null @@ -1,42 +0,0 @@ -notice('MODULAR: ssl_add_trust_chain.pp') - -$public_ssl_hash = hiera('public_ssl') -$ip = hiera('public_vip') - -case $::osfamily { - /(?i)redhat/: { - file { '/etc/pki/ca-trust/source/anchors/public_haproxy.pem': - ensure => 'link', - target => '/etc/pki/tls/certs/public_haproxy.pem', - }-> - - exec { 'enable_trust': - path => '/bin:/usr/bin:/sbin:/usr/sbin', - command => 'update-ca-trust force-enable', - }-> - - exec { 'add_trust': - path => '/bin:/usr/bin:/sbin:/usr/sbin', - command => 'update-ca-trust extract', - } - } - /(?i)debian/: { - file { '/usr/local/share/ca-certificates/public_haproxy.crt': - ensure => 'link', - target => '/etc/pki/tls/certs/public_haproxy.pem', - }-> - - exec { 'add_trust': - path => '/bin:/usr/bin:/sbin:/usr/sbin', - command => 'update-ca-certificates', - } - } - default: { - fail("Unsupported OS: ${::osfamily}/${::operatingsystem}") - } -} - -host { $public_ssl_hash['hostname']: - ensure => present, - ip => $ip, -} diff --git a/f2s/resources/ssl-add-trust-chain/meta.yaml b/f2s/resources/ssl-add-trust-chain/meta.yaml deleted file mode 
100644 index 5f5e0374..00000000 --- a/f2s/resources/ssl-add-trust-chain/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: ssl-add-trust-chain -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/ssl-keys-saving/actions/run.pp b/f2s/resources/ssl-keys-saving/actions/run.pp deleted file mode 100644 index 862b1f21..00000000 --- a/f2s/resources/ssl-keys-saving/actions/run.pp +++ /dev/null @@ -1,22 +0,0 @@ -notice('MODULAR: ssl_keys_saving.pp') - -$public_ssl_hash = hiera_hash('public_ssl') -$pub_certificate_content = $public_ssl_hash['cert_data']['content'] -$base_path = "/etc/pki/tls/certs" -$pki_path = [ "/etc/pki", "/etc/pki/tls" ] -$astute_base_path = "/var/lib/astute/haproxy" - -File { - owner => 'root', - group => 'root', - mode => '0644', -} - -file { [ $pki_path, $base_path, $astute_base_path ]: - ensure => directory, -} - -file { ["$base_path/public_haproxy.pem", "$astute_base_path/public_haproxy.pem"]: - ensure => present, - content => $pub_certificate_content, -} diff --git a/f2s/resources/ssl-keys-saving/meta.yaml b/f2s/resources/ssl-keys-saving/meta.yaml deleted file mode 100644 index 90bc0ebd..00000000 --- a/f2s/resources/ssl-keys-saving/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: ssl-keys-saving -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - public_ssl: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/swift-keystone/actions/run.pp b/f2s/resources/swift-keystone/actions/run.pp deleted file mode 100644 index 325a83b1..00000000 --- a/f2s/resources/swift-keystone/actions/run.pp +++ /dev/null @@ -1,51 +0,0 @@ -notice('MODULAR: swift/keystone.pp') - -$swift_hash = hiera_hash('swift', {}) -$public_vip = hiera('public_vip') -# Allow a plugin to override the admin address 
using swift_hash: -$admin_address = pick($swift_hash['management_vip'], hiera('management_vip')) -$region = pick($swift_hash['region'], hiera('region', 'RegionOne')) -$public_ssl_hash = hiera('public_ssl') -$public_address = $public_ssl_hash['services'] ? { - # Allow a plugin to override the public address using swift_hash: - # TODO(sbog): with this approach you must use IP address in SAN field of - # certificate on external swift. Change this in next iterations of TLS - # implementation. - true => pick($swift_hash['public_vip'], - $public_ssl_hash['hostname']), - default => $public_vip, -} -$public_protocol = $public_ssl_hash['services'] ? { - true => 'https', - default => 'http', -} - -$password = $swift_hash['user_password'] -$auth_name = pick($swift_hash['auth_name'], 'swift') -$configure_endpoint = pick($swift_hash['configure_endpoint'], true) -$service_name = pick($swift_hash['service_name'], 'swift') -$tenant = pick($swift_hash['tenant'], 'services') - -validate_string($public_address) -validate_string($password) - -$public_url = "${public_protocol}://${public_address}:8080/v1/AUTH_%(tenant_id)s" -$admin_url = "http://${admin_address}:8080/v1/AUTH_%(tenant_id)s" - -# Amazon S3 endpoints -$public_url_s3 = "${public_protocol}://${public_address}:8080" -$admin_url_s3 = "http://${admin_address}:8080" - -class { '::swift::keystone::auth': - password => $password, - auth_name => $auth_name, - configure_endpoint => $configure_endpoint, - service_name => $service_name, - public_url => $public_url, - internal_url => $admin_url, - admin_url => $admin_url, - public_url_s3 => $public_url_s3, - internal_url_s3 => $admin_url_s3, - admin_url_s3 => $admin_url_s3, - region => $region, -} diff --git a/f2s/resources/swift-keystone/meta.yaml b/f2s/resources/swift-keystone/meta.yaml deleted file mode 100644 index ba5bbc63..00000000 --- a/f2s/resources/swift-keystone/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: swift-keystone -handler: puppetv2 -version: '8.0' -actions: - run: 
run.pp - update: run.pp -input: - fqdn: - value: null - management_vip: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - swift: - value: null diff --git a/f2s/resources/swift-rebalance-cron/actions/run.pp b/f2s/resources/swift-rebalance-cron/actions/run.pp deleted file mode 100644 index 272e8e34..00000000 --- a/f2s/resources/swift-rebalance-cron/actions/run.pp +++ /dev/null @@ -1,24 +0,0 @@ -notice('MODULAR: swift/rebalance_cronjob.pp') - -$network_metadata = hiera_hash('network_metadata') -# $network_scheme = hiera_hash('network_scheme') -# prepare_network_config($network_scheme) - -$storage_hash = hiera('storage_hash') -$swift_master_role = hiera('swift_master_role', 'primary-controller') -$ring_min_part_hours = hiera('swift_ring_min_part_hours', 1) - -# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects -if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { - $master_swift_replication_nodes = get_nodes_hash_by_roles($network_metadata, [$swift_master_role]) - $master_swift_replication_nodes_list = values($master_swift_replication_nodes) - $master_swift_replication_ip = $master_swift_replication_nodes_list[0]['network_roles']['swift/replication'] - - - # setup a cronjob to rebalance and repush rings periodically - class { 'openstack::swift::rebalance_cronjob': - ring_rebalance_period => min($ring_min_part_hours * 2, 23), - master_swift_replication_ip => $master_swift_replication_ip, - primary_proxy => hiera('is_primary_swift_proxy'), - } -} diff --git a/f2s/resources/swift-rebalance-cron/meta.yaml b/f2s/resources/swift-rebalance-cron/meta.yaml deleted file mode 100644 index d2a04544..00000000 --- a/f2s/resources/swift-rebalance-cron/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: swift-rebalance-cron -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - 
fqdn: - value: null - is_primary_swift_proxy: - value: null - network_metadata: - value: null - puppet_modules: - value: null - role: - value: null - storage_hash: - value: null - swift_master_role: - value: null - swift_ring_min_part_hours: - value: null diff --git a/f2s/resources/swift/actions/run.pp b/f2s/resources/swift/actions/run.pp deleted file mode 100644 index aec24337..00000000 --- a/f2s/resources/swift/actions/run.pp +++ /dev/null @@ -1,147 +0,0 @@ -notice('MODULAR: swift.pp') - -$network_scheme = hiera_hash('network_scheme') -$network_metadata = hiera_hash('network_metadata') -prepare_network_config($network_scheme) - -$swift_hash = hiera_hash('swift_hash') -$swift_master_role = hiera('swift_master_role', 'primary-controller') -$swift_nodes = hiera_hash('swift_nodes', {}) -$swift_operator_roles = pick($swift_hash['swift_operator_roles'], ['admin', 'SwiftOperator']) -$swift_proxies_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxies', {}), 'swift/api')) -# todo(sv) replace 'management' to mgmt/memcache -$memcaches_addr_list = values(get_node_to_ipaddr_map_by_network_role(hiera_hash('swift_proxy_caches', {}), 'management')) -$is_primary_swift_proxy = hiera('is_primary_swift_proxy', false) -$proxy_port = hiera('proxy_port', '8080') -$storage_hash = hiera_hash('storage_hash') -$mp_hash = hiera('mp') -$management_vip = hiera('management_vip') -$public_vip = hiera('public_vip') -$swift_api_ipaddr = get_network_role_property('swift/api', 'ipaddr') -$swift_storage_ipaddr = get_network_role_property('swift/replication', 'ipaddr') -$debug = pick($swift_hash['debug'], hiera('debug', false)) -$verbose = pick($swift_hash['verbose'], hiera('verbose', false)) -# NOTE(mattymo): Changing ring_part_power or part_hours on redeploy leads to data loss -$ring_part_power = pick($swift_hash['ring_part_power'], 10) -$ring_min_part_hours = hiera('swift_ring_min_part_hours', 1) -$deploy_swift_storage = hiera('deploy_swift_storage', true) 
-$deploy_swift_proxy = hiera('deploy_swift_proxy', true) -$create_keystone_auth = pick($swift_hash['create_keystone_auth'], true) -#Keystone settings -$service_endpoint = hiera('service_endpoint') -$keystone_user = pick($swift_hash['user'], 'swift') -$keystone_password = pick($swift_hash['user_password'], 'passsword') -$keystone_tenant = pick($swift_hash['tenant'], 'services') -$keystone_protocol = pick($swift_hash['auth_protocol'], 'http') -$region = hiera('region', 'RegionOne') -$service_workers = pick($swift_hash['workers'], - min(max($::processorcount, 2), 16)) - -# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects -if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { - $master_swift_proxy_nodes = get_nodes_hash_by_roles($network_metadata, [$swift_master_role]) - $master_swift_proxy_nodes_list = values($master_swift_proxy_nodes) - $master_swift_proxy_ip = regsubst($master_swift_proxy_nodes_list[0]['network_roles']['swift/api'], '\/\d+$', '') - $master_swift_replication_ip = regsubst($master_swift_proxy_nodes_list[0]['network_roles']['swift/replication'], '\/\d+$', '') - $swift_partition = hiera('swift_partition', '/var/lib/glance/node') - - if ($deploy_swift_storage){ - if !defined(File['/var/lib/glance']) { - file {'/var/lib/glance': - ensure => 'directory', - group => 'swift', - require => Package['swift'], - } -> Service <| tag == 'swift-service' |> - } else { - File['/var/lib/glance'] { - ensure => 'directory', - group => 'swift', - require +> Package['swift'], - } - File['/var/lib/glance'] -> Service <| tag == 'swift-service' |> - } - - class { 'openstack::swift::storage_node': - storage_type => false, - loopback_size => '5243780', - storage_mnt_base_dir => $swift_partition, - storage_devices => filter_hash($mp_hash,'point'), - swift_zone => $master_swift_proxy_nodes_list[0]['swift_zone'], - swift_local_net_ip => $swift_storage_ipaddr, - master_swift_proxy_ip => 
$master_swift_proxy_ip, - master_swift_replication_ip => $master_swift_replication_ip, - sync_rings => ! $is_primary_swift_proxy, - debug => $debug, - verbose => $verbose, - log_facility => 'LOG_SYSLOG', - } - } - - if $is_primary_swift_proxy { - ring_devices {'all': - storages => $swift_nodes, - require => Class['swift'], - } - } - - if $deploy_swift_proxy { - $sto_nets = get_routable_networks_for_network_role($network_scheme, 'swift/replication', ' ') - $man_nets = get_routable_networks_for_network_role($network_scheme, 'swift/api', ' ') - - class { 'openstack::swift::proxy': - swift_user_password => $swift_hash['user_password'], - swift_operator_roles => $swift_operator_roles, - swift_proxies_cache => $memcaches_addr_list, - ring_part_power => $ring_part_power, - primary_proxy => $is_primary_swift_proxy, - swift_proxy_local_ipaddr => $swift_api_ipaddr, - swift_replication_local_ipaddr => $swift_storage_ipaddr, - master_swift_proxy_ip => $master_swift_proxy_ip, - master_swift_replication_ip => $master_swift_replication_ip, - proxy_port => $proxy_port, - proxy_workers => $service_workers, - debug => $debug, - verbose => $verbose, - log_facility => 'LOG_SYSLOG', - ceilometer => hiera('use_ceilometer',false), - ring_min_part_hours => $ring_min_part_hours, - admin_user => $keystone_user, - admin_tenant_name => $keystone_tenant, - admin_password => $keystone_password, - auth_host => $service_endpoint, - auth_protocol => $keystone_protocol, - } -> - class { 'openstack::swift::status': - endpoint => "http://${swift_api_ipaddr}:${proxy_port}", - vip => $management_vip, - only_from => "127.0.0.1 240.0.0.2 ${sto_nets} ${man_nets}", - con_timeout => 5 - } -> - class { 'swift::dispersion': - auth_url => "http://$service_endpoint:5000/v2.0/", - auth_user => $keystone_user, - auth_tenant => $keystone_tenant, - auth_pass => $keystone_password, - auth_version => '2.0', - } - - Service<| tag == 'swift-service' |> -> Class['swift::dispersion'] - - if 
defined(Class['openstack::swift::storage_node']) { - Class['openstack::swift::storage_node'] -> Class['swift::dispersion'] - } - } -} - -# 'ceilometer' class is being declared inside openstack::ceilometer class -# which is declared inside openstack::controller class in the other task. -# So we need a stub here for dependency from swift::proxy::ceilometer -class ceilometer {} -include ceilometer - -# Class[Swift::Proxy::Cache] requires Class[Memcached] if memcache_servers -# contains 127.0.0.1. But we're deploying memcached in another task. So we -# need to add this stub here. -class memcached {} -include memcached - diff --git a/f2s/resources/swift/meta.yaml b/f2s/resources/swift/meta.yaml deleted file mode 100644 index 73e32aaa..00000000 --- a/f2s/resources/swift/meta.yaml +++ /dev/null @@ -1,63 +0,0 @@ -id: swift -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - debug: - value: null - deploy_swift_proxy: - value: null - deploy_swift_storage: - value: null - fqdn: - value: null - is_primary_swift_proxy: - value: null - management_vip: - value: null - mp: - value: null - network_metadata: - value: null - network_scheme: - value: null - nodes: - value: null - proxy_port: - value: null - public_vip: - value: null - puppet_modules: - value: null - region: - value: null - role: - value: null - service_endpoint: - value: null - storage: - value: null - storage_hash: - value: null - swift: - value: null - swift_hash: - value: null - swift_master_role: - value: null - swift_nodes: - value: null - swift_partition: - value: null - swift_proxies: - value: null - swift_proxy_caches: - value: null - swift_ring_min_part_hours: - value: null - use_ceilometer: - value: null - verbose: - value: null diff --git a/f2s/resources/tools/actions/run.pp b/f2s/resources/tools/actions/run.pp deleted file mode 100644 index f2b1ef72..00000000 --- a/f2s/resources/tools/actions/run.pp +++ /dev/null @@ -1,42 +0,0 @@ -notice('MODULAR: tools.pp') - -class { 
'osnailyfacter::atop': } -class { 'osnailyfacter::ssh': } - -if $::virtual != 'physical' { - class { 'osnailyfacter::acpid': } -} - -$tools = [ - 'screen', - 'tmux', - 'man', - 'htop', - 'tcpdump', - 'strace', - 'fuel-misc' -] - -package { $tools : - ensure => 'present', -} - -package { 'cloud-init': - ensure => 'absent', -} - -if $::osfamily == 'Debian' { - apt::conf { 'notranslations': - ensure => 'present', - content => 'Acquire::Languages "none";', - notify_update => false, - } -} - -$puppet = hiera('puppet') -class { 'osnailyfacter::puppet_pull': - modules_source => $puppet['modules'], - manifests_source => $puppet['manifests'], -} - -$deployment_mode = hiera('deployment_mode') diff --git a/f2s/resources/tools/meta.yaml b/f2s/resources/tools/meta.yaml deleted file mode 100644 index 01ff292c..00000000 --- a/f2s/resources/tools/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: tools -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - deployment_mode: - value: null - fqdn: - value: null - puppet: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/top-role-ceph-osd/actions/run.pp b/f2s/resources/top-role-ceph-osd/actions/run.pp deleted file mode 100644 index 93bb6ee4..00000000 --- a/f2s/resources/top-role-ceph-osd/actions/run.pp +++ /dev/null @@ -1,57 +0,0 @@ -notice('MODULAR: ceph-osd.pp') - -# Pulling hiera -$storage_hash = hiera('storage', {}) -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') -$use_neutron = hiera('use_neutron', false) -$mp_hash = hiera('mp') -$verbose = pick($storage_hash['verbose'], true) -$debug = pick($storage_hash['debug'], hiera('debug', true)) -$use_monit = false -$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) -$keystone_hash = hiera('keystone', {}) -$access_hash = hiera('access', {}) -$network_scheme = hiera_hash('network_scheme') -$neutron_mellanox = hiera('neutron_mellanox', false) -$syslog_hash = hiera('syslog', 
{}) -$use_syslog = hiera('use_syslog', true) -$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') -$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') -$primary_mons = keys($ceph_primary_monitor_node) -$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] -prepare_network_config($network_scheme) -$ceph_cluster_network = get_network_role_property('ceph/replication', 'network') -$ceph_public_network = get_network_role_property('ceph/public', 'network') - -class {'ceph': - primary_mon => $primary_mon, - mon_hosts => keys($mon_address_map), - mon_ip_addresses => values($mon_address_map), - cluster_node_address => $public_vip, - osd_pool_default_size => $storage_hash['osd_pool_size'], - osd_pool_default_pg_num => $storage_hash['pg_num'], - osd_pool_default_pgp_num => $storage_hash['pg_num'], - use_rgw => $storage_hash['objects_ceph'], - glance_backend => $glance_backend, - rgw_pub_ip => $public_vip, - rgw_adm_ip => $management_vip, - rgw_int_ip => $management_vip, - cluster_network => $ceph_cluster_network, - public_network => $ceph_public_network, - use_syslog => $use_syslog, - syslog_log_level => hiera('syslog_log_level_ceph', 'info'), - syslog_log_facility => hiera('syslog_log_facility_ceph','LOG_LOCAL0'), - rgw_keystone_admin_token => $keystone_hash['admin_token'], - ephemeral_ceph => $storage_hash['ephemeral_ceph'], -} - -$osd_devices = split($::osd_devices_list, ' ') -#Class Ceph is already defined so it will do it's thing. 
-notify {"ceph_osd: ${osd_devices}": } -notify {"osd_devices: ${::osd_devices_list}": } -# TODO(bogdando) add monit ceph-osd services monitoring, if required - -################################################################# - -# vim: set ts=2 sw=2 et : diff --git a/f2s/resources/top-role-ceph-osd/meta.yaml b/f2s/resources/top-role-ceph-osd/meta.yaml deleted file mode 100644 index 492120ed..00000000 --- a/f2s/resources/top-role-ceph-osd/meta.yaml +++ /dev/null @@ -1,47 +0,0 @@ -id: top-role-ceph-osd -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access: - value: null - auto_assign_floating_ip: - value: null - ceph_monitor_nodes: - value: null - ceph_primary_monitor_node: - value: null - debug: - value: null - fqdn: - value: null - keystone: - value: null - management_vip: - value: null - mp: - value: null - network_scheme: - value: null - neutron_mellanox: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null - syslog: - value: null - syslog_log_facility_ceph: - value: null - syslog_log_level_ceph: - value: null - use_neutron: - value: null - use_syslog: - value: null diff --git a/f2s/resources/top-role-cinder-vmware/actions/run.pp b/f2s/resources/top-role-cinder-vmware/actions/run.pp deleted file mode 100644 index 0f96eebe..00000000 --- a/f2s/resources/top-role-cinder-vmware/actions/run.pp +++ /dev/null @@ -1,11 +0,0 @@ -notice('MODULAR: cinder-vmware.pp') - -$nodes_hash = hiera('nodes', {}) -$roles = node_roles($nodes_hash, hiera('uid')) -$cinder_hash = hiera_hash('cinder_hash', {}) - -if (member($roles, 'cinder-vmware')) { - $debug = pick($cinder_hash['debug'], hiera('debug', true)) - $volumes = get_cinder_vmware_data($cinder_hash['instances'], $debug) - create_resources(vmware::cinder::vmdk, $volumes) -} diff --git a/f2s/resources/top-role-cinder-vmware/meta.yaml b/f2s/resources/top-role-cinder-vmware/meta.yaml deleted file mode 100644 index 
bbf04753..00000000 --- a/f2s/resources/top-role-cinder-vmware/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: top-role-cinder-vmware -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/top-role-cinder/actions/run.pp b/f2s/resources/top-role-cinder/actions/run.pp deleted file mode 100644 index 5cd24839..00000000 --- a/f2s/resources/top-role-cinder/actions/run.pp +++ /dev/null @@ -1,308 +0,0 @@ -notice('MODULAR: cinder.pp') - -# Pulling hiera -prepare_network_config(hiera('network_scheme', {})) -$cinder_hash = hiera_hash('cinder_hash', {}) -$storage_address = get_network_role_property('cinder/iscsi', 'ipaddr') -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') -$primary_controller = hiera('primary_controller') -$use_neutron = hiera('use_neutron', false) -$mp_hash = hiera('mp') -$verbose = pick($cinder_hash['verbose'], true) -$debug = pick($cinder_hash['debug'], hiera('debug', true)) -$use_monit = false -$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) -$nodes_hash = hiera('nodes', {}) -$storage_hash = hiera_hash('storage_hash', {}) -$vcenter_hash = hiera('vcenter', {}) -$nova_hash = hiera_hash('nova_hash', {}) -$mysql_hash = hiera_hash('mysql_hash', {}) -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$glance_hash = hiera_hash('glance_hash', {}) -$keystone_hash = hiera_hash('keystone_hash', {}) -$ceilometer_hash = hiera_hash('ceilometer_hash',{}) -$access_hash = hiera('access', {}) -$network_scheme = hiera_hash('network_scheme') -$neutron_mellanox = hiera('neutron_mellanox', false) -$syslog_hash = hiera('syslog', {}) -$base_syslog_hash = hiera('base_syslog', {}) -$use_stderr = hiera('use_stderr', false) -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') -$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 
'LOG_LOCAL3') -$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') -$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') -$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') -$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') -$syslog_log_facility_sahara = hiera('syslog_log_facility_sahara','LOG_LOCAL0') -$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') - -$cinder_db_password = $cinder_hash[db_password] -$keystone_user = pick($cinder_hash['user'], 'cinder') -$keystone_tenant = pick($cinder_hash['tenant'], 'services') -$db_host = pick($cinder_hash['db_host'], hiera('database_vip')) -$cinder_db_user = pick($cinder_hash['db_user'], 'cinder') -$cinder_db_name = pick($cinder_hash['db_name'], 'cinder') - -$service_endpoint = hiera('service_endpoint') -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") - -$keystone_auth_protocol = 'http' -$keystone_auth_host = $service_endpoint -$service_port = '5000' -$auth_uri = "${keystone_auth_protocol}://${keystone_auth_host}:${service_port}/" - -# TODO: openstack_version is confusing, there's such string var in hiera and hardcoded hash -$hiera_openstack_version = hiera('openstack_version') -$openstack_version = { - 'keystone' => 'installed', - 'glance' => 'installed', - 'horizon' => 'installed', - 'nova' => 'installed', - 'novncproxy' => 'installed', - 'cinder' => 'installed', -} - -$queue_provider = hiera('queue_provider', 'rabbitmq') -$custom_mysql_setup_class='galera' - -# Do the stuff -if $neutron_mellanox { - $mellanox_mode = $neutron_mellanox['plugin'] -} else { - $mellanox_mode = 'disabled' -} - -if (!empty(filter_nodes(hiera('nodes'), 'role', 'ceph-osd')) or - $storage_hash['volumes_ceph'] or - $storage_hash['images_ceph'] or - $storage_hash['objects_ceph'] -) { - $use_ceph = true -} else { - $use_ceph = false -} - -if $use_neutron { - $neutron_config = 
hiera('quantum_settings') -} else { - $neutron_config = {} -} - -if $primary_controller { - if ($mellanox_mode == 'ethernet') { - $test_vm_pkg = 'cirros-testvm-mellanox' - } else { - $test_vm_pkg = 'cirros-testvm' - } - package { 'cirros-testvm' : - ensure => 'installed', - name => $test_vm_pkg, - } -} - -if !$rabbit_hash['user'] { - $rabbit_hash['user'] = 'nova' -} - -if ! $use_neutron { - $floating_ips_range = hiera('floating_network_range') -} -$floating_hash = {} - -##CALCULATED PARAMETERS - - -##NO NEED TO CHANGE - -$node = filter_nodes($nodes_hash, 'name', $::hostname) -if empty($node) { - fail("Node $::hostname is not defined in the hash structure") -} - -$roles = node_roles($nodes_hash, hiera('uid')) -$mountpoints = filter_hash($mp_hash,'point') - -# SQLAlchemy backend configuration -$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) -$max_overflow = min($::processorcount * 5 + 0, 60 + 0) -$max_retries = '-1' -$idle_timeout = '3600' - -# Determine who should get the volume service - -if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { - $manage_volumes = 'iscsi' -} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { - $manage_volumes = 'vmdk' -} elsif ($storage_hash['volumes_ceph']) { - $manage_volumes = 'ceph' -} else { - $manage_volumes = false -} - -#Determine who should be the default backend - -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' - $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] -} elsif ($storage_hash['images_vcenter']) { - $glance_backend = 'vmware' - $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] -} else { - $glance_backend = 'swift' - $glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ] -} - -# NOTE(bogdando) for controller nodes running Corosync with Pacemaker -# we delegate all of the monitor functions to RA instead of monit. 
-if member($roles, 'controller') or member($roles, 'primary-controller') { - $use_monit_real = false -} else { - $use_monit_real = $use_monit -} - -if $use_monit_real { - # Configure service names for monit watchdogs and 'service' system path - # FIXME(bogdando) replace service_path to systemd, once supported - include nova::params - include cinder::params - include neutron::params - $nova_compute_name = $::nova::params::compute_service_name - $nova_api_name = $::nova::params::api_service_name - $nova_network_name = $::nova::params::network_service_name - $cinder_volume_name = $::cinder::params::volume_service - $ovs_vswitchd_name = $::l23network::params::ovs_service_name - case $::osfamily { - 'RedHat' : { - $service_path = '/sbin/service' - } - 'Debian' : { - $service_path = '/usr/sbin/service' - } - default : { - fail("Unsupported osfamily: ${osfamily} for os ${operatingsystem}") - } - } -} - -#HARDCODED PARAMETERS - -$multi_host = true -$mirror_type = 'external' -Exec { logoutput => true } - - -################################################################# -# we need to evaluate ceph here, because ceph notifies/requires -# other services that are declared in openstack manifests -if ($use_ceph and !$storage_hash['volumes_lvm']) { - $primary_mons = $controllers - $primary_mon = $controllers[0]['name'] - - if ($use_neutron) { - prepare_network_config(hiera_hash('network_scheme')) - $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') - $ceph_public_network = get_network_role_property('ceph/public', 'network') - } else { - $ceph_cluster_network = hiera('storage_network_range') - $ceph_public_network = hiera('management_network_range') - } - - class {'ceph': - primary_mon => $primary_mon, - mon_hosts => nodes_with_roles($nodes_hash, ['primary-controller', 'controller', 'ceph-mon'], 'name'), - mon_ip_addresses => nodes_with_roles($nodes_hash, ['primary-controller', 'controller', 'ceph-mon'], 'internal_address'), - cluster_node_address 
=> $public_vip, - osd_pool_default_size => $storage_hash['osd_pool_size'], - osd_pool_default_pg_num => $storage_hash['pg_num'], - osd_pool_default_pgp_num => $storage_hash['pg_num'], - use_rgw => $storage_hash['objects_ceph'], - glance_backend => $glance_backend, - rgw_pub_ip => $public_vip, - rgw_adm_ip => $management_vip, - rgw_int_ip => $management_vip, - cluster_network => $ceph_cluster_network, - public_network => $ceph_public_network, - use_syslog => $use_syslog, - syslog_log_facility => $syslog_log_facility_ceph, - rgw_keystone_admin_token => $keystone_hash['admin_token'], - ephemeral_ceph => $storage_hash['ephemeral_ceph'] - } -} - -################################################################# - -include keystone::python -#FIXME(bogdando) notify services on python-amqp update, if needed -package { 'python-amqp': - ensure => present -} -if member($roles, 'controller') or member($roles, 'primary-controller') { - $bind_host = get_network_role_property('cinder/api', 'ipaddr') -} else { - $bind_host = false - # Configure auth_strategy on cinder node, if cinder and controller are - # on the same node this parameter is configured by ::cinder::api - cinder_config { - 'DEFAULT/auth_strategy': value => 'keystone'; - } -} - -# NOTE(bogdando) deploy cinder volume node with disabled cinder-volume -# service #LP1398817. The orchestration will start and enable it back -# after the deployment is done. 
-class { 'openstack::cinder': - enable_volumes => false, - sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_name}?charset=utf8&read_timeout=60", - glance_api_servers => $glance_api_servers, - bind_host => $bind_host, - queue_provider => $queue_provider, - amqp_hosts => hiera('amqp_hosts',''), - amqp_user => $rabbit_hash['user'], - amqp_password => $rabbit_hash['password'], - rabbit_ha_queues => hiera('rabbit_ha_queues', false), - volume_group => 'cinder', - manage_volumes => $manage_volumes, - iser => $storage_hash['iser'], - enabled => true, - auth_host => $service_endpoint, - iscsi_bind_host => $storage_address, - keystone_user => $keystone_user, - keystone_tenant => $keystone_tenant, - cinder_user_password => $cinder_hash[user_password], - syslog_log_facility => $syslog_log_facility_cinder, - debug => $debug, - verbose => $verbose, - use_stderr => $use_stderr, - use_syslog => $use_syslog, - max_retries => $max_retries, - max_pool_size => $max_pool_size, - max_overflow => $max_overflow, - idle_timeout => $idle_timeout, - ceilometer => $ceilometer_hash[enabled], - vmware_host_ip => $vcenter_hash['host_ip'], - vmware_host_username => $vcenter_hash['vc_user'], - vmware_host_password => $vcenter_hash['vc_password'], - auth_uri => $auth_uri, - identity_uri => $auth_uri, -} - -cinder_config { 'keymgr/fixed_key': - value => $cinder_hash[fixed_key]; -} - -# FIXME(bogdando) replace service_path and action to systemd, once supported -if $use_monit_real { - monit::process { $cinder_volume_name : - ensure => running, - matching => '/usr/bin/python /usr/bin/cinder-volume', - start_command => "${service_path} ${cinder_volume_name} restart", - stop_command => "${service_path} ${cinder_volume_name} stop", - pidfile => false, - } -} -################################################################# - -# vim: set ts=2 sw=2 et : diff --git a/f2s/resources/top-role-cinder/meta.yaml b/f2s/resources/top-role-cinder/meta.yaml deleted file mode 
100644 index 75dbd887..00000000 --- a/f2s/resources/top-role-cinder/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: top-role-cinder -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/top-role-compute-vmware/actions/run.pp b/f2s/resources/top-role-compute-vmware/actions/run.pp deleted file mode 100644 index 16bd8e02..00000000 --- a/f2s/resources/top-role-compute-vmware/actions/run.pp +++ /dev/null @@ -1,18 +0,0 @@ -notice('MODULAR: vmware/compute-vmware.pp') - -$role = hiera('role') - -$debug = hiera('debug', true) -$ceilometer_hash = hiera('ceilometer',{}) - -$vcenter_hash = hiera('vcenter', {}) -$computes_hash = parse_vcenter_settings($vcenter_hash['computes']) - -$uid = hiera('uid') -$node_name = "node-$uid" -$defaults = { - current_node => $node_name, - vlan_interface => $vcenter_hash['esxi_vlan_interface'] - } - -create_resources(vmware::compute_vmware, $computes_hash, $defaults) diff --git a/f2s/resources/top-role-compute-vmware/meta.yaml b/f2s/resources/top-role-compute-vmware/meta.yaml deleted file mode 100644 index b2ff4732..00000000 --- a/f2s/resources/top-role-compute-vmware/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: top-role-compute-vmware -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/top-role-compute/actions/run.pp b/f2s/resources/top-role-compute/actions/run.pp deleted file mode 100644 index f2539a74..00000000 --- a/f2s/resources/top-role-compute/actions/run.pp +++ /dev/null @@ -1,339 +0,0 @@ -notice('MODULAR: compute.pp') - -$network_scheme = hiera_hash('network_scheme', {}) -$network_metadata = hiera_hash('network_metadata', {}) -prepare_network_config($network_scheme) - -# Pulling hiera -$compute_hash = hiera_hash('compute', {}) -$node_name = hiera('node_name') -$public_int = 
hiera('public_int', undef) -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') -$database_vip = hiera('database_vip') -$service_endpoint = hiera('service_endpoint') -$primary_controller = hiera('primary_controller') -$use_neutron = hiera('use_neutron', false) -$sahara_hash = hiera('sahara', {}) -$murano_hash = hiera('murano', {}) -$mp_hash = hiera('mp') -$verbose = pick($compute_hash['verbose'], true) -$debug = pick($compute_hash['debug'], hiera('debug', true)) -$use_monit = false -$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false) -$nodes_hash = hiera('nodes', {}) -$storage_hash = hiera_hash('storage_hash', {}) -$vcenter_hash = hiera('vcenter', {}) -$nova_hash = hiera_hash('nova_hash', {}) -$nova_custom_hash = hiera_hash('nova_custom_hash', {}) -$rabbit_hash = hiera_hash('rabbit_hash', {}) -$glance_hash = hiera_hash('glance_hash', {}) -$keystone_hash = hiera_hash('keystone_hash', {}) -$swift_hash = hiera_hash('swift_hash', {}) -$cinder_hash = hiera_hash('cinder_hash', {}) -$ceilometer_hash = hiera_hash('ceilometer_hash',{}) -$access_hash = hiera('access', {}) -$swift_proxies = hiera('swift_proxies') -$swift_master_role = hiera('swift_master_role', 'primary-controller') -$neutron_mellanox = hiera('neutron_mellanox', false) -$syslog_hash = hiera('syslog', {}) -$base_syslog_hash = hiera('base_syslog', {}) -$use_syslog = hiera('use_syslog', true) -$use_stderr = hiera('use_stderr', false) -$syslog_log_facility_glance = hiera('syslog_log_facility_glance', 'LOG_LOCAL2') -$syslog_log_facility_cinder = hiera('syslog_log_facility_cinder', 'LOG_LOCAL3') -$syslog_log_facility_neutron = hiera('syslog_log_facility_neutron', 'LOG_LOCAL4') -$syslog_log_facility_nova = hiera('syslog_log_facility_nova','LOG_LOCAL6') -$syslog_log_facility_keystone = hiera('syslog_log_facility_keystone', 'LOG_LOCAL7') -$syslog_log_facility_murano = hiera('syslog_log_facility_murano', 'LOG_LOCAL0') -$syslog_log_facility_sahara = 
hiera('syslog_log_facility_sahara','LOG_LOCAL0') -$nova_rate_limits = hiera('nova_rate_limits') -$nova_report_interval = hiera('nova_report_interval') -$nova_service_down_time = hiera('nova_service_down_time') -$glance_api_servers = hiera('glance_api_servers', "${management_vip}:9292") -$config_drive_format = 'vfat' - -$public_ssl_hash = hiera('public_ssl') -$vncproxy_host = $public_ssl_hash['services'] ? { - true => $public_ssl_hash['hostname'], - default => $public_vip, -} - -$db_host = pick($nova_hash['db_host'], $database_vip) - -$block_device_allocate_retries = hiera('block_device_allocate_retries', 300) -$block_device_allocate_retries_interval = hiera('block_device_allocate_retries_interval', 3) - -# TODO: openstack_version is confusing, there's such string var in hiera and hardcoded hash -$hiera_openstack_version = hiera('openstack_version') -$openstack_version = { - 'keystone' => 'installed', - 'glance' => 'installed', - 'horizon' => 'installed', - 'nova' => 'installed', - 'novncproxy' => 'installed', - 'cinder' => 'installed', -} - -$queue_provider = hiera('queue_provider', 'rabbitmq') - -# Do the stuff -if $neutron_mellanox { - $mellanox_mode = $neutron_mellanox['plugin'] -} else { - $mellanox_mode = 'disabled' -} - -if $use_neutron { - $novanetwork_params = {} - $network_provider = 'neutron' - $neutron_config = hiera_hash('quantum_settings') - $neutron_db_password = $neutron_config['database']['passwd'] - $neutron_user_password = $neutron_config['keystone']['admin_password'] - $neutron_metadata_proxy_secret = $neutron_config['metadata']['metadata_proxy_shared_secret'] - $base_mac = $neutron_config['L2']['base_mac'] -} else { - $network_provider = 'nova' - $floating_ips_range = hiera('floating_network_range') - $neutron_config = {} - $novanetwork_params = hiera('novanetwork_parameters') -} - -if $primary_controller { - if ($mellanox_mode == 'ethernet') { - $test_vm_pkg = 'cirros-testvm-mellanox' - } else { - $test_vm_pkg = 'cirros-testvm' - } - package { 
'cirros-testvm' : - ensure => 'installed', - name => $test_vm_pkg, - } -} - -if !$rabbit_hash['user'] { - $rabbit_hash['user'] = 'nova' -} - -$floating_hash = {} - -##CALCULATED PARAMETERS - -##TODO: simply parse nodes array -$memcache_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('memcache_roles')) -$memcache_ipaddrs = ipsort(values(get_node_to_ipaddr_map_by_network_role($memcache_nodes,'mgmt/memcache'))) -$roles = $network_metadata['nodes'][$node_name]['node_roles'] -$mountpoints = filter_hash($mp_hash,'point') - -# SQLAlchemy backend configuration -$max_pool_size = min($::processorcount * 5 + 0, 30 + 0) -$max_overflow = min($::processorcount * 5 + 0, 60 + 0) -$max_retries = '-1' -$idle_timeout = '3600' - -if ($storage_hash['volumes_lvm']) { - nova_config { 'keymgr/fixed_key': - value => $cinder_hash[fixed_key]; - } -} - -# Determine who should get the volume service - -if (member($roles, 'cinder') and $storage_hash['volumes_lvm']) { - $manage_volumes = 'iscsi' -} elsif (member($roles, 'cinder') and $storage_hash['volumes_vmdk']) { - $manage_volumes = 'vmdk' -} elsif ($storage_hash['volumes_ceph']) { - $manage_volumes = 'ceph' -} else { - $manage_volumes = false -} - -#Determine who should be the default backend - -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' - $glance_known_stores = [ 'glance.store.rbd.Store', 'glance.store.http.Store' ] -} elsif ($storage_hash['images_vcenter']) { - $glance_backend = 'vmware' - $glance_known_stores = [ 'glance.store.vmware_datastore.Store', 'glance.store.http.Store' ] -} else { - $glance_backend = 'swift' - $glance_known_stores = [ 'glance.store.swift.Store', 'glance.store.http.Store' ] -} - -# Use Swift if it isn't replaced by vCenter, Ceph for BOTH images and objects -if !($storage_hash['images_ceph'] and $storage_hash['objects_ceph']) and !$storage_hash['images_vcenter'] { - $use_swift = true -} else { - $use_swift = false -} - -# NOTE(bogdando) for controller nodes running Corosync with 
Pacemaker -# we delegate all of the monitor functions to RA instead of monit. -if member($roles, 'controller') or member($roles, 'primary-controller') { - $use_monit_real = false -} else { - $use_monit_real = $use_monit -} - -if $use_monit_real { - # Configure service names for monit watchdogs and 'service' system path - # FIXME(bogdando) replace service_path to systemd, once supported - include nova::params - include cinder::params - include neutron::params - $nova_compute_name = $::nova::params::compute_service_name - $nova_api_name = $::nova::params::api_service_name - $nova_network_name = $::nova::params::network_service_name - $cinder_volume_name = $::cinder::params::volume_service - $ovs_vswitchd_name = $::l23network::params::ovs_service_name - case $::osfamily { - 'RedHat' : { - $service_path = '/sbin/service' - } - 'Debian' : { - $service_path = '/usr/sbin/service' - } - default : { - fail("Unsupported osfamily: ${osfamily} for os ${operatingsystem}") - } - } -} - -#HARDCODED PARAMETERS -if hiera('use_vcenter', false) { - $multi_host = false -} else { - $multi_host = true -} - -$mirror_type = 'external' -Exec { logoutput => true } - -include osnailyfacter::test_compute - -if ($::mellanox_mode == 'ethernet') { - $neutron_private_net = pick($neutron_config['default_private_net'], 'net04') - $physnet = $neutron_config['predefined_networks'][$neutron_private_net]['L2']['physnet'] - class { 'mellanox_openstack::compute': - physnet => $physnet, - physifc => $neutron_mellanox['physical_port'], - } -} - -# NOTE(bogdando) deploy compute node with disabled nova-compute -# service #LP1398817. The orchestration will start and enable it back -# after the deployment is done. -# FIXME(bogdando) This should be changed once the host aggregates implemented, bp disable-new-computes -class { 'openstack::compute': - enabled => false, - public_interface => $public_int ? { undef=>'', default=>$public_int}, - private_interface => $use_neutron ? 
{ true=>false, default=>hiera('private_int', undef)}, - internal_address => get_network_role_property('nova/api', 'ipaddr'), - libvirt_type => hiera('libvirt_type', undef), - fixed_range => $use_neutron ? { true=>false, default=>hiera('fixed_network_range', undef)}, - network_manager => hiera('network_manager', undef), - network_config => hiera('network_config', {}), - multi_host => $multi_host, - queue_provider => $queue_provider, - amqp_hosts => hiera('amqp_hosts',''), - amqp_user => $rabbit_hash['user'], - amqp_password => $rabbit_hash['password'], - rabbit_ha_queues => $rabbit_ha_queues, - auto_assign_floating_ip => $auto_assign_floating_ip, - glance_api_servers => $glance_api_servers, - vncproxy_host => $vncproxy_host, - vncserver_listen => '0.0.0.0', - migration_support => true, - debug => $debug, - verbose => $verbose, - use_stderr => $use_stderr, - cinder_volume_group => 'cinder', - vnc_enabled => true, - manage_volumes => $manage_volumes, - nova_user_password => $nova_hash[user_password], - nova_hash => $nova_hash, - cache_server_ip => $memcache_ipaddrs, - service_endpoint => $service_endpoint, - cinder => true, - cinder_iscsi_bind_addr => get_network_role_property('cinder/iscsi', 'ipaddr'), - cinder_user_password => $cinder_hash[user_password], - cinder_db_password => $cinder_hash[db_password], - ceilometer => $ceilometer_hash[enabled], - ceilometer_metering_secret => $ceilometer_hash[metering_secret], - ceilometer_user_password => $ceilometer_hash[user_password], - db_host => $db_host, - network_provider => $network_provider, - neutron_user_password => $use_neutron ? 
{ true=>$neutron_config['keystone']['admin_password'], default=>undef}, - base_mac => $base_mac, - - use_syslog => $use_syslog, - syslog_log_facility => $syslog_log_facility_nova, - syslog_log_facility_neutron => $syslog_log_facility_neutron, - nova_rate_limits => $nova_rate_limits, - nova_report_interval => $nova_report_interval, - nova_service_down_time => $nova_service_down_time, - state_path => $nova_hash[state_path], - neutron_settings => $neutron_config, - storage_hash => $storage_hash, - config_drive_format => $config_drive_format, -} - -# Required for fping API extension, see LP#1486404 -ensure_packages('fping') - -$nova_config_hash = { - 'DEFAULT/resume_guests_state_on_host_boot' => { value => hiera('resume_guests_state_on_host_boot', 'False') }, - 'DEFAULT/use_cow_images' => { value => hiera('use_cow_images', 'True') }, - 'DEFAULT/block_device_allocate_retries' => { value => $block_device_allocate_retries }, - 'DEFAULT/block_device_allocate_retries_interval' => { value => $block_device_allocate_retries_interval }, - 'libvirt/libvirt_inject_key' => { value => 'true' }, - 'libvirt/libvirt_inject_password' => { value => 'true' }, -} - -$nova_complete_hash = merge($nova_config_hash, $nova_custom_hash) - -class {'nova::config': - nova_config => $nova_complete_hash, -} - -# Configure monit watchdogs -# FIXME(bogdando) replace service_path and action to systemd, once supported -if $use_monit_real { - monit::process { $nova_compute_name : - ensure => running, - matching => '/usr/bin/python /usr/bin/nova-compute', - start_command => "${service_path} ${nova_compute_name} restart", - stop_command => "${service_path} ${nova_compute_name} stop", - pidfile => false, - } - if $use_neutron { - monit::process { $ovs_vswitchd_name : - ensure => running, - start_command => "${service_path} ${ovs_vswitchd_name} restart", - stop_command => "${service_path} ${ovs_vswitchd_name} stop", - pidfile => '/var/run/openvswitch/ovs-vswitchd.pid', - } - } else { - monit::process { 
$nova_network_name : - ensure => running, - matching => '/usr/bin/python /usr/bin/nova-network', - start_command => "${service_path} ${nova_network_name} restart", - stop_command => "${service_path} ${nova_network_name} stop", - pidfile => false, - } - monit::process { $nova_api_name : - ensure => running, - matching => '/usr/bin/python /usr/bin/nova-api', - start_command => "${service_path} ${nova_api_name} restart", - stop_command => "${service_path} ${nova_api_name} stop", - pidfile => false, - } - } -} - -######################################################################## - - -# vim: set ts=2 sw=2 et : diff --git a/f2s/resources/top-role-compute/meta.yaml b/f2s/resources/top-role-compute/meta.yaml deleted file mode 100644 index 132b1de9..00000000 --- a/f2s/resources/top-role-compute/meta.yaml +++ /dev/null @@ -1,133 +0,0 @@ -id: top-role-compute -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - access: - value: null - amqp_hosts: - value: null - auto_assign_floating_ip: - value: null - base_syslog: - value: null - block_device_allocate_retries: - value: null - block_device_allocate_retries_interval: - value: null - ceilometer_hash: - value: null - cinder_hash: - value: null - compute: - value: null - database_vip: - value: null - debug: - value: null - fqdn: - value: null - glance_api_servers: - value: null - glance_hash: - value: null - keystone_hash: - value: null - libvirt_type: - value: null - management_vip: - value: null - memcache_roles: - value: null - mp: - value: null - murano: - value: null - network_config: - value: null - network_manager: - value: null - network_metadata: - value: null - network_scheme: - value: null - neutron_mellanox: - value: null - node_name: - value: null - nodes: - value: null - nova_custom_hash: - value: null - nova_hash: - value: null - nova_rate_limits: - value: null - nova_report_interval: - value: null - nova_service_down_time: - value: null - openstack_version: - value: null - 
primary_controller: - value: null - public_int: - value: null - public_ssl: - value: null - public_vip: - value: null - puppet_modules: - value: null - quantum_settings: - value: null - queue_provider: - value: null - rabbit_hash: - value: null - resume_guests_state_on_host_boot: - value: null - role: - value: null - sahara: - value: null - service_endpoint: - value: null - storage_hash: - value: null - swift_hash: - value: null - swift_master_role: - value: null - swift_proxies: - value: null - syslog: - value: null - syslog_log_facility_cinder: - value: null - syslog_log_facility_glance: - value: null - syslog_log_facility_keystone: - value: null - syslog_log_facility_murano: - value: null - syslog_log_facility_neutron: - value: null - syslog_log_facility_nova: - value: null - syslog_log_facility_sahara: - value: null - use_cow_images: - value: null - use_neutron: - value: null - use_stderr: - value: null - use_syslog: - value: null - use_vcenter: - value: null - vcenter: - value: null diff --git a/f2s/resources/top-role-mongo/actions/run.pp b/f2s/resources/top-role-mongo/actions/run.pp deleted file mode 100644 index 9007e549..00000000 --- a/f2s/resources/top-role-mongo/actions/run.pp +++ /dev/null @@ -1,32 +0,0 @@ -notice('MODULAR: mongo.pp') - -prepare_network_config(hiera('network_scheme', {})) -$mongo_hash = hiera_hash('mongo', {}) -$mongo_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles')) -$mongo_address_map = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db') -$bind_address = get_network_role_property('mongo/db', 'ipaddr') -$use_syslog = hiera('use_syslog', true) -$debug = pick($mongo_hash['debug'], hiera('debug', false)) -$ceilometer_hash = hiera_hash('ceilometer_hash') -$roles = hiera('roles') -$replset_name = 'ceilometer' -$mongodb_port = hiera('mongodb_port', '27017') - -#################################################################### -class { 'openstack::mongo': - mongodb_bind_address => [ '127.0.0.1', 
$bind_address ], - mongodb_port => $mongodb_port, - ceilometer_metering_secret => $ceilometer_hash['metering_secret'], - ceilometer_db_password => $ceilometer_hash['db_password'], - ceilometer_replset_members => values($mongo_address_map), - replset_name => $replset_name, - mongo_version => '2.6.10', - use_syslog => $use_syslog, - debug => $debug, -} - -if !(member($roles, 'controller') or member($roles, 'primary-controller')) { - sysctl::value { 'net.ipv4.tcp_keepalive_time': - value => '300', - } -} diff --git a/f2s/resources/top-role-mongo/meta.yaml b/f2s/resources/top-role-mongo/meta.yaml deleted file mode 100644 index 481741a0..00000000 --- a/f2s/resources/top-role-mongo/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: top-role-mongo -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/top-role-primary-mongo/actions/run.pp b/f2s/resources/top-role-primary-mongo/actions/run.pp deleted file mode 100644 index 9007e549..00000000 --- a/f2s/resources/top-role-primary-mongo/actions/run.pp +++ /dev/null @@ -1,32 +0,0 @@ -notice('MODULAR: mongo.pp') - -prepare_network_config(hiera('network_scheme', {})) -$mongo_hash = hiera_hash('mongo', {}) -$mongo_nodes = get_nodes_hash_by_roles(hiera('network_metadata'), hiera('mongo_roles')) -$mongo_address_map = get_node_to_ipaddr_map_by_network_role($mongo_nodes, 'mongo/db') -$bind_address = get_network_role_property('mongo/db', 'ipaddr') -$use_syslog = hiera('use_syslog', true) -$debug = pick($mongo_hash['debug'], hiera('debug', false)) -$ceilometer_hash = hiera_hash('ceilometer_hash') -$roles = hiera('roles') -$replset_name = 'ceilometer' -$mongodb_port = hiera('mongodb_port', '27017') - -#################################################################### -class { 'openstack::mongo': - mongodb_bind_address => [ '127.0.0.1', $bind_address ], - mongodb_port => $mongodb_port, - 
ceilometer_metering_secret => $ceilometer_hash['metering_secret'], - ceilometer_db_password => $ceilometer_hash['db_password'], - ceilometer_replset_members => values($mongo_address_map), - replset_name => $replset_name, - mongo_version => '2.6.10', - use_syslog => $use_syslog, - debug => $debug, -} - -if !(member($roles, 'controller') or member($roles, 'primary-controller')) { - sysctl::value { 'net.ipv4.tcp_keepalive_time': - value => '300', - } -} diff --git a/f2s/resources/top-role-primary-mongo/meta.yaml b/f2s/resources/top-role-primary-mongo/meta.yaml deleted file mode 100644 index a3e935a1..00000000 --- a/f2s/resources/top-role-primary-mongo/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: top-role-primary-mongo -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/umm/actions/run.pp b/f2s/resources/umm/actions/run.pp deleted file mode 100644 index 5b6b58a7..00000000 --- a/f2s/resources/umm/actions/run.pp +++ /dev/null @@ -1,3 +0,0 @@ -notice('MODULAR: umm.pp') - -class {'umm': } diff --git a/f2s/resources/umm/meta.yaml b/f2s/resources/umm/meta.yaml deleted file mode 100644 index 478a72dc..00000000 --- a/f2s/resources/umm/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: umm -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/update_hosts/actions/run.pp b/f2s/resources/update_hosts/actions/run.pp deleted file mode 100644 index e82bddff..00000000 --- a/f2s/resources/update_hosts/actions/run.pp +++ /dev/null @@ -1,5 +0,0 @@ -notice('MODULAR: hosts.pp') - -class { "l23network::hosts_file": - nodes => hiera('nodes'), -} diff --git a/f2s/resources/update_hosts/meta.yaml b/f2s/resources/update_hosts/meta.yaml deleted file mode 100644 index 21e04200..00000000 --- a/f2s/resources/update_hosts/meta.yaml +++ 
/dev/null @@ -1,13 +0,0 @@ -id: update_hosts -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - nodes: - value: null - puppet_modules: - value: null diff --git a/f2s/resources/updatedb/actions/run.pp b/f2s/resources/updatedb/actions/run.pp deleted file mode 100644 index ec929499..00000000 --- a/f2s/resources/updatedb/actions/run.pp +++ /dev/null @@ -1,21 +0,0 @@ -notice('MODULAR: ceph/updatedb.pp') - -$storage_hash = hiera('storage', {}) - -if ($storage_hash['volumes_ceph'] or - $storage_hash['images_ceph'] or - $storage_hash['objects_ceph'] -) { - $use_ceph = true -} else { - $use_ceph = false -} - -if $use_ceph { - - exec {"Ensure /var/lib/ceph in the updatedb PRUNEPATH": - path => [ '/usr/bin', '/bin' ], - command => "sed -i -Ee 's|(PRUNEPATHS *= *\"[^\"]*)|\\1 /var/lib/ceph|' /etc/updatedb.conf", - unless => "test ! -f /etc/updatedb.conf || grep 'PRUNEPATHS *= *.*/var/lib/ceph.*' /etc/updatedb.conf", - } -} diff --git a/f2s/resources/updatedb/meta.yaml b/f2s/resources/updatedb/meta.yaml deleted file mode 100644 index cb9f6980..00000000 --- a/f2s/resources/updatedb/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -id: updatedb -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null - storage: - value: null diff --git a/f2s/resources/virtual_ips/actions/run.pp b/f2s/resources/virtual_ips/actions/run.pp deleted file mode 100644 index b3f0fd1f..00000000 --- a/f2s/resources/virtual_ips/actions/run.pp +++ /dev/null @@ -1,3 +0,0 @@ -notice('MODULAR: virtual_ips.pp') - -generate_vips() diff --git a/f2s/resources/virtual_ips/meta.yaml b/f2s/resources/virtual_ips/meta.yaml deleted file mode 100644 index 0b22a001..00000000 --- a/f2s/resources/virtual_ips/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: virtual_ips -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - 
network_metadata: - value: null - network_scheme: - value: null - puppet_modules: - value: null - role: - value: null diff --git a/f2s/resources/vmware-vcenter/actions/run.pp b/f2s/resources/vmware-vcenter/actions/run.pp deleted file mode 100644 index aabbf3d8..00000000 --- a/f2s/resources/vmware-vcenter/actions/run.pp +++ /dev/null @@ -1,19 +0,0 @@ -notice('MODULAR: vmware/vcenter.pp') - -$use_vcenter = hiera('use_vcenter', false) -$vcenter_hash = hiera('vcenter_hash') -$public_vip = hiera('public_vip') -$use_neutron = hiera('use_neutron', false) -$ceilometer_hash = hiera('ceilometer',{}) -$debug = pick($vcenter_hash['debug'], hiera('debug', false)) - -if $use_vcenter { - class { 'vmware': - vcenter_settings => $vcenter_hash['computes'], - vlan_interface => $vcenter_hash['esxi_vlan_interface'], - use_quantum => $use_neutron, - vnc_address => $public_vip, - ceilometer => $ceilometer_hash['enabled'], - debug => $debug, - } -} diff --git a/f2s/resources/vmware-vcenter/meta.yaml b/f2s/resources/vmware-vcenter/meta.yaml deleted file mode 100644 index 95788c06..00000000 --- a/f2s/resources/vmware-vcenter/meta.yaml +++ /dev/null @@ -1,27 +0,0 @@ -id: vmware-vcenter -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - ceilometer: - value: null - debug: - value: null - fqdn: - value: null - novanetwork_parameters: - value: null - public_vip: - value: null - puppet_modules: - value: null - role: - value: null - use_neutron: - value: null - use_vcenter: - value: null - vcenter_hash: - value: null diff --git a/f2s/resources/workloads_collector_add/actions/run.pp b/f2s/resources/workloads_collector_add/actions/run.pp deleted file mode 100644 index c9a8dbc6..00000000 --- a/f2s/resources/workloads_collector_add/actions/run.pp +++ /dev/null @@ -1,21 +0,0 @@ -notice('MODULAR: keystone/workloads_collector_add.pp') - -$workloads_hash = hiera('workloads_collector', {}) -$service_endpoint = hiera('service_endpoint') - -$haproxy_stats_url = 
"http://${service_endpoint}:10000/;csv" - -haproxy_backend_status { 'keystone-admin' : - name => 'keystone-2', - count => '200', - step => '6', - url => $haproxy_stats_url, -} -> - -class { 'openstack::workloads_collector': - enabled => $workloads_hash['enabled'], - workloads_username => $workloads_hash['username'], - workloads_password => $workloads_hash['password'], - workloads_tenant => $workloads_hash['tenant'], - workloads_create_user => true, -} diff --git a/f2s/resources/workloads_collector_add/meta.yaml b/f2s/resources/workloads_collector_add/meta.yaml deleted file mode 100644 index a052923c..00000000 --- a/f2s/resources/workloads_collector_add/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -id: workloads_collector_add -handler: puppetv2 -version: '8.0' -actions: - run: run.pp - update: run.pp -input: - fqdn: - value: null - puppet_modules: - value: null - role: - value: null - service_endpoint: - value: null - workloads_collector: - value: null diff --git a/f2s/vrs/base-os.yml b/f2s/vrs/base-os.yml deleted file mode 100644 index 4157dbde..00000000 --- a/f2s/vrs/base-os.yml +++ /dev/null @@ -1,18 +0,0 @@ -id: base-os -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run diff --git a/f2s/vrs/ceph-osd.yml b/f2s/vrs/ceph-osd.yml deleted file mode 100644 index 5be3a673..00000000 --- a/f2s/vrs/ceph-osd.yml +++ /dev/null @@ -1,143 +0,0 @@ -id: ceph-osd -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: fuel_pkgs{{index}} - from: 
f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-ceph-osd{{index}} - from: f2s/resources/top-role-ceph-osd - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: 
depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-ceph-osd{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: top-role-ceph-osd{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: top-role-ceph-osd{{index}}.run diff --git a/f2s/vrs/cinder-vmware.yml 
b/f2s/vrs/cinder-vmware.yml deleted file mode 100644 index 778d861a..00000000 --- a/f2s/vrs/cinder-vmware.yml +++ /dev/null @@ -1,183 +0,0 @@ -id: cinder-vmware -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: fuel_pkgs{{index}} - from: f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-cinder{{index}} - from: f2s/resources/openstack-cinder - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-cinder-vmware{{index}} - from: f2s/resources/top-role-cinder-vmware - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - 
parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: 
hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=cinder-keystone - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=keystone - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=rabbitmq - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=cinder-db - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-cinder-vmware{{index}}.run -- type: depends_on - state: success - parent_action: openstack-cinder{{index}}.run - depend_action: top-role-cinder-vmware{{index}}.run diff --git a/f2s/vrs/cinder.yml b/f2s/vrs/cinder.yml deleted file mode 100644 index d04ede6f..00000000 --- a/f2s/vrs/cinder.yml +++ /dev/null @@ -1,143 +0,0 @@ -id: cinder -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: fuel_pkgs{{index}} - from: f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - 
from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-cinder{{index}} - from: f2s/resources/top-role-cinder - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: 
firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-cinder{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: top-role-cinder{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: top-role-cinder{{index}}.run diff --git a/f2s/vrs/compute-vmware.yml b/f2s/vrs/compute-vmware.yml deleted file mode 100644 index 68cc5c9e..00000000 --- a/f2s/vrs/compute-vmware.yml +++ /dev/null @@ -1,147 +0,0 @@ -id: compute-vmware -resources: -- id: role_data{{index}} - from: 
f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-compute{{index}} - from: f2s/resources/top-role-compute - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-compute-vmware{{index}} - from: f2s/resources/top-role-compute-vmware - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: 
success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-compute{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: top-role-compute{{index}}.run -- type: depends_on - state: success - parent_action: 
hosts{{index}}.run - depend_action: top-role-compute{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-compute-vmware{{index}}.run -- type: depends_on - state: success - parent_action: top-role-compute{{index}}.run - depend_action: top-role-compute-vmware{{index}}.run diff --git a/f2s/vrs/compute.yml b/f2s/vrs/compute.yml deleted file mode 100644 index 4f69fab5..00000000 --- a/f2s/vrs/compute.yml +++ /dev/null @@ -1,358 +0,0 @@ -id: compute -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' -- id: fuel_pkgs{{index}} - from: f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-compute{{index}} - from: f2s/resources/top-role-compute - location: '{{node}}' - values_from: role_data{{index}} -- id: ceilometer-compute{{index}} - from: f2s/resources/ceilometer-compute - location: '{{node}}' - values_from: 
role_data{{index}} -- id: openstack-network-start{{index}} - from: f2s/resources/openstack-network-start - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-common-config{{index}} - from: f2s/resources/openstack-network-common-config - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-plugins-l2{{index}} - from: f2s/resources/openstack-network-plugins-l2 - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-agents-l3{{index}} - from: f2s/resources/openstack-network-agents-l3 - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-compute-nova{{index}} - from: f2s/resources/openstack-network-compute-nova - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-end{{index}} - from: f2s/resources/openstack-network-end - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: 
depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-compute{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: top-role-compute{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: top-role-compute{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceilometer-compute{{index}}.run -- type: depends_on - state: success - parent_action: top-role-compute{{index}}.run - depend_action: ceilometer-compute{{index}}.run -- type: depends_on - state: success - 
parent: - action: run - with_tags: - - resource=ceilometer-controller - depend_action: ceilometer-compute{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: top-role-compute{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=ironic-compute - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-keystone - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-db - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-controller - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-start{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-server-config - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - 
depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-networks - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-routers - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-compute-nova{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-compute-nova{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-compute-nova{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-compute-nova{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-keystone - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - 
resource=openstack-network-server-config - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-db - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-networks - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-routers - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-server-nova - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-agents-metadata - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-agents-dhcp - depend_action: openstack-network-end{{index}}.run diff --git a/f2s/vrs/controller.yml b/f2s/vrs/controller.yml deleted file mode 100644 index 9e51f549..00000000 --- a/f2s/vrs/controller.yml +++ /dev/null @@ -1,1201 +0,0 @@ -id: controller -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' -- id: fuel_pkgs{{index}} - from: f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: umm{{index}} - from: f2s/resources/umm - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: 
role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster{{index}} - from: f2s/resources/cluster - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster_health{{index}} - from: f2s/resources/cluster_health - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster-vrouter{{index}} - from: f2s/resources/cluster-vrouter - location: '{{node}}' - values_from: role_data{{index}} -- id: virtual_ips{{index}} - from: f2s/resources/virtual_ips - location: '{{node}}' - values_from: role_data{{index}} -- id: conntrackd{{index}} - from: f2s/resources/conntrackd - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster-haproxy{{index}} - from: f2s/resources/cluster-haproxy - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-radosgw{{index}} - from: f2s/resources/openstack-haproxy-radosgw - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-swift{{index}} - from: f2s/resources/openstack-haproxy-swift - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-nova{{index}} - from: f2s/resources/openstack-haproxy-nova - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-glance{{index}} - from: f2s/resources/openstack-haproxy-glance - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-sahara{{index}} - from: 
f2s/resources/openstack-haproxy-sahara - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-cinder{{index}} - from: f2s/resources/openstack-haproxy-cinder - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-keystone{{index}} - from: f2s/resources/openstack-haproxy-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-murano{{index}} - from: f2s/resources/openstack-haproxy-murano - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-stats{{index}} - from: f2s/resources/openstack-haproxy-stats - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-ironic{{index}} - from: f2s/resources/openstack-haproxy-ironic - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-heat{{index}} - from: f2s/resources/openstack-haproxy-heat - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-neutron{{index}} - from: f2s/resources/openstack-haproxy-neutron - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-mysqld{{index}} - from: f2s/resources/openstack-haproxy-mysqld - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-horizon{{index}} - from: f2s/resources/openstack-haproxy-horizon - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-ceilometer{{index}} - from: f2s/resources/openstack-haproxy-ceilometer - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy{{index}} - from: f2s/resources/openstack-haproxy - location: '{{node}}' - values_from: role_data{{index}} -- id: dns-server{{index}} - from: f2s/resources/dns-server - location: '{{node}}' - values_from: role_data{{index}} -- id: database{{index}} - from: f2s/resources/database - location: '{{node}}' - values_from: role_data{{index}} -- id: ceilometer-controller{{index}} - from: f2s/resources/ceilometer-controller 
- location: '{{node}}' - values_from: role_data{{index}} -- id: rabbitmq{{index}} - from: f2s/resources/rabbitmq - location: '{{node}}' - values_from: role_data{{index}} -- id: ironic-api{{index}} - from: f2s/resources/ironic-api - location: '{{node}}' - values_from: role_data{{index}} -- id: apache{{index}} - from: f2s/resources/apache - location: '{{node}}' - values_from: role_data{{index}} -- id: api-proxy{{index}} - from: f2s/resources/api-proxy - location: '{{node}}' - values_from: role_data{{index}} -- id: glance{{index}} - from: f2s/resources/glance - location: '{{node}}' - values_from: role_data{{index}} -- id: memcached{{index}} - from: f2s/resources/memcached - location: '{{node}}' - values_from: role_data{{index}} -- id: keystone{{index}} - from: f2s/resources/keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-cinder{{index}} - from: f2s/resources/openstack-cinder - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-controller{{index}} - from: f2s/resources/openstack-controller - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-start{{index}} - from: f2s/resources/openstack-network-start - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-common-config{{index}} - from: f2s/resources/openstack-network-common-config - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-server-config{{index}} - from: f2s/resources/openstack-network-server-config - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-plugins-l2{{index}} - from: f2s/resources/openstack-network-plugins-l2 - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-agents-l3{{index}} - from: f2s/resources/openstack-network-agents-l3 - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-server-nova{{index}} - from: f2s/resources/openstack-network-server-nova - location: '{{node}}' 
- values_from: role_data{{index}} -- id: openstack-network-agents-dhcp{{index}} - from: f2s/resources/openstack-network-agents-dhcp - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-agents-metadata{{index}} - from: f2s/resources/openstack-network-agents-metadata - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-end{{index}} - from: f2s/resources/openstack-network-end - location: '{{node}}' - values_from: role_data{{index}} -- id: heat{{index}} - from: f2s/resources/heat - location: '{{node}}' - values_from: role_data{{index}} -- id: horizon{{index}} - from: f2s/resources/horizon - location: '{{node}}' - values_from: role_data{{index}} -- id: murano{{index}} - from: f2s/resources/murano - location: '{{node}}' - values_from: role_data{{index}} -- id: sahara{{index}} - from: f2s/resources/sahara - location: '{{node}}' - values_from: role_data{{index}} -- id: ceph-mon{{index}} - from: f2s/resources/ceph-mon - location: '{{node}}' - values_from: role_data{{index}} -- id: ceph-radosgw{{index}} - from: f2s/resources/ceph-radosgw - location: '{{node}}' - values_from: role_data{{index}} -- id: swift{{index}} - from: f2s/resources/swift - location: '{{node}}' - values_from: role_data{{index}} -- id: controller_remaining_tasks{{index}} - from: f2s/resources/controller_remaining_tasks - location: '{{node}}' - values_from: role_data{{index}} -- id: vmware-vcenter{{index}} - from: f2s/resources/vmware-vcenter - location: '{{node}}' - values_from: role_data{{index}} -- id: swift-rebalance-cron{{index}} - from: f2s/resources/swift-rebalance-cron - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - 
depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: umm{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: umm{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: 
netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: cluster{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: cluster{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster_health{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: cluster_health{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster-vrouter{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: cluster-vrouter{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: virtual_ips{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: virtual_ips{{index}}.run -- type: depends_on - state: success - parent_action: cluster-vrouter{{index}}.run - depend_action: virtual_ips{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: conntrackd{{index}}.run -- type: depends_on - state: success - parent_action: virtual_ips{{index}}.run - depend_action: conntrackd{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: cluster-haproxy{{index}}.run -- type: depends_on - 
state: success - parent_action: virtual_ips{{index}}.run - depend_action: cluster-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-swift{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-swift{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-nova{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-nova{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-glance{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-glance{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-sahara{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-sahara{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-cinder{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-cinder{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-keystone{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-keystone{{index}}.run -- 
type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-murano{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-murano{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-stats{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-stats{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-ironic{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-ironic{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-heat{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-heat{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-neutron{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-neutron{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-mysqld{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-mysqld{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-horizon{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-horizon{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: 
openstack-haproxy-ceilometer{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-ceilometer{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-ceilometer{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-radosgw{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-horizon{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-nova{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-mysqld{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-glance{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-heat{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-sahara{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-cinder{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-keystone{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-murano{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-stats{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- 
type: depends_on - state: success - parent_action: openstack-haproxy-ironic{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-swift{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-neutron{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: dns-server{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: dns-server{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: database{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: database{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceilometer-controller{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: ceilometer-controller{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=ceilometer-keystone - depend_action: ceilometer-controller{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: rabbitmq{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: rabbitmq{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ironic-api{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: ironic-api{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=ironic-db - depend_action: ironic-api{{index}}.run -- type: depends_on - state: success - parent: - 
action: run - with_tags: - - resource=ironic-keystone - depend_action: ironic-api{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: apache{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: apache{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: api-proxy{{index}}.run -- type: depends_on - state: success - parent_action: apache{{index}}.run - depend_action: api-proxy{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=glance-keystone - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=glance-db - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: memcached{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: memcached{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: apache{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: memcached{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: 
keystone{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=keystone-db - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=cinder-keystone - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=cinder-db - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: openstack-cinder{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: ceilometer-controller{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - 
parent_action: keystone{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: ironic-api{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: glance{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=nova-keystone - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=nova-db - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=ironic-compute - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-keystone - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-db - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=top-role-compute - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: 
openstack-network-start{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-server-config{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-server-config{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-config{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-networks - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-routers - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-server-nova{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-server-nova{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: 
openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-nova{{index}}.run - depend_action: openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-nova{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-dhcp{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-config{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-metadata{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-nova{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: 
openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-dhcp{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-keystone - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-networks - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-db - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-compute-nova - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-routers - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=heat-db - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=heat-keystone - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: horizon{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: horizon{{index}}.run -- type: depends_on - state: success - parent_action: 
role_data{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: heat{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: horizon{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=murano-rabbitmq - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=murano-keystone - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=murano-db - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-end{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: horizon{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=sahara-keystone - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=sahara-db - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceph-mon{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: ceph-mon{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceph-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: apache{{index}}.run - depend_action: ceph-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: ceph-mon{{index}}.run - 
depend_action: ceph-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent_action: ceilometer-controller{{index}}.run - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=swift-keystone - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: ceph-radosgw{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: api-proxy{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: murano{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: sahara{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: ceph-mon{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: swift{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: vmware-vcenter{{index}}.run -- type: depends_on - state: success - parent_action: controller_remaining_tasks{{index}}.run - depend_action: vmware-vcenter{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: swift-rebalance-cron{{index}}.run -- type: depends_on - state: success - parent_action: swift{{index}}.run - depend_action: swift-rebalance-cron{{index}}.run diff --git 
a/f2s/vrs/fuel_node.yaml b/f2s/vrs/fuel_node.yaml deleted file mode 100644 index 23c01ec9..00000000 --- a/f2s/vrs/fuel_node.yaml +++ /dev/null @@ -1,20 +0,0 @@ -id: fuel_node -resources: - - id: ssh_transport{{index}} - from: resources/transport_ssh - values: - ssh_user: 'root' - ssh_key: '/root/.ssh/bootstrap.rsa' - - id: transports{{index}} - from: resources/transports - values: - transports:key: ssh_transport{{index}}::ssh_key - transports:user: ssh_transport{{index}}::ssh_user - transports:port: ssh_transport{{index}}::ssh_port - transports:name: ssh_transport{{index}}::name - - id: node{{index}} - from: resources/ro_node - values: - name: node{{index}} - ip: {{ip}} - transports_id: transports{{index}}::transports_id diff --git a/f2s/vrs/genkeys.yaml b/f2s/vrs/genkeys.yaml deleted file mode 100644 index 3b2d4024..00000000 --- a/f2s/vrs/genkeys.yaml +++ /dev/null @@ -1,10 +0,0 @@ -id: genkeys -resources: - - id: genkeys{{index}} - from: f2s/resources/genkeys - location: {{node}} - values: - uid: '{{index}}' - path: /var/lib/fuel/keys/ - ssl: [mongo] - ssh: [neutron, nova, mysql] diff --git a/f2s/vrs/ironic.yml b/f2s/vrs/ironic.yml deleted file mode 100644 index cd7d8bfb..00000000 --- a/f2s/vrs/ironic.yml +++ /dev/null @@ -1,295 +0,0 @@ -id: ironic -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: fuel_pkgs{{index}} - from: f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: 
f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: ironic-conductor{{index}} - from: f2s/resources/ironic-conductor - location: '{{node}}' - values_from: role_data{{index}} -- id: ironic-compute{{index}} - from: f2s/resources/ironic-compute - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-start{{index}} - from: f2s/resources/openstack-network-start - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-common-config{{index}} - from: f2s/resources/openstack-network-common-config - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-plugins-l2{{index}} - from: f2s/resources/openstack-network-plugins-l2 - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-end{{index}} - from: f2s/resources/openstack-network-end - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on 
- state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=ssl-add-trust-chain - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ironic-conductor{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ironic-conductor{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: ironic-conductor{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ironic-compute{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ironic-compute{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: ironic-compute{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: ironic-compute{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - 
parent_action: netconfig{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-keystone - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-db - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=top-role-compute - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-controller - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-start{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-server-config - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - 
resource=neutron-keystone - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-compute-nova - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-server-config - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=neutron-db - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-networks - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-routers - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-server-nova - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-agents-l3 - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-agents-metadata - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-agents-dhcp - depend_action: openstack-network-end{{index}}.run diff --git a/f2s/vrs/mongo.yml b/f2s/vrs/mongo.yml deleted file mode 100644 index 92274851..00000000 --- a/f2s/vrs/mongo.yml +++ /dev/null @@ -1,143 +0,0 @@ -id: mongo -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: fuel_pkgs{{index}} - from: 
f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-mongo{{index}} - from: f2s/resources/top-role-mongo - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - 
state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-mongo{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: top-role-mongo{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: top-role-mongo{{index}}.run diff --git a/f2s/vrs/prep.yaml b/f2s/vrs/prep.yaml deleted file mode 100644 
index 193ff98e..00000000 --- a/f2s/vrs/prep.yaml +++ /dev/null @@ -1,29 +0,0 @@ -id: prep_tasks -resources: - - id: sources{{index}} - from: resources/sources - location: {{node}} - values: - sources: - - src: /var/lib/fuel/keys/{{env}}/neutron - dst: /var/lib/astute - - src: /var/lib/fuel/keys/{{env}}/nova - dst: /var/lib/astute - - src: /var/lib/fuel/keys/{{env}}/mysql - dst: /var/lib/astute - - src: /var/lib/fuel/keys/{{env}}/mongo - dst: /var/lib/astute - - src: /etc/puppet/modules - dst: /etc/puppet/ -events: - - type: depends_on - state: success - parent: - action: run - with_tags: - - resource=genkeys - depend_action: sources{{index}}.run - - type: depends_on - state: success - parent_action: sources{{index}}.run - depend_action: role_data{{index}}.run diff --git a/f2s/vrs/primary-controller.yml b/f2s/vrs/primary-controller.yml deleted file mode 100644 index 43cf3e6b..00000000 --- a/f2s/vrs/primary-controller.yml +++ /dev/null @@ -1,1399 +0,0 @@ -id: primary-controller -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' -- id: fuel_pkgs{{index}} - from: f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: umm{{index}} - from: f2s/resources/umm - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - 
from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster{{index}} - from: f2s/resources/cluster - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster_health{{index}} - from: f2s/resources/cluster_health - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster-vrouter{{index}} - from: f2s/resources/cluster-vrouter - location: '{{node}}' - values_from: role_data{{index}} -- id: virtual_ips{{index}} - from: f2s/resources/virtual_ips - location: '{{node}}' - values_from: role_data{{index}} -- id: conntrackd{{index}} - from: f2s/resources/conntrackd - location: '{{node}}' - values_from: role_data{{index}} -- id: cluster-haproxy{{index}} - from: f2s/resources/cluster-haproxy - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-ceilometer{{index}} - from: f2s/resources/openstack-haproxy-ceilometer - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-radosgw{{index}} - from: f2s/resources/openstack-haproxy-radosgw - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-horizon{{index}} - from: f2s/resources/openstack-haproxy-horizon - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-swift{{index}} - from: f2s/resources/openstack-haproxy-swift - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-nova{{index}} - from: f2s/resources/openstack-haproxy-nova - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-glance{{index}} - from: f2s/resources/openstack-haproxy-glance - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-sahara{{index}} - from: 
f2s/resources/openstack-haproxy-sahara - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-cinder{{index}} - from: f2s/resources/openstack-haproxy-cinder - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-keystone{{index}} - from: f2s/resources/openstack-haproxy-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-murano{{index}} - from: f2s/resources/openstack-haproxy-murano - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-stats{{index}} - from: f2s/resources/openstack-haproxy-stats - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-ironic{{index}} - from: f2s/resources/openstack-haproxy-ironic - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-heat{{index}} - from: f2s/resources/openstack-haproxy-heat - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-neutron{{index}} - from: f2s/resources/openstack-haproxy-neutron - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy-mysqld{{index}} - from: f2s/resources/openstack-haproxy-mysqld - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-haproxy{{index}} - from: f2s/resources/openstack-haproxy - location: '{{node}}' - values_from: role_data{{index}} -- id: dns-server{{index}} - from: f2s/resources/dns-server - location: '{{node}}' - values_from: role_data{{index}} -- id: database{{index}} - from: f2s/resources/database - location: '{{node}}' - values_from: role_data{{index}} -- id: keystone-db{{index}} - from: f2s/resources/keystone-db - location: '{{node}}' - values_from: role_data{{index}} -- id: glance-db{{index}} - from: f2s/resources/glance-db - location: '{{node}}' - values_from: role_data{{index}} -- id: ironic-db{{index}} - from: f2s/resources/ironic-db - location: '{{node}}' - values_from: role_data{{index}} -- id: neutron-db{{index}} - 
from: f2s/resources/neutron-db - location: '{{node}}' - values_from: role_data{{index}} -- id: murano-db{{index}} - from: f2s/resources/murano-db - location: '{{node}}' - values_from: role_data{{index}} -- id: nova-db{{index}} - from: f2s/resources/nova-db - location: '{{node}}' - values_from: role_data{{index}} -- id: cinder-db{{index}} - from: f2s/resources/cinder-db - location: '{{node}}' - values_from: role_data{{index}} -- id: sahara-db{{index}} - from: f2s/resources/sahara-db - location: '{{node}}' - values_from: role_data{{index}} -- id: heat-db{{index}} - from: f2s/resources/heat-db - location: '{{node}}' - values_from: role_data{{index}} -- id: rabbitmq{{index}} - from: f2s/resources/rabbitmq - location: '{{node}}' - values_from: role_data{{index}} -- id: murano-rabbitmq{{index}} - from: f2s/resources/murano-rabbitmq - location: '{{node}}' - values_from: role_data{{index}} -- id: apache{{index}} - from: f2s/resources/apache - location: '{{node}}' - values_from: role_data{{index}} -- id: api-proxy{{index}} - from: f2s/resources/api-proxy - location: '{{node}}' - values_from: role_data{{index}} -- id: memcached{{index}} - from: f2s/resources/memcached - location: '{{node}}' - values_from: role_data{{index}} -- id: keystone{{index}} - from: f2s/resources/keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: sahara-keystone{{index}} - from: f2s/resources/sahara-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: neutron-keystone{{index}} - from: f2s/resources/neutron-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: cinder-keystone{{index}} - from: f2s/resources/cinder-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: glance-keystone{{index}} - from: f2s/resources/glance-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: glance{{index}} - from: f2s/resources/glance - location: '{{node}}' - values_from: role_data{{index}} -- id: ironic-keystone{{index}} - 
from: f2s/resources/ironic-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: ironic-api{{index}} - from: f2s/resources/ironic-api - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-cinder{{index}} - from: f2s/resources/openstack-cinder - location: '{{node}}' - values_from: role_data{{index}} -- id: ceilometer-keystone{{index}} - from: f2s/resources/ceilometer-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: ceilometer-controller{{index}} - from: f2s/resources/ceilometer-controller - location: '{{node}}' - values_from: role_data{{index}} -- id: murano-keystone{{index}} - from: f2s/resources/murano-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: workloads_collector_add{{index}} - from: f2s/resources/workloads_collector_add - location: '{{node}}' - values_from: role_data{{index}} -- id: heat-keystone{{index}} - from: f2s/resources/heat-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: swift-keystone{{index}} - from: f2s/resources/swift-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: nova-keystone{{index}} - from: f2s/resources/nova-keystone - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-controller{{index}} - from: f2s/resources/openstack-controller - location: '{{node}}' - values_from: role_data{{index}} -- id: ceph-mon{{index}} - from: f2s/resources/ceph-mon - location: '{{node}}' - values_from: role_data{{index}} -- id: ceph-radosgw{{index}} - from: f2s/resources/ceph-radosgw - location: '{{node}}' - values_from: role_data{{index}} -- id: heat{{index}} - from: f2s/resources/heat - location: '{{node}}' - values_from: role_data{{index}} -- id: swift{{index}} - from: f2s/resources/swift - location: '{{node}}' - values_from: role_data{{index}} -- id: swift-rebalance-cron{{index}} - from: f2s/resources/swift-rebalance-cron - location: '{{node}}' - values_from: role_data{{index}} -- id: 
openstack-network-start{{index}} - from: f2s/resources/openstack-network-start - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-common-config{{index}} - from: f2s/resources/openstack-network-common-config - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-server-config{{index}} - from: f2s/resources/openstack-network-server-config - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-plugins-l2{{index}} - from: f2s/resources/openstack-network-plugins-l2 - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-networks{{index}} - from: f2s/resources/openstack-network-networks - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-routers{{index}} - from: f2s/resources/openstack-network-routers - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-agents-l3{{index}} - from: f2s/resources/openstack-network-agents-l3 - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-server-nova{{index}} - from: f2s/resources/openstack-network-server-nova - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-agents-dhcp{{index}} - from: f2s/resources/openstack-network-agents-dhcp - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-agents-metadata{{index}} - from: f2s/resources/openstack-network-agents-metadata - location: '{{node}}' - values_from: role_data{{index}} -- id: openstack-network-end{{index}} - from: f2s/resources/openstack-network-end - location: '{{node}}' - values_from: role_data{{index}} -- id: horizon{{index}} - from: f2s/resources/horizon - location: '{{node}}' - values_from: role_data{{index}} -- id: murano{{index}} - from: f2s/resources/murano - location: '{{node}}' - values_from: role_data{{index}} -- id: sahara{{index}} - from: f2s/resources/sahara - location: '{{node}}' - values_from: role_data{{index}} -- 
id: controller_remaining_tasks{{index}} - from: f2s/resources/controller_remaining_tasks - location: '{{node}}' - values_from: role_data{{index}} -- id: vmware-vcenter{{index}} - from: f2s/resources/vmware-vcenter - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: umm{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: umm{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - 
depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: cluster{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: cluster{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster_health{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: cluster_health{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster-vrouter{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: cluster-vrouter{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: virtual_ips{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: virtual_ips{{index}}.run -- type: depends_on - state: success 
- parent_action: cluster-vrouter{{index}}.run - depend_action: virtual_ips{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: conntrackd{{index}}.run -- type: depends_on - state: success - parent_action: virtual_ips{{index}}.run - depend_action: conntrackd{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cluster-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: cluster{{index}}.run - depend_action: cluster-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: virtual_ips{{index}}.run - depend_action: cluster-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-ceilometer{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-ceilometer{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-horizon{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-horizon{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-swift{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-swift{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-nova{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - 
depend_action: openstack-haproxy-nova{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-glance{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-glance{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-sahara{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-sahara{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-cinder{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-cinder{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-keystone{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-murano{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-murano{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-stats{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-stats{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-ironic{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-ironic{{index}}.run -- type: depends_on - state: success - parent_action: 
role_data{{index}}.run - depend_action: openstack-haproxy-heat{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-heat{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-neutron{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-neutron{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy-mysqld{{index}}.run -- type: depends_on - state: success - parent_action: cluster-haproxy{{index}}.run - depend_action: openstack-haproxy-mysqld{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-ceilometer{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-radosgw{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-horizon{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-nova{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-mysqld{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-glance{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-heat{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-sahara{{index}}.run - depend_action: 
openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-cinder{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-keystone{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-murano{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-stats{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-ironic{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-swift{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy-neutron{{index}}.run - depend_action: openstack-haproxy{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: dns-server{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: dns-server{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: database{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: database{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: keystone-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: keystone-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: glance-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: glance-db{{index}}.run -- type: depends_on - state: 
success - parent_action: role_data{{index}}.run - depend_action: ironic-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: ironic-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: neutron-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: neutron-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: murano-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: murano-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: nova-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: nova-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cinder-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: cinder-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: sahara-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: sahara-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: heat-db{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: heat-db{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: rabbitmq{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: rabbitmq{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: murano-rabbitmq{{index}}.run -- type: depends_on - state: success - 
parent_action: rabbitmq{{index}}.run - depend_action: murano-rabbitmq{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: apache{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: apache{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: api-proxy{{index}}.run -- type: depends_on - state: success - parent_action: apache{{index}}.run - depend_action: api-proxy{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: memcached{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: memcached{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone-db{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: apache{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: memcached{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: sahara-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: sahara-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: neutron-keystone{{index}}.run -- type: depends_on - 
state: success - parent_action: keystone{{index}}.run - depend_action: neutron-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: cinder-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: cinder-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: glance-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: glance-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: glance-keystone{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: glance-db{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: glance{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ironic-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: ironic-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ironic-api{{index}}.run -- type: depends_on - state: success - parent_action: ironic-db{{index}}.run - depend_action: ironic-api{{index}}.run -- type: depends_on - state: success - parent_action: ironic-keystone{{index}}.run - depend_action: ironic-api{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: 
ironic-api{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: cinder-keystone{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: cinder-db{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: openstack-cinder{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceilometer-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: ceilometer-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceilometer-controller{{index}}.run -- type: depends_on - state: success - parent_action: ceilometer-keystone{{index}}.run - depend_action: ceilometer-controller{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: ceilometer-controller{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: murano-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: murano-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: workloads_collector_add{{index}}.run -- type: depends_on - state: success - parent_action: 
keystone{{index}}.run - depend_action: workloads_collector_add{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: heat-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: heat-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: swift-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: swift-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: nova-keystone{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: nova-keystone{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: openstack-cinder{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: database{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: nova-keystone{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: ceilometer-controller{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: nova-db{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: keystone{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: ironic-api{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - 
state: success - parent_action: glance{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: openstack-haproxy{{index}}.run - depend_action: openstack-controller{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceph-mon{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: ceph-mon{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ceph-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: apache{{index}}.run - depend_action: ceph-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: ceph-mon{{index}}.run - depend_action: ceph-radosgw{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent_action: heat-db{{index}}.run - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent_action: heat-keystone{{index}}.run - depend_action: heat{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent_action: ceilometer-controller{{index}}.run - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent_action: swift-keystone{{index}}.run - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: swift{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: swift-rebalance-cron{{index}}.run -- type: depends_on - state: success - parent_action: swift{{index}}.run - depend_action: 
swift-rebalance-cron{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: neutron-keystone{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: neutron-db{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=ironic-compute - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=top-role-compute - depend_action: openstack-network-start{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-start{{index}}.run - depend_action: openstack-network-common-config{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-server-config{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-server-config{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-config{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: 
openstack-network-common-config{{index}}.run - depend_action: openstack-network-plugins-l2{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-networks{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-networks{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-routers{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-networks{{index}}.run - depend_action: openstack-network-routers{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-networks{{index}}.run - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-routers{{index}}.run - depend_action: openstack-network-agents-l3{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-server-nova{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-server-nova{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-nova{{index}}.run - depend_action: 
openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-agents-dhcp{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-nova{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-dhcp{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-agents-metadata{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: neutron-keystone{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-common-config{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-server-config{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: neutron-db{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-networks{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-routers{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - 
state: success - parent_action: openstack-network-server-nova{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-plugins-l2{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-l3{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-metadata{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-agents-dhcp{{index}}.run - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=openstack-network-compute-nova - depend_action: openstack-network-end{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: horizon{{index}}.run -- type: depends_on - state: success - parent_action: openstack-controller{{index}}.run - depend_action: horizon{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: heat{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: murano-db{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: murano-rabbitmq{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: murano-keystone{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: rabbitmq{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: horizon{{index}}.run - depend_action: murano{{index}}.run -- type: depends_on - state: success - parent_action: 
role_data{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: sahara-keystone{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: openstack-network-end{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: sahara-db{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: horizon{{index}}.run - depend_action: sahara{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: ceph-radosgw{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: api-proxy{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: murano{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: sahara{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: ceph-mon{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: swift{{index}}.run - depend_action: controller_remaining_tasks{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: vmware-vcenter{{index}}.run -- type: depends_on - state: success - parent_action: controller_remaining_tasks{{index}}.run - depend_action: vmware-vcenter{{index}}.run diff --git a/f2s/vrs/primary-mongo.yml b/f2s/vrs/primary-mongo.yml deleted file mode 100644 index 78d78a3a..00000000 --- a/f2s/vrs/primary-mongo.yml +++ /dev/null @@ -1,143 +0,0 @@ -id: primary-mongo -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - 
location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: fuel_pkgs{{index}} - from: f2s/resources/fuel_pkgs - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -- id: firewall{{index}} - from: f2s/resources/firewall - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: hosts{{index}} - from: f2s/resources/hosts - location: '{{node}}' - values_from: role_data{{index}} -- id: top-role-primary-mongo{{index}} - from: f2s/resources/top-role-primary-mongo - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: fuel_pkgs{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: 
tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: firewall{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: connectivity_tests{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: ssl-add-trust-chain{{index}}.run - depend_action: hosts{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: top-role-primary-mongo{{index}}.run -- type: depends_on - state: success - parent_action: firewall{{index}}.run - depend_action: 
top-role-primary-mongo{{index}}.run -- type: depends_on - state: success - parent_action: hosts{{index}}.run - depend_action: top-role-primary-mongo{{index}}.run diff --git a/f2s/vrs/virt.yml b/f2s/vrs/virt.yml deleted file mode 100644 index f23090c2..00000000 --- a/f2s/vrs/virt.yml +++ /dev/null @@ -1,101 +0,0 @@ -id: virt -resources: -- id: role_data{{index}} - from: f2s/resources/role_data - location: '{{node}}' - values: - env: '{{env}}' - puppet_modules: /etc/puppet/modules - uid: '{{index}}' - puppet_modules: '/etc/puppet/modules' -- id: ssl-keys-saving{{index}} - from: f2s/resources/ssl-keys-saving - location: '{{node}}' - values_from: role_data{{index}} -- id: ssl-add-trust-chain{{index}} - from: f2s/resources/ssl-add-trust-chain - location: '{{node}}' - values_from: role_data{{index}} -- id: logging{{index}} - from: f2s/resources/logging - location: '{{node}}' - values_from: role_data{{index}} -- id: tools{{index}} - from: f2s/resources/tools - location: '{{node}}' - values_from: role_data{{index}} -- id: netconfig{{index}} - from: f2s/resources/netconfig - location: '{{node}}' - values_from: role_data{{index}} -- id: generate_vms{{index}} - from: f2s/resources/generate_vms - location: '{{node}}' - values_from: role_data{{index}} -- id: connectivity_tests{{index}} - from: f2s/resources/connectivity_tests - location: '{{node}}' - values_from: role_data{{index}} -events: -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: - - resource=firewall - depend_action: ssl-keys-saving{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: ssl-keys-saving{{index}}.run - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent: - action: run - with_tags: 
- - resource=firewall - depend_action: ssl-add-trust-chain{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: logging{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: logging{{index}}.run - depend_action: tools{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: tools{{index}}.run - depend_action: netconfig{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: generate_vms{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: generate_vms{{index}}.run -- type: depends_on - state: success - parent_action: role_data{{index}}.run - depend_action: connectivity_tests{{index}}.run -- type: depends_on - state: success - parent_action: netconfig{{index}}.run - depend_action: connectivity_tests{{index}}.run From 3c1e3b0b03cbef97bbfa3cbe5fab8c101c07ca91 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 1 Dec 2015 15:49:07 +0200 Subject: [PATCH 51/51] Remove all f2s specific software from Dockerfile --- Dockerfile | 9 --------- docker-compose.yml | 2 +- solar/core/resource/resource.py | 2 +- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8afcb984..a495649c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,22 +8,13 @@ ADD templates /templates ADD run.sh /run.sh RUN apt-get update -# Install pip's dependency: setuptools: RUN apt-get install -y python python-dev python-distribute python-pip \ libyaml-dev vim libffi-dev libssl-dev RUN pip install ansible -RUN apt-get install -y libffi-dev libssl-dev - RUN pip install https://github.com/Mirantis/solar/archive/master.zip RUN pip install 
https://github.com/Mirantis/solar-agent/archive/master.zip RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --tags install -RUN pip install -U setuptools>=17.1 -RUN pip install -U python-fuelclient -RUN apt-get install -y puppet -RUN gem install hiera -RUN mkdir -p /etc/puppet/hieradata/ - CMD ["/run.sh"] diff --git a/docker-compose.yml b/docker-compose.yml index f5ca2933..1317fdb8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -solar: +solar-celery: image: solarproject/solar-celery # path inside of the container should be exactly the same as outside # because solar uses absolute path to find resoruce actions files diff --git a/solar/core/resource/resource.py b/solar/core/resource/resource.py index 71822222..6f24a88d 100644 --- a/solar/core/resource/resource.py +++ b/solar/core/resource/resource.py @@ -235,7 +235,7 @@ class Resource(object): def resource_inputs(self): return self.db_obj.inputs - def to_dict(self, inputs=True): + def to_dict(self, inputs=False): ret = self.db_obj.to_dict() if inputs: ret['inputs'] = self.db_obj.inputs.as_dict()