From 9e76ddd22009608c8199dd5b603b37f7e08643d6 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 16 Jul 2015 17:41:55 +0300 Subject: [PATCH 01/86] Stop execution by marking all tasks SKIPPED --- solar/solar/cli/orch.py | 19 ++++++++++++++----- solar/solar/orchestration/graph.py | 8 -------- solar/solar/orchestration/tasks.py | 14 +++++++++++--- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/solar/solar/cli/orch.py b/solar/solar/cli/orch.py index 74c46ef6..16898a36 100644 --- a/solar/solar/cli/orch.py +++ b/solar/solar/cli/orch.py @@ -36,10 +36,11 @@ def update(uid, plan): @click.argument('uid') def report(uid): colors = { - 'PENDING': 'blue', + 'PENDING': 'cyan', 'ERROR': 'red', 'SUCCESS': 'green', - 'INPROGRESS': 'yellow'} + 'INPROGRESS': 'yellow', + 'SKIPPED': 'blue'} report = graph.report_topo(uid) for item in report: @@ -78,7 +79,14 @@ def stop(uid): # using revoke(terminate=True) will lead to inability to restart execution # research possibility of customizations of # app.control and Panel.register in celery - graph.soft_stop(uid) + tasks.soft_stop.apply_async(args=[uid], queue='scheduler') + + +@orchestration.command() +@click.argument('uid') +def resume(uid): + graph.reset(uid, ['SKIPPED']) + tasks.schedule_start.apply_async(args=[uid], queue='scheduler') @orchestration.command() @@ -94,10 +102,11 @@ def dg(uid): plan = graph.get_graph(uid) colors = { - 'PENDING': 'blue', + 'PENDING': 'cyan', 'ERROR': 'red', 'SUCCESS': 'green', - 'INPROGRESS': 'yellow'} + 'INPROGRESS': 'yellow', + 'SKIPPED': 'blue'} for n in plan: color = colors[plan.node[n]['status']] diff --git a/solar/solar/orchestration/graph.py b/solar/solar/orchestration/graph.py index d7342e23..d71b5404 100644 --- a/solar/solar/orchestration/graph.py +++ b/solar/solar/orchestration/graph.py @@ -78,14 +78,6 @@ def reset(uid, states=None): save_graph(uid, dg) -def soft_stop(uid): - """Graph will stop when all currently inprogress tasks will be finished - """ - dg = get_graph(uid) - dg.graph['stop'] = True - save_graph(uid, dg) - - def report_topo(uid): dg = get_graph(uid) diff --git a/solar/solar/orchestration/tasks.py b/solar/solar/orchestration/tasks.py index 49794ac9..3478b92a 100644 --- a/solar/solar/orchestration/tasks.py +++ b/solar/solar/orchestration/tasks.py @@ -118,10 +118,18 @@ def schedule_start(plan_uid, start=None, end=None): - apply different policies to tasks """ dg = graph.get_graph(plan_uid) - dg.graph['stop'] = False schedule(plan_uid, dg) +@app.task +def soft_stop(plan_uid): + dg = graph.get_graph(plan_uid) + for n in dg: + if dg.node[n]['status'] == 'PENDING': + dg.node[n]['status'] = 'SKIPPED' + graph.save_graph(plan_uid, dg) + + @app.task def schedule_next(task_id, status, errmsg=None): plan_uid, task_name = task_id.rsplit(':', 1) @@ -149,7 +157,7 @@ def traverse(dg): visited = set() for node in dg: data = dg.node[node] - if data['status'] not in ('PENDING', 'INPROGRESS'): + if data['status'] not in ('PENDING', 'INPROGRESS', 'SKIPPED'): visited.add(node) for node in dg: @@ -157,7 +165,7 @@ def traverse(dg): if node in visited: continue - elif data['status'] == 'INPROGRESS': + elif data['status'] in ('INPROGRESS', 'SKIPPED'): continue predecessors = set(dg.predecessors(node)) From a3ec8ce8533ee88d517454554ebc9d9504c30e3c Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 13 Jul 2015 17:03:05 +0300 Subject: [PATCH 02/86] Add ordered redis data structure to store system log - keeps order in which items are inserted - allow to update certain item by uid - provides FIFO interface 
for collection --- solar/solar/interfaces/db/redis_db.py | 48 +++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/solar/solar/interfaces/db/redis_db.py b/solar/solar/interfaces/db/redis_db.py index ad4730fa..511950de 100644 --- a/solar/solar/interfaces/db/redis_db.py +++ b/solar/solar/interfaces/db/redis_db.py @@ -67,6 +67,9 @@ class RedisDB(object): def clear(self): self._r.flushdb() + def get_set(self, collection): + return OrderedSet(self._r, collection) + def clear_collection(self, collection=COLLECTIONS.resource): key_glob = self._make_key(collection, '*') @@ -83,6 +86,51 @@ class RedisDB(object): return '{0}:{1}'.format(collection, _id) +class OrderedSet(object): + + def __init__(self, client, collection): + self.r = client + self.collection = collection + self.order_counter = '{}:incr'.format(collection) + self.order = '{}:order'.format(collection) + + def add(self, items): + pipe = self.r.pipeline() + for key, value in items: + count = self.r.incr(self.order_counter) + pipe.zadd(self.order, count, key) + pipe.hset(self.collection, key, json.dumps(value)) + pipe.execute() + + def rem(self, keys): + pipe = self.r.pipeline() + for key in keys: + pipe.zrem(self.order, key) + pipe.hdel(self.collection, key) + pipe.execute() + + def get(self, key): + value = self.r.hget(self.collection, key) + if value: + return json.loads(value) + return None + + def update(self, key, value): + self.r.hset(self.collection, key, json.dumps(value)) + + def clean(self): + self.rem(self.r.zrange(self.order, 0, -1)) + + def rem_left(self, n=1): + self.rem(r.zrevrange(self.order, 0, n-1)) + + def get_left(self, n=1): + result = [] + for key in self.r.zrevrange(self.order, 0, n-1): + result.append(self.get(key)) + return result + + class FakeRedisDB(RedisDB): REDIS_CLIENT = fakeredis.FakeStrictRedis From 3c0e8b1e1a2c76730a080c281ba1ba11e01f0e02 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 13 Jul 2015 17:24:47 +0300 Subject: [PATCH 03/86] Add system_log module - refactor stage/commit procedure - integrate stage/commit with solar using celery --- solar/solar/operations.py | 197 --------------------------- solar/solar/state.py | 152 --------------------- solar/solar/system_log/__init__.py | 0 solar/solar/system_log/change.py | 94 +++++++++++++ solar/solar/system_log/data.py | 120 ++++++++++++++++ solar/solar/system_log/operations.py | 21 +++ solar/solar/system_log/signals.py | 21 +++ solar/solar/system_log/tasks.py | 16 +++ 8 files changed, 272 insertions(+), 349 deletions(-) delete mode 100644 solar/solar/operations.py delete mode 100644 solar/solar/state.py create mode 100644 solar/solar/system_log/__init__.py create mode 100644 solar/solar/system_log/change.py create mode 100644 solar/solar/system_log/data.py create mode 100644 solar/solar/system_log/operations.py create mode 100644 solar/solar/system_log/signals.py create mode 100644 solar/solar/system_log/tasks.py diff --git a/solar/solar/operations.py b/solar/solar/operations.py deleted file mode 100644 index 6bf91051..00000000 --- a/solar/solar/operations.py +++ /dev/null @@ -1,197 +0,0 @@ - - -from solar import state -from solar.core.log import log -from solar.core import signals -from solar.core import resource -from solar import utils -from solar.interfaces.db import get_db -from solar.core import actions - -db = get_db() - -from dictdiffer import diff, patch, revert -from fabric import api as fabric_api -import networkx as nx - - -def guess_action(from_, to): - # TODO(dshulyak) it should be more flexible - if not 
from_: - return 'run' - elif not to: - return 'remove' - else: - # it should be update - return 'update' - - -def connections(res, graph): - result = [] - for pred in graph.predecessors(res.name): - for num, edge in graph.get_edge_data(pred, res.name).items(): - if 'label' in edge: - if ':' in edge['label']: - parent, child = edge['label'].split(':') - mapping = [parent, child] - else: - mapping = [edge['label'], edge['label']] - else: - mapping = None - result.append([pred, res.name, mapping]) - return result - - -def to_dict(resource, graph): - res = resource.to_dict() - res['connections'] = connections(resource, graph) - return res - - -def create_diff(staged, commited): - if 'connections' in commited: - commited['connections'].sort() - staged['connections'].sort() - if 'tags' in commited: - commited['tags'].sort() - staged['tags'].sort() - - return list(diff(commited, staged)) - - -def _stage_changes(staged_resources, conn_graph, - commited_resources, staged_log): - - try: - srt = nx.topological_sort(conn_graph) - except: - for cycle in nx.simple_cycles(conn_graph): - log.debug('CYCLE: %s', cycle) - raise - - for res_uid in srt: - commited_data = commited_resources.get(res_uid, {}) - staged_data = staged_resources.get(res_uid, {}) - - df = create_diff(staged_data, commited_data) - - if df: - - log_item = state.LogItem( - utils.generate_uuid(), - res_uid, - df, - guess_action(commited_data, staged_data)) - staged_log.append(log_item) - return staged_log - - -def stage_changes(): - conn_graph = signals.detailed_connection_graph() - staged = {r.name: to_dict(r, conn_graph) for r in resource.load_all().values()} - commited = state.CD() - log = state.SL() - log.delete() - return _stage_changes(staged, conn_graph, commited, log) - - -def execute(res, action): - try: - actions.resource_action(res, action) - return state.STATES.success - except Exception as e: - return state.STATES.error - - -def commit(li, resources, commited, history): - - staged_res = resources[li.res] - staged_data = patch(li.diff, commited.get(li.res, {})) - - # TODO(dshulyak) think about this hack for update - if li.action == 'update': - commited_res = resource.wrap_resource( - commited[li.res]['metadata']) - result_state = execute(commited_res, 'remove') - - staged_res.set_args_from_dict(staged_data['input']) - - if result_state is state.STATES.success: - result_state = execute(staged_res, 'run') - else: - result_state = execute(staged_res, li.action) - - # resource_action return None in case there is no actions - result_state = result_state or state.STATES.success - - commited[li.res] = staged_data - li.state = result_state - - history.append(li) - - if result_state is state.STATES.error: - raise Exception('Failed') - - -def commit_one(): - commited = state.CD() - history = state.CL() - staged = state.SL() - - resources = resource.load_all() - commit(staged.popleft(), resources, commited, history) - - -def commit_changes(): - # just shortcut to test stuff - commited = state.CD() - history = state.CL() - staged = state.SL() - resources = resource.load_all() - - while staged: - commit(staged.popleft(), resources, commited, history) - - -def rollback(log_item): - log = state.SL() - - resources = resource.load_all() - commited = state.CD()[log_item.res] - - staged = revert(log_item.diff, commited) - - for e, r, mapping in commited.get('connections', ()): - signals.disconnect(resources[e], resources[r]) - - for e, r, mapping in staged.get('connections', ()): - signals.connect(resources[e], resources[r], dict([mapping])) - - 
df = create_diff(staged, commited) - - log_item = state.LogItem( - utils.generate_uuid(), - log_item.res, df, guess_action(commited, staged)) - log.append(log_item) - - res = resource.load(log_item.res) - res.set_args_from_dict(staged['input']) - - return log_item - - -def rollback_uid(uid): - item = next(l for l in state.CL() if l.uid == uid) - return rollback(item) - - -def rollback_last(): - l = state.CL().items[-1] - return rollback(l) - - -def rollback_all(): - cl = state.CL() - - while cl: - rollback(cl.pop()) diff --git a/solar/solar/state.py b/solar/solar/state.py deleted file mode 100644 index d6c6d7c9..00000000 --- a/solar/solar/state.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import collections -from collections import deque -from functools import partial - -from solar import utils - -from enum import Enum - -from solar.interfaces.db import get_db - -db = get_db() - - -STATES = Enum('States', 'error inprogress pending success') - - -def state_file(name): - if 'log' in name: - return Log(name) - elif 'data' in name: - return Data(name) - - -CD = partial(state_file, 'commited_data') -SD = partial(state_file, 'staged_data') -SL = partial(state_file, 'stage_log') -IL = partial(state_file, 'inprogress_log') -CL = partial(state_file, 'commit_log') - - -class LogItem(object): - - def __init__(self, uid, res_uid, diff, action, state=None): - self.uid = uid - self.res = res_uid - self.diff = diff - self.state = state or STATES.pending - self.action = action - - def to_yaml(self): - return utils.yaml_dump(self.to_dict()) - - def to_dict(self): - return {'uid': self.uid, - 'res': self.res, - 'diff': self.diff, - 'state': self.state.name, - 'action': self.action} - - def __str__(self): - return self.to_yaml() - - def __repr__(self): - return self.to_yaml() - - -class Log(object): - - def __init__(self, path): - self.path = path - items = [] - r = db.read(path, collection=db.COLLECTIONS.state_log) - if r: - items = r or items - - self.items = deque([LogItem( - l['uid'], l['res'], - l['diff'], l['action'], - getattr(STATES, l['state'])) for l in items]) - - def delete(self): - self.items = deque() - db.delete(self.path, db.COLLECTIONS.state_log) - - def sync(self): - db.save( - self.path, - [i.to_dict() for i in self.items], - collection=db.COLLECTIONS.state_log - ) - - def append(self, logitem): - self.items.append(logitem) - self.sync() - - def popleft(self): - item = self.items.popleft() - self.sync() - return item - - def pop(self): - item = self.items.pop() - self.sync() - return item - - def show(self, verbose=False): - return ['L(uuid={0}, res={1}, action={2})'.format( - l.uid, l.res, l.action) for l in self.items] - - def __len__(self): - return len(self.items) - - def __repr__(self): - return 'Log({0})'.format(self.path) - - def __iter__(self): - return iter(self.items) - - def __nonzero__(self): - return bool(self.items) - - -class Data(collections.MutableMapping): - - def __init__(self, path): - 
self.path = path - self.store = {} - r = db.read(path, collection=db.COLLECTIONS.state_data) - if r: - self.store = r or self.store - - def __getitem__(self, key): - return self.store[key] - - def __setitem__(self, key, value): - self.store[key] = value - db.save(self.path, self.store, collection=db.COLLECTIONS.state_data) - - def __delitem__(self, key): - self.store.pop(key) - db.save(self.path, self.store, collection=db.COLLECTIONS.state_data) - - def __iter__(self): - return iter(self.store) - - def __len__(self): - return len(self.store) diff --git a/solar/solar/system_log/__init__.py b/solar/solar/system_log/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/solar/solar/system_log/change.py b/solar/solar/system_log/change.py new file mode 100644 index 00000000..7263b593 --- /dev/null +++ b/solar/solar/system_log/change.py @@ -0,0 +1,94 @@ + + +from dictdiffer import diff, patch, revert +import networkx as nx + +from solar.core.log import log +from solar.core import signals +from solar.core import resource +from solar import utils +from solar.interfaces.db import get_db +from solar.core import actions +from solar.system_log import data + +db = get_db() + + +def guess_action(from_, to): + # NOTE(dshulyak) imo the way to solve this - is dsl for orchestration, + # something where this action will be excplicitly specified + if not from_: + return 'run' + elif not to: + return 'remove' + else: + return 'update' + + +def connections(res, graph): + result = [] + for pred in graph.predecessors(res.name): + for num, edge in graph.get_edge_data(pred, res.name).items(): + if 'label' in edge: + if ':' in edge['label']: + parent, child = edge['label'].split(':') + mapping = [parent, child] + else: + mapping = [edge['label'], edge['label']] + else: + mapping = None + result.append([pred, res.name, mapping]) + return result + + +def to_dict(resource, graph): + res = resource.to_dict() + res['connections'] = connections(resource, graph) + return res + + +def create_diff(staged, commited): + if 'connections' in commited: + commited['connections'].sort() + staged['connections'].sort() + if 'tags' in commited: + commited['tags'].sort() + staged['tags'].sort() + + return list(diff(commited, staged)) + + +def _stage_changes(staged_resources, conn_graph, + commited_resources, staged_log): + + try: + srt = nx.topological_sort(conn_graph) + except: + for cycle in nx.simple_cycles(conn_graph): + log.debug('CYCLE: %s', cycle) + raise + + for res_uid in srt: + commited_data = commited_resources.get(res_uid, {}) + staged_data = staged_resources.get(res_uid, {}) + + df = create_diff(staged_data, commited_data) + + if df: + log_item = data.LogItem( + utils.generate_uuid(), + res_uid, + df, + guess_action(commited_data, staged_data)) + staged_log.append(log_item) + return staged_log + + +def stage_changes(): + conn_graph = signals.detailed_connection_graph() + staged = {r.name: to_dict(r, conn_graph) + for r in resource.load_all().values()} + commited = data.CD() + log = data.SL() + log.clean() + return _stage_changes(staged, conn_graph, commited, log) diff --git a/solar/solar/system_log/data.py b/solar/solar/system_log/data.py new file mode 100644 index 00000000..2565af2c --- /dev/null +++ b/solar/solar/system_log/data.py @@ -0,0 +1,120 @@ + +import os +import collections +from collections import deque +from functools import partial + +from solar import utils +from solar.interfaces.db import get_db + +from enum import Enum + + +db = get_db() + + +STATES = Enum('States', 'error inprogress pending 
success') + + +def state_file(name): + if 'log' in name: + return Log(name) + elif 'data' in name: + return Data(name) + + +CD = partial(state_file, 'commited_data') +SL = partial(state_file, 'stage_log') +CL = partial(state_file, 'commit_log') + + +class LogItem(object): + + def __init__(self, uid, res, diff, action, state=None): + self.uid = uid + self.res = res + self.diff = diff + self.state = state or STATES.pending + self.action = action + + def to_yaml(self): + return utils.yaml_dump(self.to_dict()) + + def to_dict(self): + return {'uid': self.uid, + 'res': self.res, + 'diff': self.diff, + 'state': self.state.name, + 'action': self.action} + + @classmethod + def from_dict(cls, **kwargs): + state = getattr(STATES, kwargs.get('state', ''), STATES.pending) + kwargs['state'] = state + return cls(**kwargs) + + def __str__(self): + return self.to_yaml() + + def __repr__(self): + return self.to_yaml() + + +class Log(object): + + def __init__(self, path): + self.ordered_log = db.get_set(path) + + def append(self, logitem): + self.ordered_log.add([(logitem.res, logitem.to_dict())]) + + def pop(self, uid): + item = self.get(uid) + self.ordered_log.rem([uid]) + return item + + def update(self, logitem): + self.ordered_log.update(logitem.res, logitem.to_dict()) + + def clean(self): + self.ordered_log.clean() + + def get(self, key): + item = self.ordered_log.get(key) + if item: + return LogItem.from_dict(**item) + return None + + def collection(self, n=0): + for item in self.ordered_log.get_left(n): + yield LogItem.from_dict(**item) + + def __iter__(self): + return iter(self.collection()) + + +class Data(collections.MutableMapping): + + def __init__(self, path): + self.path = path + self.store = {} + r = db.read(path, collection=db.COLLECTIONS.state_data) + if r: + self.store = r or self.store + + def __getitem__(self, key): + return self.store[key] + + def __setitem__(self, key, value): + self.store[key] = value + db.save(self.path, self.store, collection=db.COLLECTIONS.state_data) + + def __delitem__(self, key): + self.store.pop(key) + db.save(self.path, self.store, collection=db.COLLECTIONS.state_data) + + def __iter__(self): + return iter(self.store) + + def __len__(self): + return len(self.store) diff --git a/solar/solar/system_log/operations.py b/solar/solar/system_log/operations.py new file mode 100644 index 00000000..45f700c3 --- /dev/null +++ b/solar/solar/system_log/operations.py @@ -0,0 +1,21 @@ + + +from solar.system_log import data + + +def set_error(task_uuid, *args, **kwargs): + sl = data.SL() + item = sl.get(task_uuid) + if item: + item.state = data.STATES.error + sl.update(task_uuid, item) + + +def move_to_commited(task_uuid, *args, **kwargs): + sl = data.SL() + item = sl.get(task_uuid) + if item: + sl.rem(task_uuid) + cl = data.CL() + item.state = data.STATES.success + cl.append(item) diff --git a/solar/solar/system_log/signals.py b/solar/solar/system_log/signals.py new file mode 100644 index 00000000..cf595136 --- /dev/null +++ b/solar/solar/system_log/signals.py @@ -0,0 +1,21 @@ + +from celery.signals import task_failure, task_success + +from solar.system_log import operations +from solar.system_log import tasks + + +__all__ = ['system_log_on_task_error', 'system_log_on_task_success'] + + +@task_failure.connect +def system_log_on_task_error(sender, exception, traceback, einfo, *args, **kwargs): + task_id = kwargs.get('task_id') + if task_id: + tasks.error_logitem.apply_async(args=[task_id], queue='system_log') + +@task_success.connect +def system_log_on_task_success(sender, 
result, *args, **kwargs): + task_id = kwargs.get('task_id') + if task_id: + tasks.commit_logitem.apply_async(args=[task_id], queue='system_log') diff --git a/solar/solar/system_log/tasks.py b/solar/solar/system_log/tasks.py new file mode 100644 index 00000000..7f715bd7 --- /dev/null +++ b/solar/solar/system_log/tasks.py @@ -0,0 +1,16 @@ + + +from solar.orchestration.runner import app +from solar.system_log.operations import set_error, move_to_commited + +__all__ = ['error_logitem', 'commit_logitem'] + + +@app.task +def error_logitem(task_uuid): + return set_error(task_uuid.rsplit(':', 1)[-1]) + + +@app.task +def commit_logitem(task_uuid): + return move_to_commited(task_uuid.rsplit(':', 1)[-1]) From bd6286ca8422d39a008fca76feb9fab6c4396fd2 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 13 Jul 2015 17:28:13 +0300 Subject: [PATCH 04/86] Add discovery of tasks stored in separate modules --- celery.yml | 4 ++-- solar/solar/orchestration/runner.py | 18 ++++++++++++++++++ solar/solar/orchestration/tasks.py | 16 +++++----------- 3 files changed, 25 insertions(+), 13 deletions(-) create mode 100644 solar/solar/orchestration/runner.py diff --git a/celery.yml b/celery.yml index 4b5f9878..06980311 100644 --- a/celery.yml +++ b/celery.yml @@ -10,9 +10,9 @@ - shell: celery multi kill 2 chdir={{celery_dir}} tags: [stop] - - shell: celery multi start 2 -A solar.orchestration.tasks -Q:1 celery,scheduler -Q:2 celery,{{hostname.stdout}} + - shell: celery multi start 2 -A solar.orchestration.runner -Q:1 celery,scheduler -Q:2 celery,{{hostname.stdout}} chdir={{celery_dir}} tags: [master] - - shell: celery multi start 1 -A solar.orchestration.tasks -Q:1 celery,{{hostname.stdout}} + - shell: celery multi start 1 -A solar.orchestration.runner -Q:1 celery,{{hostname.stdout}} chdir={{celery_dir}} tags: [slave] \ No newline at end of file diff --git a/solar/solar/orchestration/runner.py b/solar/solar/orchestration/runner.py new file mode 100644 index 00000000..fcfe7f31 --- /dev/null +++ b/solar/solar/orchestration/runner.py @@ -0,0 +1,18 @@ + + +from celery import Celery + +app = Celery( + include=['solar.system_log.tasks', 'solar.orchestration.tasks'], + backend='redis://10.0.0.2:6379/1', + broker='redis://10.0.0.2:6379/1') +app.conf.update(CELERY_ACCEPT_CONTENT = ['json']) +app.conf.update(CELERY_TASK_SERIALIZER = 'json') + + +# NOTE(dshulyak) some autodiscovery system +# maybe https://github.com/mitsuhiko/pluginbase/ ? 
+from solar.system_log.signals import * +from solar.system_log.tasks import * +from solar.orchestration.tasks import * + diff --git a/solar/solar/orchestration/tasks.py b/solar/solar/orchestration/tasks.py index 49794ac9..a835a7b0 100644 --- a/solar/solar/orchestration/tasks.py +++ b/solar/solar/orchestration/tasks.py @@ -1,12 +1,9 @@ - - from functools import partial, wraps from itertools import islice import subprocess import time -from celery import Celery from celery.app import task from celery import group from celery.exceptions import Ignore @@ -15,18 +12,16 @@ import redis from solar.orchestration import graph from solar.core import actions from solar.core import resource +from solar.orchestration.runner import app -app = Celery( - 'tasks', - backend='redis://10.0.0.2:6379/1', - broker='redis://10.0.0.2:6379/1') -app.conf.update(CELERY_ACCEPT_CONTENT = ['json']) -app.conf.update(CELERY_TASK_SERIALIZER = 'json') - r = redis.StrictRedis(host='10.0.0.2', port=6379, db=1) +__all__ = ['solar_resource', 'cmd', 'sleep', + 'error', 'fault_tolerance', 'schedule_start', 'schedule_next'] + + class ReportTask(task.Task): def on_success(self, retval, task_id, args, kwargs): @@ -106,7 +101,6 @@ def anchor(ctxt, *args): def schedule(plan_uid, dg): next_tasks = list(traverse(dg)) graph.save_graph(plan_uid, dg) - print 'GRAPH {0}\n NEXT TASKS {1}'.format(dg.node, next_tasks) group(next_tasks)() From ef1d5a49e2663d0e0e877461a0f6d4e57931d4ec Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Mon, 13 Jul 2015 17:29:20 +0300 Subject: [PATCH 05/86] Move system log cli to separate module --- solar/solar/cli/main.py | 55 ++--------------------------------- solar/solar/cli/orch.py | 3 ++ solar/solar/cli/system_log.py | 47 ++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 52 deletions(-) create mode 100644 solar/solar/cli/system_log.py diff --git a/solar/solar/cli/main.py b/solar/solar/cli/main.py index 49f6bd85..d55df598 100644 --- a/solar/solar/cli/main.py +++ b/solar/solar/cli/main.py @@ -28,8 +28,6 @@ import tabulate import yaml from solar import utils -from solar import operations -from solar import state from solar.core import actions from solar.core import resource as sresource from solar.core.resource import assign_resources_to_nodes @@ -40,6 +38,7 @@ from solar.core.resource import virtual_resource as vr from solar.interfaces.db import get_db from solar.cli.orch import orchestration +from solar.cli.system_log import changes # NOTE: these are extensions, they shouldn't be imported here # Maybe each extension can also extend the CLI with parsers @@ -146,54 +145,6 @@ def init_actions(): actions.resource_action(resource_obj, action) -def init_changes(): - @main.group() - def changes(): - pass - - @changes.command() - def validate(): - errors = vr.validate_resources() - if errors: - for r, error in errors: - print 'ERROR: %s: %s' % (r.name, error) - sys.exit(1) - - @changes.command() - def stage(): - log = operations.stage_changes() - click.echo(log.show()) - - @changes.command() - @click.option('--one', is_flag=True, default=False) - def commit(one): - if one: - operations.commit_one() - else: - operations.commit_changes() - - @changes.command() - @click.option('--limit', default=5) - def history(limit): - click.echo(state.CL().show()) - - @changes.command() - @click.option('--last', is_flag=True, default=False) - @click.option('--all', is_flag=True, default=False) - @click.option('--uid', default=None) - def rollback(last, all, uid): - if last: - click.echo(operations.rollback_last()) 
- elif all: - click.echo(operations.rollback_all()) - elif uid: - click.echo(operations.rollback_uid(uid)) - - @changes.command() - def test(): - testing.test_all() - - def init_cli_connect(): @main.command() @click.argument('emitter') @@ -314,7 +265,7 @@ def init_cli_resource(): @resource.command() @click.argument('name') - @click.argument('base_path', type=click.Path(exists=True, file_okay=True)) + @click.argument('base_path', type=click.Path(exists=True)) @click.argument('args', nargs=-1) def create(args, base_path, name): args_parsed = {} @@ -425,13 +376,13 @@ def init_cli_resource(): def run(): init_actions() - init_changes() init_cli_connect() init_cli_connections() init_cli_deployment_config() init_cli_resource() main.add_command(orchestration) + main.add_command(changes) main() diff --git a/solar/solar/cli/orch.py b/solar/solar/cli/orch.py index 74c46ef6..6d78b702 100644 --- a/solar/solar/cli/orch.py +++ b/solar/solar/cli/orch.py @@ -21,17 +21,20 @@ def orchestration(): restart --reset """ + @orchestration.command() @click.argument('plan', type=click.File('rb')) def create(plan): click.echo(graph.create_plan(plan.read())) + @orchestration.command() @click.argument('uid') @click.argument('plan', type=click.File('rb')) def update(uid, plan): graph.update_plan(uid, plan.read()) + @orchestration.command() @click.argument('uid') def report(uid): diff --git a/solar/solar/cli/system_log.py b/solar/solar/cli/system_log.py new file mode 100644 index 00000000..1a4e03ec --- /dev/null +++ b/solar/solar/cli/system_log.py @@ -0,0 +1,47 @@ + +import sys + +import click + +from solar.core import testing +from solar.core import virtual_resource as vr +from solar.system_log import change +from solar.system_log import operations +from solar.system_log import data + + +@click.group() +def changes(): + pass + + +@changes.command() +def validate(): + errors = vr.validate_resources() + if errors: + for r, error in errors: + print 'ERROR: %s: %s' % (r.name, error) + sys.exit(1) + + +@changes.command() +def stage(): + log = change.stage_changes() + click.echo(list(log.collection())) + + +@changes.command() +@click.argument('uid') +def commit(uid): + operations.commit(uid) + + +@changes.command() +@click.option('-n', default=5) +def history(n): + click.echo(list(data.CL().collection(n))) + + +@changes.command() +def test(): + testing.test_all() From d1acbb88adca27fa59954d5066c274233b9c5cec Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 14 Jul 2015 12:52:16 +0300 Subject: [PATCH 06/86] Send events to modules from base task class --- solar/solar/orchestration/runner.py | 7 ------- solar/solar/orchestration/tasks.py | 6 +++++- solar/solar/system_log/signals.py | 21 --------------------- 3 files changed, 5 insertions(+), 29 deletions(-) delete mode 100644 solar/solar/system_log/signals.py diff --git a/solar/solar/orchestration/runner.py b/solar/solar/orchestration/runner.py index fcfe7f31..a98ea438 100644 --- a/solar/solar/orchestration/runner.py +++ b/solar/solar/orchestration/runner.py @@ -9,10 +9,3 @@ app = Celery( app.conf.update(CELERY_ACCEPT_CONTENT = ['json']) app.conf.update(CELERY_TASK_SERIALIZER = 'json') - -# NOTE(dshulyak) some autodiscovery system -# maybe https://github.com/mitsuhiko/pluginbase/ ? 
-from solar.system_log.signals import * -from solar.system_log.tasks import * -from solar.orchestration.tasks import * - diff --git a/solar/solar/orchestration/tasks.py b/solar/solar/orchestration/tasks.py index a835a7b0..16f4bc85 100644 --- a/solar/solar/orchestration/tasks.py +++ b/solar/solar/orchestration/tasks.py @@ -12,6 +12,7 @@ import redis from solar.orchestration import graph from solar.core import actions from solar.core import resource +from solar.system_log.tasks import commit_logitem, error_logitem from solar.orchestration.runner import app @@ -21,17 +22,20 @@ r = redis.StrictRedis(host='10.0.0.2', port=6379, db=1) __all__ = ['solar_resource', 'cmd', 'sleep', 'error', 'fault_tolerance', 'schedule_start', 'schedule_next'] - +# NOTE(dshulyak) i am not using celery.signals because it is not possible +# to extrace task_id from *task_success* signal class ReportTask(task.Task): def on_success(self, retval, task_id, args, kwargs): schedule_next.apply_async(args=[task_id, 'SUCCESS'], queue='scheduler') + commit_logitem.apply_async(args=[task_id], queue='system_log') def on_failure(self, exc, task_id, args, kwargs, einfo): schedule_next.apply_async( args=[task_id, 'ERROR'], kwargs={'errmsg': str(einfo.exception)}, queue='scheduler') + error_logitem.apply_async(args=[task_id], queue='system_log') report_task = partial(app.task, base=ReportTask, bind=True) diff --git a/solar/solar/system_log/signals.py b/solar/solar/system_log/signals.py deleted file mode 100644 index cf595136..00000000 --- a/solar/solar/system_log/signals.py +++ /dev/null @@ -1,21 +0,0 @@ - -from celery.signals import task_failure, task_success - -from solar.system_log import operations -from solar.system_log import tasks - - -__all__ = ['system_log_on_task_error', 'system_log_on_task_success'] - - -@task_failure.connect -def system_log_on_task_error(sender, exception, traceback, einfo, *args, **kwargs): - task_id = kwargs.get('task_id') - if task_id: - tasks.error_logitem.apply_async(args=[task_id], queue='system_log') - -@task_success.connect -def system_log_on_task_success(sender, result, *args, **kwargs): - task_id = kwargs.get('task_id') - if task_id: - tasks.commit_logitem.apply_async(args=[task_id], queue='system_log') From 6f750820a82891f4a8164bdce518f018b631a6b9 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 14 Jul 2015 12:52:51 +0300 Subject: [PATCH 07/86] Add plan creation based on resources topological sort --- solar/solar/cli/system_log.py | 5 ++++ solar/solar/orchestration/graph.py | 10 +++++--- solar/solar/system_log/change.py | 37 ++++++++++++++++++++++++++++-- 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/solar/solar/cli/system_log.py b/solar/solar/cli/system_log.py index 1a4e03ec..46a5e9b5 100644 --- a/solar/solar/cli/system_log.py +++ b/solar/solar/cli/system_log.py @@ -30,6 +30,11 @@ def stage(): click.echo(list(log.collection())) +@changes.command() +def send(): + click.echo(change.send_to_orchestration()) + + @changes.command() @click.argument('uid') def commit(uid): diff --git a/solar/solar/orchestration/graph.py b/solar/solar/orchestration/graph.py index d7342e23..443b838e 100644 --- a/solar/solar/orchestration/graph.py +++ b/solar/solar/orchestration/graph.py @@ -47,13 +47,17 @@ def parse_plan(plan_data): return dg +def create_plan_from_graph(dg): + dg.graph['uid'] = "{0}:{1}".format(dg.graph['name'], str(uuid.uuid4())) + save_graph(dg.graph['uid'], dg) + return dg.graph['uid'] + + def create_plan(plan_data): """ """ dg = parse_plan(plan_data) - dg.graph['uid'] = 
"{0}:{1}".format(dg.graph['name'], str(uuid.uuid4())) - save_graph(dg.graph['uid'], dg) - return dg.graph['uid'] + return create_plan_from_graph(dg) def update_plan(uid, plan_data): diff --git a/solar/solar/system_log/change.py b/solar/solar/system_log/change.py index 7263b593..aef1105a 100644 --- a/solar/solar/system_log/change.py +++ b/solar/solar/system_log/change.py @@ -10,6 +10,7 @@ from solar import utils from solar.interfaces.db import get_db from solar.core import actions from solar.system_log import data +from solar.orchestration import graph db = get_db() @@ -85,10 +86,42 @@ def _stage_changes(staged_resources, conn_graph, def stage_changes(): + log = data.SL() + log.clean() conn_graph = signals.detailed_connection_graph() staged = {r.name: to_dict(r, conn_graph) for r in resource.load_all().values()} commited = data.CD() - log = data.SL() - log.clean() return _stage_changes(staged, conn_graph, commited, log) + + +def send_to_orchestration(execute=False): + conn_graph = signals.detailed_connection_graph() + dg = nx.DiGraph() + staged = {r.name: to_dict(r, conn_graph) + for r in resource.load_all().values()} + commited = data.CD() + + for res_uid in conn_graph: + commited_data = commited.get(res_uid, {}) + staged_data = staged.get(res_uid, {}) + + df = create_diff(staged_data, commited_data) + + if df: + dg.add_node( + res_uid, status='PENDING', + errmsg=None, + **parameters(res_uid, guess_action(commited_data, staged_data))) + + dg.add_path(nx.topological_sort(conn_graph)) + # what it should be? + dg.graph['name'] = 'system_log' + return graph.create_plan_from_graph(dg) + + +def parameters(res, action): + return { + 'parameters': {'args': [res, action], + 'type': 'solar_resource'} + } From 5c0561ee0b8cf68b38833692c9c973bc60574e9d Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 14 Jul 2015 15:18:43 +0300 Subject: [PATCH 08/86] Add show command for a plan --- solar/solar/cli/orch.py | 5 +++++ solar/solar/orchestration/graph.py | 20 ++++++++++++++++++++ solar/solar/system_log/change.py | 6 ++---- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/solar/solar/cli/orch.py b/solar/solar/cli/orch.py index 6d78b702..b8ed3e12 100644 --- a/solar/solar/cli/orch.py +++ b/solar/solar/cli/orch.py @@ -107,3 +107,8 @@ def dg(uid): plan.node[n]['color'] = color nx.write_dot(plan, 'graph.dot') subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png']) + +@orchestration.command() +@click.argument('uid') +def show(uid): + click.echo(graph.show(uid)) diff --git a/solar/solar/orchestration/graph.py b/solar/solar/orchestration/graph.py index 443b838e..4913ab34 100644 --- a/solar/solar/orchestration/graph.py +++ b/solar/solar/orchestration/graph.py @@ -7,6 +7,8 @@ import networkx as nx import redis import yaml +from solar import utils + r = redis.StrictRedis(host='10.0.0.2', port=6379, db=1) @@ -53,6 +55,24 @@ def create_plan_from_graph(dg): return dg.graph['uid'] +def show(uid): + dg = get_graph(uid) + result = {} + tasks = [] + result['uid'] = dg.graph['uid'] + result['name'] = dg.graph['name'] + for n in nx.topological_sort(dg): + data = dg.node[n] + tasks.append( + {'uid': n, + 'parameters': data, + 'before': dg.successors(n), + 'after': dg.predecessors(n) + }) + result['tasks'] = tasks + return utils.yaml_dump(result) + + def create_plan(plan_data): """ """ diff --git a/solar/solar/system_log/change.py b/solar/solar/system_log/change.py index aef1105a..f80f664f 100644 --- a/solar/solar/system_log/change.py +++ b/solar/solar/system_log/change.py @@ -121,7 +121,5 @@ def 
send_to_orchestration(execute=False): def parameters(res, action): - return { - 'parameters': {'args': [res, action], - 'type': 'solar_resource'} - } + return {'args': [res, action], + 'type': 'solar_resource'} From 9a83dbf2d1fc932703707e1cf1e5763035e8f169 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 14 Jul 2015 17:00:10 +0300 Subject: [PATCH 09/86] Add several helpers for cleanup and fixes --- solar/solar/cli/system_log.py | 8 +++++++- solar/solar/system_log/change.py | 4 +++- solar/solar/system_log/data.py | 5 +++++ solar/solar/system_log/operations.py | 7 +++++-- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/solar/solar/cli/system_log.py b/solar/solar/cli/system_log.py index 46a5e9b5..db171265 100644 --- a/solar/solar/cli/system_log.py +++ b/solar/solar/cli/system_log.py @@ -31,7 +31,7 @@ def stage(): @changes.command() -def send(): +def process(): click.echo(change.send_to_orchestration()) @@ -50,3 +50,9 @@ def history(n): @changes.command() def test(): testing.test_all() + + +@changes.command(name='clean-history') +def clean_history(): + data.CL().clean() + data.CD().clean() diff --git a/solar/solar/system_log/change.py b/solar/solar/system_log/change.py index f80f664f..b0a814e5 100644 --- a/solar/solar/system_log/change.py +++ b/solar/solar/system_log/change.py @@ -55,7 +55,9 @@ def create_diff(staged, commited): if 'tags' in commited: commited['tags'].sort() staged['tags'].sort() - + if 'tags' in commited.get('metadata', {}): + commited['metadata']['tags'].sort() + staged['metadata']['tags'].sort() return list(diff(commited, staged)) diff --git a/solar/solar/system_log/data.py b/solar/solar/system_log/data.py index 2565af2c..2e5e654e 100644 --- a/solar/solar/system_log/data.py +++ b/solar/solar/system_log/data.py @@ -70,6 +70,8 @@ class Log(object): def pop(self, uid): item = self.get(uid) + if not item: + return None self.ordered_log.rem([uid]) return item @@ -118,3 +120,6 @@ class Data(collections.MutableMapping): def __len__(self): return len(self.store) + + def clean(self): + db.save(self.path, {}, collection=db.COLLECTIONS.state_data) diff --git a/solar/solar/system_log/operations.py b/solar/solar/system_log/operations.py index 45f700c3..07f32053 100644 --- a/solar/solar/system_log/operations.py +++ b/solar/solar/system_log/operations.py @@ -1,6 +1,7 @@ from solar.system_log import data +from dictdiffer import patch def set_error(task_uuid, *args, **kwargs): @@ -13,9 +14,11 @@ def set_error(task_uuid, *args, **kwargs): def move_to_commited(task_uuid, *args, **kwargs): sl = data.SL() - item = sl.get(task_uuid) + item = sl.pop(task_uuid) if item: - sl.rem(task_uuid) + commited = data.CD() + staged_data = patch(item.diff, commited.get(item.res, {})) cl = data.CL() item.state = data.STATES.success cl.append(item) + commited[item.res] = staged_data From c36bdf5f5bcb1d2903aa01464c26dae2d7ffeca1 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 15 Jul 2015 10:40:19 +0300 Subject: [PATCH 10/86] Improve ordering for stage/commit log --- solar/solar/cli/orch.py | 1 + solar/solar/cli/system_log.py | 12 ++++++++++-- solar/solar/interfaces/db/redis_db.py | 8 +++++++- solar/solar/system_log/data.py | 6 +++++- 4 files changed, 23 insertions(+), 4 deletions(-) diff --git a/solar/solar/cli/orch.py b/solar/solar/cli/orch.py index b8ed3e12..e809c403 100644 --- a/solar/solar/cli/orch.py +++ b/solar/solar/cli/orch.py @@ -108,6 +108,7 @@ def dg(uid): nx.write_dot(plan, 'graph.dot') subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png']) + 
@orchestration.command() @click.argument('uid') def show(uid): diff --git a/solar/solar/cli/system_log.py b/solar/solar/cli/system_log.py index db171265..70ff5ca8 100644 --- a/solar/solar/cli/system_log.py +++ b/solar/solar/cli/system_log.py @@ -27,7 +27,10 @@ def validate(): @changes.command() def stage(): log = change.stage_changes() - click.echo(list(log.collection())) + staged = list(log.reverse()) + if not staged: + click.echo('No changes') + click.echo(staged) @changes.command() @@ -44,7 +47,12 @@ def commit(uid): @changes.command() @click.option('-n', default=5) def history(n): - click.echo(list(data.CL().collection(n))) + commited = list(data.CL().collection(n)) + if not commited: + click.echo('No history.') + return + commited.reverse() + click.echo(commited) @changes.command() diff --git a/solar/solar/interfaces/db/redis_db.py b/solar/solar/interfaces/db/redis_db.py index 511950de..eaea121c 100644 --- a/solar/solar/interfaces/db/redis_db.py +++ b/solar/solar/interfaces/db/redis_db.py @@ -124,12 +124,18 @@ class OrderedSet(object): def rem_left(self, n=1): self.rem(r.zrevrange(self.order, 0, n-1)) - def get_left(self, n=1): + def reverse(self, n=1): result = [] for key in self.r.zrevrange(self.order, 0, n-1): result.append(self.get(key)) return result + def list(self, n=0): + result = [] + for key in self.r.zrange(self.order, 0, n-1): + result.append(self.get(key)) + return result + class FakeRedisDB(RedisDB): diff --git a/solar/solar/system_log/data.py b/solar/solar/system_log/data.py index 2e5e654e..0c99fdce 100644 --- a/solar/solar/system_log/data.py +++ b/solar/solar/system_log/data.py @@ -88,7 +88,11 @@ class Log(object): return None def collection(self, n=0): - for item in self.ordered_log.get_left(n): + for item in self.ordered_log.reverse(n=n): + yield LogItem.from_dict(**item) + + def reverse(self, n=0): + for item in self.ordered_log.list(n=n): yield LogItem.from_dict(**item) def __iter__(self): From 7e7b46614cc3af0e00aa14416eb011108b19d44c Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 15 Jul 2015 10:48:50 +0300 Subject: [PATCH 11/86] Simplify information stored for history We are not making use of connections/tags in system log, therefore they should be removed and added only when necessary --- solar/solar/system_log/change.py | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/solar/solar/system_log/change.py b/solar/solar/system_log/change.py index b0a814e5..e64d8e79 100644 --- a/solar/solar/system_log/change.py +++ b/solar/solar/system_log/change.py @@ -42,22 +42,7 @@ def connections(res, graph): return result -def to_dict(resource, graph): - res = resource.to_dict() - res['connections'] = connections(resource, graph) - return res - - def create_diff(staged, commited): - if 'connections' in commited: - commited['connections'].sort() - staged['connections'].sort() - if 'tags' in commited: - commited['tags'].sort() - staged['tags'].sort() - if 'tags' in commited.get('metadata', {}): - commited['metadata']['tags'].sort() - staged['metadata']['tags'].sort() return list(diff(commited, staged)) @@ -91,7 +76,7 @@ def stage_changes(): log = data.SL() log.clean() conn_graph = signals.detailed_connection_graph() - staged = {r.name: to_dict(r, conn_graph) + staged = {r.name: r.args_show() for r in resource.load_all().values()} commited = data.CD() return _stage_changes(staged, conn_graph, commited, log) @@ -100,7 +85,7 @@ def stage_changes(): def send_to_orchestration(execute=False): conn_graph = signals.detailed_connection_graph() 
dg = nx.DiGraph() - staged = {r.name: to_dict(r, conn_graph) + staged = {r.name: r.args_show() for r in resource.load_all().values()} commited = data.CD() From ce4fd8c8de707a6719b22e0bf0cffc8d71b21d4b Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 15 Jul 2015 11:49:44 +0300 Subject: [PATCH 12/86] Prepare orchestration plan based on precedence scheduling Use transitive reduction for graph output, to reduce number of edges, and keep graph clean --- solar/solar/cli/orch.py | 7 +++++-- solar/solar/system_log/change.py | 5 +++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/solar/solar/cli/orch.py b/solar/solar/cli/orch.py index e809c403..b17a8a33 100644 --- a/solar/solar/cli/orch.py +++ b/solar/solar/cli/orch.py @@ -105,8 +105,11 @@ def dg(uid): for n in plan: color = colors[plan.node[n]['status']] plan.node[n]['color'] = color - nx.write_dot(plan, 'graph.dot') - subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png']) + nx.write_dot(plan, '{name}.dot'.format(name=plan.graph['name'])) + subprocess.call( + 'tred {name}.dot | dot -Tpng -o {name}.png'.format(name=plan.graph['name']), + shell=True) + click.echo('Created {name}.png'.format(name=plan.graph['name'])) @orchestration.command() diff --git a/solar/solar/system_log/change.py b/solar/solar/system_log/change.py index e64d8e79..16588c46 100644 --- a/solar/solar/system_log/change.py +++ b/solar/solar/system_log/change.py @@ -82,7 +82,7 @@ def stage_changes(): return _stage_changes(staged, conn_graph, commited, log) -def send_to_orchestration(execute=False): +def send_to_orchestration(): conn_graph = signals.detailed_connection_graph() dg = nx.DiGraph() staged = {r.name: r.args_show() @@ -100,8 +100,9 @@ def send_to_orchestration(execute=False): res_uid, status='PENDING', errmsg=None, **parameters(res_uid, guess_action(commited_data, staged_data))) + for pred in conn_graph.predecessors(res_uid): + dg.add_edge(pred, res_uid) - dg.add_path(nx.topological_sort(conn_graph)) # what it should be? 
dg.graph['name'] = 'system_log' return graph.create_plan_from_graph(dg) From 732d7ce3d1efb5c2aad9bd93802f91a82ed022e2 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 15 Jul 2015 14:49:50 +0300 Subject: [PATCH 13/86] Limit celery workers --- celery.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/celery.yml b/celery.yml index 06980311..3874e2f2 100644 --- a/celery.yml +++ b/celery.yml @@ -10,9 +10,9 @@ - shell: celery multi kill 2 chdir={{celery_dir}} tags: [stop] - - shell: celery multi start 2 -A solar.orchestration.runner -Q:1 celery,scheduler -Q:2 celery,{{hostname.stdout}} + - shell: celery multi start 2 -A solar.orchestration.runner -Q:1 scheduler,system_log -Q:2 celery,{{hostname.stdout}} chdir={{celery_dir}} tags: [master] - - shell: celery multi start 1 -A solar.orchestration.runner -Q:1 celery,{{hostname.stdout}} + - shell: celery multi start 1 -A solar.orchestration.runner -Q:1 {{hostname.stdout}} chdir={{celery_dir}} - tags: [slave] \ No newline at end of file + tags: [slave] From b4a2f14ab7692f0a4268ea04b454136f9e29a4de Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 15 Jul 2015 14:50:27 +0300 Subject: [PATCH 14/86] Use absolute path for resources Catched an issue with orchestration that runs under another directory --- solar/solar/cli/main.py | 3 ++- solar/solar/core/resource/virtual_resource.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/solar/solar/cli/main.py b/solar/solar/cli/main.py index d55df598..e97a783e 100644 --- a/solar/solar/cli/main.py +++ b/solar/solar/cli/main.py @@ -265,7 +265,8 @@ def init_cli_resource(): @resource.command() @click.argument('name') - @click.argument('base_path', type=click.Path(exists=True)) + @click.argument( + 'base_path', type=click.Path(exists=True, resolve_path=True)) @click.argument('args', nargs=-1) def create(args, base_path, name): args_parsed = {} diff --git a/solar/solar/core/resource/virtual_resource.py b/solar/solar/core/resource/virtual_resource.py index 569e690c..8e2d3633 100644 --- a/solar/solar/core/resource/virtual_resource.py +++ b/solar/solar/core/resource/virtual_resource.py @@ -41,9 +41,11 @@ def create_virtual_resource(vr_name, template): resources = template['resources'] connections = [] created_resources = [] + + cwd = os.getcwd() for resource in resources: name = resource['id'] - base_path = resource['from'] + base_path = os.path.join(cwd, resource['from']) args = resource['values'] new_resources = create(name, base_path, args, vr_name) created_resources += new_resources From fe94d9df099bcf089054ef9ed4d775998a6a7460 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 15 Jul 2015 17:16:47 +0300 Subject: [PATCH 15/86] Prevent fabric from raising SystemExit in case of errors --- solar/solar/cli/main.py | 9 ++++++++- solar/solar/core/handlers/ansible_template.py | 15 +++++++++------ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/solar/solar/cli/main.py b/solar/solar/cli/main.py index e97a783e..8b4968a4 100644 --- a/solar/solar/cli/main.py +++ b/solar/solar/cli/main.py @@ -36,6 +36,8 @@ from solar.core.tags_set_parser import Expression from solar.core import testing from solar.core.resource import virtual_resource as vr from solar.interfaces.db import get_db +from solar import errors +from solar.core.log import log from solar.cli.orch import orchestration from solar.cli.system_log import changes @@ -240,8 +242,13 @@ def init_cli_resource(): click.echo( 'action {} for resource {}'.format(action, resource) ) - 
actions.resource_action(sresource.load(resource), action) + r = sresource.load(resource_name) + try: + actions.resource_action(r, action_name) + except errors.SolarError as e: + log.debug(e) + sys.exit(1) @resource.command() def compile_all(): diff --git a/solar/solar/core/handlers/ansible_template.py b/solar/solar/core/handlers/ansible_template.py index dc6e8d45..f90d9c6a 100644 --- a/solar/solar/core/handlers/ansible_template.py +++ b/solar/solar/core/handlers/ansible_template.py @@ -1,11 +1,16 @@ # -*- coding: utf-8 -*- from fabric import api as fabric_api +from fabric.state import env import os from solar.core.log import log from solar.core.handlers.base import TempFileHandler +from solar import errors +# otherwise fabric will sys.exit(1) in case of errors +env.warn_only = True + class AnsibleTemplate(TempFileHandler): def action(self, resource, action_name): inventory_file = self._create_inventory(resource) @@ -15,12 +20,10 @@ class AnsibleTemplate(TempFileHandler): call_args = ['ansible-playbook', '--module-path', '/vagrant/library', '-i', inventory_file, playbook_file] log.debug('EXECUTING: %s', ' '.join(call_args)) - try: - fabric_api.local(' '.join(call_args)) - except Exception as e: - log.error(e.output) - log.exception(e) - raise + out = fabric_api.local(' '.join(call_args), capture=True) + if out.failed: + raise errors.SolarError(out) + def _create_inventory(self, r): directory = self.dirs[r.name] From debb03975e8b647f27980081371bd9fdad7b292f Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Thu, 16 Jul 2015 12:43:33 +0300 Subject: [PATCH 16/86] Fix update of logitem bug in system_log --- solar/solar/system_log/operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solar/solar/system_log/operations.py b/solar/solar/system_log/operations.py index 07f32053..82714837 100644 --- a/solar/solar/system_log/operations.py +++ b/solar/solar/system_log/operations.py @@ -9,7 +9,7 @@ def set_error(task_uuid, *args, **kwargs): item = sl.get(task_uuid) if item: item.state = data.STATES.error - sl.update(task_uuid, item) + sl.update(item) def move_to_commited(task_uuid, *args, **kwargs): From eee7c214a1ae0034ef2edc261a12ca2132ce482f Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Fri, 17 Jul 2015 16:10:15 +0300 Subject: [PATCH 17/86] Fix resource imports after rebase --- solar/solar/cli/system_log.py | 4 ++-- solar/solar/core/resource/__init__.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/solar/solar/cli/system_log.py b/solar/solar/cli/system_log.py index 70ff5ca8..896655f5 100644 --- a/solar/solar/cli/system_log.py +++ b/solar/solar/cli/system_log.py @@ -4,7 +4,7 @@ import sys import click from solar.core import testing -from solar.core import virtual_resource as vr +from solar.core import resource from solar.system_log import change from solar.system_log import operations from solar.system_log import data @@ -17,7 +17,7 @@ def changes(): @changes.command() def validate(): - errors = vr.validate_resources() + errors = resource.validate_resources() if errors: for r, error in errors: print 'ERROR: %s: %s' % (r.name, error) diff --git a/solar/solar/core/resource/__init__.py b/solar/solar/core/resource/__init__.py index bbc61314..2a53b59d 100644 --- a/solar/solar/core/resource/__init__.py +++ b/solar/solar/core/resource/__init__.py @@ -7,6 +7,7 @@ __all__ = [ 'load_all', 'prepare_meta', 'wrap_resource', + 'validate_resources', ] @@ -18,3 +19,4 @@ from solar.core.resource.resource import load_all from solar.core.resource.resource import 
wrap_resource from solar.core.resource.virtual_resource import create from solar.core.resource.virtual_resource import prepare_meta +from solar.core.resource.virtual_resource import validate_resources From d0388102c2abd81d12b1ddf1495ab0845867a1e5 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Fri, 17 Jul 2015 16:24:44 +0300 Subject: [PATCH 18/86] Fix variables names in action command --- solar/solar/cli/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solar/solar/cli/main.py b/solar/solar/cli/main.py index 8b4968a4..71d3b36b 100644 --- a/solar/solar/cli/main.py +++ b/solar/solar/cli/main.py @@ -243,9 +243,9 @@ def init_cli_resource(): 'action {} for resource {}'.format(action, resource) ) - r = sresource.load(resource_name) + r = sresource.load(resource) try: - actions.resource_action(r, action_name) + actions.resource_action(r, action) except errors.SolarError as e: log.debug(e) sys.exit(1) From aa205c5d2ae50411e4ca6fe850f86660b3674dac Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Fri, 17 Jul 2015 17:36:48 +0300 Subject: [PATCH 19/86] Use detailed error codes to handle puppet errors --- solar/solar/core/handlers/puppet.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/solar/solar/core/handlers/puppet.py b/solar/solar/core/handlers/puppet.py index bd62eb86..a495d8f5 100644 --- a/solar/solar/core/handlers/puppet.py +++ b/solar/solar/core/handlers/puppet.py @@ -9,20 +9,7 @@ import os from solar.core.log import log from solar.core.handlers.base import TempFileHandler from solar.core.provider import GitProvider - - -# TODO: -# puppet wont always return 0 on error, example: -# http://unix.stackexchange.com/questions/165333/how-to-get-non-zero-exit-code-from-puppet-when-configuration-cannot-be-applied - -# in fuel there is special handler based on puppet summary, but i think we can also use --detailed-exitcode -# https://docs.puppetlabs.com/references/3.6.2/man/agent.html -# --detailed-exitcodes -# Provide transaction information via exit codes. If this is enabled, an exit -# code of '2' means there were changes, an exit code of '4' means there were -# failures during the transaction, and an exit code of '6' means there were -# both changes and failures. 
- +from solar import errors class ResourceSSHMixin(object): @@ -161,14 +148,19 @@ class Puppet(ResourceSSHMixin, TempFileHandler): self._scp_command(resource, action_file, '/tmp/action.pp') - self._ssh_command( + cmd = self._ssh_command( resource, - 'puppet', 'apply', '-vd', '/tmp/action.pp', + 'puppet', 'apply', '-vd', '/tmp/action.pp', '--detailed-exitcodes', env={ 'FACTER_resource_name': resource.name, }, use_sudo=True ) + # 0 - no changes, 2 - successfull changes + if cmd.return_code not in [0, 2]: + raise errors.SolarError( + 'Puppet for %s failed with %d', resource.name, cmd.return_code) + return cmd def clone_manifests(self, resource): git = resource.args['git'].value From 0dd513375f20d433c42546e958a20ccbf6eb9785 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Sat, 18 Jul 2015 02:21:34 +0200 Subject: [PATCH 20/86] Revert "Use detailed error codes to handle puppet errors" --- solar/solar/core/handlers/puppet.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/solar/solar/core/handlers/puppet.py b/solar/solar/core/handlers/puppet.py index a495d8f5..bd62eb86 100644 --- a/solar/solar/core/handlers/puppet.py +++ b/solar/solar/core/handlers/puppet.py @@ -9,7 +9,20 @@ import os from solar.core.log import log from solar.core.handlers.base import TempFileHandler from solar.core.provider import GitProvider -from solar import errors + + +# TODO: +# puppet wont always return 0 on error, example: +# http://unix.stackexchange.com/questions/165333/how-to-get-non-zero-exit-code-from-puppet-when-configuration-cannot-be-applied + +# in fuel there is special handler based on puppet summary, but i think we can also use --detailed-exitcode +# https://docs.puppetlabs.com/references/3.6.2/man/agent.html +# --detailed-exitcodes +# Provide transaction information via exit codes. If this is enabled, an exit +# code of '2' means there were changes, an exit code of '4' means there were +# failures during the transaction, and an exit code of '6' means there were +# both changes and failures. 
+ class ResourceSSHMixin(object): @@ -148,19 +161,14 @@ class Puppet(ResourceSSHMixin, TempFileHandler): self._scp_command(resource, action_file, '/tmp/action.pp') - cmd = self._ssh_command( + self._ssh_command( resource, - 'puppet', 'apply', '-vd', '/tmp/action.pp', '--detailed-exitcodes', + 'puppet', 'apply', '-vd', '/tmp/action.pp', env={ 'FACTER_resource_name': resource.name, }, use_sudo=True ) - # 0 - no changes, 2 - successfull changes - if cmd.return_code not in [0, 2]: - raise errors.SolarError( - 'Puppet for %s failed with %d', resource.name, cmd.return_code) - return cmd def clone_manifests(self, resource): git = resource.args['git'].value From 6ffd408c4b6bac81b6fa04a1b02e4533e5ffab56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Fri, 17 Jul 2015 23:25:33 +0000 Subject: [PATCH 21/86] Keystone fixes use newer ansible_module fix admin_port connection --- example-puppet.py | 10 +-- library/keystone_service.py | 168 ++++++++++++++++++------------------ 2 files changed, 88 insertions(+), 90 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index fee1dcee..72d4f431 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -118,9 +118,9 @@ def deploy(): signals.connect(node1, keystone_service_endpoint) signals.connect(keystone_puppet, keystone_service_endpoint, { 'admin_token': 'admin_token', - 'admin_port': 'keystone_admin_port', + 'admin_port': ['admin_port', 'keystone_admin_port'], 'ip': ['keystone_host', 'admin_ip', 'internal_ip', 'public_ip'], - 'port': ['admin_port', 'internal_port', 'public_port'], + 'port': ['internal_port', 'public_port'], }) signals.connect(keystone_puppet, admin_tenant) @@ -327,7 +327,7 @@ def undeploy(): 'neutron_keystone_role', 'neutron_keystone_user', 'services_tenant', - #'keystone_service_endpoint', + 'keystone_service_endpoint', 'admin_role', 'admin_user', 'admin_tenant', @@ -338,7 +338,7 @@ def undeploy(): 'mariadb_service1', 'openstack_rabbitmq_user', 'openstack_vhost', - 'rabbitmq1', + 'rabbitmq_service1', ] resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource)) @@ -376,7 +376,7 @@ def undeploy(): # actions.resource_action(resources['openstack_rabbitmq_user'], 'remove') # actions.resource_action(resources['openstack_vhost'], 'remove') - # actions.resource_action(resources['rabbitmq1'], 'remove') + # actions.resource_action(resources['rabbitmq_service1'], 'remove') db.clear() diff --git a/library/keystone_service.py b/library/keystone_service.py index ab0425c8..4298445a 100644 --- a/library/keystone_service.py +++ b/library/keystone_service.py @@ -1,8 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# Copied from: https://github.com/openstack-ansible/openstack-ansible-modules/blob/master/keystone_service - DOCUMENTATION = ''' --- module: keystone_service @@ -124,116 +122,116 @@ def get_endpoint(keystone, name): return endpoints[0] -def ensure_service_present(keystone, name, service_type, description, - check_mode): - """ Ensure the service is present and has the right values +def ensure_present(keystone, name, service_type, description, public_url, + internal_url, admin_url, region, check_mode): + """ Ensure the service and its endpoint are present and have the right values. 
- Returns a pair, where the first element is a boolean that indicates - a state change, and the second element is the service uuid, or None - if running in check mode""" + Returns a tuple, where the first element is a boolean that indicates + a state change, the second element is the service uuid (or None in + check mode), and the third element is the endpoint uuid (or None in + check mode).""" + # Fetch service and endpoint, if they exist. service = None - try: - service = get_service(keystone, name) - except: - # Service doesn't exist yet, we'll need to create one - pass - else: - # See if it matches exactly - if service.name == name and \ - service.type == service_type and \ - service.description == description: - - # Same, no changes needed - return (False, service.id) - - # At this point, we know we will need to make a change - if check_mode: - return (True, None) - - if service is None: - service = keystone.services.create(name=name, - service_type=service_type, - description=description) - return (True, service.id) - else: - msg = "keystone v2 API doesn't support updating services" - raise ValueError(msg) - - -def ensure_endpoint_present(keystone, name, public_url, internal_url, - admin_url, region, check_mode): - """ Ensure the service endpoint is present and have the right values - - Assumes the service object has already been created at this point""" - - service = get_service(keystone, name) endpoint = None - try: - endpoint = get_endpoint(keystone, name) - except: - # Endpoint doesn't exist yet, we'll need to create one - pass - else: - # See if it matches - if endpoint.publicurl == public_url and \ - endpoint.adminurl == admin_url and \ - endpoint.internalurl == internal_url and \ - endpoint.region == region: + try: service = get_service(keystone, name) + except: pass + try: endpoint = get_endpoint(keystone, name) + except: pass - # Same, no changes needed - return (False, endpoint.id) + changed = False - # At this point, we know we will need to make a change - if check_mode: - return (True, None) + # Delete endpoint if it exists and doesn't match. + if endpoint is not None: + identical = endpoint.publicurl == public_url and \ + endpoint.adminurl == admin_url and \ + endpoint.internalurl == internal_url and \ + endpoint.region == region + if not identical: + changed = True + ensure_endpoint_absent(keystone, name, check_mode) + endpoint = None + # Delete service and its endpoint if the service exists and doesn't match. + if service is not None: + identical = service.name == name and \ + service.type == service_type and \ + service.description == description + if not identical: + changed = True + ensure_endpoint_absent(keystone, name, check_mode) + endpoint = None + ensure_service_absent(keystone, name, check_mode) + service = None + + # Recreate service, if necessary. + if service is None: + if not check_mode: + service = keystone.services.create( + name=name, + service_type=service_type, + description=description, + ) + changed = True + + # Recreate endpoint, if necessary. 
if endpoint is None: - endpoint = keystone.endpoints.create(region=region, - service_id=service.id, - publicurl=public_url, - adminurl=admin_url, - internalurl=internal_url) - return (True, endpoint.id) - else: - msg = "keystone v2 API doesn't support updating endpoints" - raise ValueError(msg) + if not check_mode: + endpoint = keystone.endpoints.create( + region=region, + service_id=service.id, + publicurl=public_url, + adminurl=admin_url, + internalurl=internal_url, + ) + changed = True + + if check_mode: + # In check mode, the service/endpoint uuids will be the old uuids, + # so omit them. + return changed, None, None + return changed, service.id, endpoint.id def ensure_service_absent(keystone, name, check_mode): """ Ensure the service is absent""" + try: + service = get_service(keystone, name) + if not check_mode: + keystone.services.delete(service.id) + return True + except KeyError: + # Service doesn't exist, so we're done. + return False - service = get_service(keystone, name) - keystone.services.delete(service.id) - return True def ensure_endpoint_absent(keystone, name, check_mode): """ Ensure the service endpoint """ - endpoint = get_endpoint(keystone, name) - keystone.endpoints.delete(endpoint.id) - return True + try: + endpoint = get_endpoint(keystone, name) + if not check_mode: + keystone.endpoints.delete(endpoint.id) + return True + except KeyError: + # Endpoint doesn't exist, so we're done. + return False def dispatch(keystone, name, service_type, description, public_url, internal_url, admin_url, region, state, check_mode): if state == 'present': - (service_changed, service_id) = ensure_service_present(keystone, - name, - service_type, - description, - check_mode) - - (endpoint_changed, endpoint_id) = ensure_endpoint_present( + (changed, service_id, endpoint_id) = ensure_present( keystone, name, + service_type, + description, public_url, internal_url, admin_url, region, - check_mode) - return dict(changed=service_changed or endpoint_changed, - service_id=service_id, - endpoint_id=endpoint_id) + check_mode, + ) + return dict(changed=changed, service_id=service_id, endpoint_id=endpoint_id) elif state == 'absent': endpoint_changed = ensure_endpoint_absent(keystone, name, check_mode) service_changed = ensure_service_absent(keystone, name, check_mode) From 824c12d9f93c82c5ba195a3b232cd948baa544e6 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 14 Jul 2015 11:23:20 +0200 Subject: [PATCH 22/86] Add cinder puppet resource Populate from autogenerated parameters Signed-off-by: Bogdan Dobrelya --- resources/cinder_puppet/actions/remove.pp | 4 + resources/cinder_puppet/actions/run.pp | 113 +++++++++++++ resources/cinder_puppet/meta.yaml | 192 ++++++++++++++++++++++ resources/cinder_puppet/test.py | 10 ++ 4 files changed, 319 insertions(+) create mode 100644 resources/cinder_puppet/actions/remove.pp create mode 100644 resources/cinder_puppet/actions/run.pp create mode 100644 resources/cinder_puppet/meta.yaml create mode 100644 resources/cinder_puppet/test.py diff --git a/resources/cinder_puppet/actions/remove.pp b/resources/cinder_puppet/actions/remove.pp new file mode 100644 index 00000000..7d990fec --- /dev/null +++ b/resources/cinder_puppet/actions/remove.pp @@ -0,0 +1,4 @@ +class {'cinder': + enabled => false, + package_ensure => 'absent' +} diff --git a/resources/cinder_puppet/actions/run.pp b/resources/cinder_puppet/actions/run.pp new file mode 100644 index 00000000..35d3caf7 --- /dev/null +++ b/resources/cinder_puppet/actions/run.pp @@ -0,0 +1,113 @@ +$resource = 
hiera('{{ resource_name }}') + +$ip = $resource['input']['ip']['value'] + +$db_user = $resource['input']['db_user']['value'] +$db_password = $resource['input']['db_password']['value'] +$db_name = $resource['input']['db_name']['value'] + +$database_connection = $resource['input']['database_connection']['value'] +$database_idle_timeout = $resource['input']['database_idle_timeout']['value'] +$database_min_pool_size = $resource['input']['database_min_pool_size']['value'] +$database_max_pool_size = $resource['input']['database_max_pool_size']['value'] +$database_max_retries = $resource['input']['database_max_retries']['value'] +$database_retry_interval = $resource['input']['database_retry_interval']['value'] +$database_max_overflow = $resource['input']['database_max_overflow']['value'] +$rpc_backend = $resource['input']['rpc_backend']['value'] +$control_exchange = $resource['input']['control_exchange']['value'] +$rabbit_host = $resource['input']['rabbit_host']['value'] +$rabbit_port = $resource['input']['rabbit_port']['value'] +$rabbit_hosts = $resource['input']['rabbit_hosts']['value'] +$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']['value'] +$rabbit_userid = $resource['input']['rabbit_userid']['value'] +$rabbit_password = $resource['input']['rabbit_password']['value'] +$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']['value'] +$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']['value'] +$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']['value'] +$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']['value'] +$kombu_ssl_version = $resource['input']['kombu_ssl_version']['value'] +$amqp_durable_queues = $resource['input']['amqp_durable_queues']['value'] +$qpid_hostname = $resource['input']['qpid_hostname']['value'] +$qpid_port = $resource['input']['qpid_port']['value'] +$qpid_username = $resource['input']['qpid_username']['value'] +$qpid_password = $resource['input']['qpid_password']['value'] +$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']['value'] +$qpid_reconnect = $resource['input']['qpid_reconnect']['value'] +$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']['value'] +$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']['value'] +$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']['value'] +$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']['value'] +$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']['value'] +$qpid_heartbeat = $resource['input']['qpid_heartbeat']['value'] +$qpid_protocol = $resource['input']['qpid_protocol']['value'] +$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']['value'] +$package_ensure = $resource['input']['package_ensure']['value'] +$use_ssl = $resource['input']['use_ssl']['value'] +$ca_file = $resource['input']['ca_file']['value'] +$cert_file = $resource['input']['cert_file']['value'] +$key_file = $resource['input']['key_file']['value'] +$api_paste_config = $resource['input']['api_paste_config']['value'] +$use_syslog = $resource['input']['use_syslog']['value'] +$log_facility = $resource['input']['log_facility']['value'] +$log_dir = $resource['input']['log_dir']['value'] +$verbose = $resource['input']['verbose']['value'] +$debug = $resource['input']['debug']['value'] +$storage_availability_zone = $resource['input']['storage_availability_zone']['value'] +$default_availability_zone = $resource['input']['default_availability_zone']['value'] +$mysql_module = 
$resource['input']['mysql_module']['value'] +$sql_connection = $resource['input']['sql_connection']['value'] +$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value'] + +class {'cinder': + database_connection => "mysql://${db_user}:${db_password}@${ip}/${db_name}", + database_idle_timeout => $database_idle_timeout, + database_min_pool_size => $database_min_pool_size, + database_max_pool_size => $database_max_pool_size, + database_max_retries => $database_max_retries, + database_retry_interval => $database_retry_interval, + database_max_overflow => $database_max_overflow, + rpc_backend => $rpc_backend, + control_exchange => $control_exchange, + rabbit_host => $rabbit_host, + rabbit_port => $rabbit_port, + rabbit_hosts => $rabbit_hosts, + rabbit_virtual_host => $rabbit_virtual_host, + rabbit_userid => $rabbit_userid, + rabbit_password => $rabbit_password, + rabbit_use_ssl => $rabbit_use_ssl, + kombu_ssl_ca_certs => $kombu_ssl_ca_certs, + kombu_ssl_certfile => $kombu_ssl_certfile, + kombu_ssl_keyfile => $kombu_ssl_keyfile, + kombu_ssl_version => $kombu_ssl_version, + amqp_durable_queues => $amqp_durable_queues, + qpid_hostname => $qpid_hostname, + qpid_port => $qpid_port, + qpid_username => $qpid_username, + qpid_password => $qpid_password, + qpid_sasl_mechanisms => $qpid_sasl_mechanisms, + qpid_reconnect => $qpid_reconnect, + qpid_reconnect_timeout => $qpid_reconnect_timeout, + qpid_reconnect_limit => $qpid_reconnect_limit, + qpid_reconnect_interval_min => $qpid_reconnect_interval_min, + qpid_reconnect_interval_max => $qpid_reconnect_interval_max, + qpid_reconnect_interval => $qpid_reconnect_interval, + qpid_heartbeat => $qpid_heartbeat, + qpid_protocol => $qpid_protocol, + qpid_tcp_nodelay => $qpid_tcp_nodelay, + package_ensure => $package_ensure, + use_ssl => $use_ssl, + ca_file => $ca_file, + cert_file => $cert_file, + key_file => $key_file, + api_paste_config => $api_paste_config, + use_syslog => $use_syslog, + log_facility => $log_facility, + log_dir => $log_dir, + verbose => $verbose, + debug => $debug, + storage_availability_zone => $storage_availability_zone, + default_availability_zone => $default_availability_zone, + mysql_module => $mysql_module, + sql_connection => $sql_connection, + sql_idle_timeout => $sql_idle_timeout, +} diff --git a/resources/cinder_puppet/meta.yaml b/resources/cinder_puppet/meta.yaml new file mode 100644 index 00000000..59e26b7e --- /dev/null +++ b/resources/cinder_puppet/meta.yaml @@ -0,0 +1,192 @@ +id: cinder_puppet +handler: puppet +puppet_module: cinder +version: 1.0.0 +input: + database_connection: + schema: str! + value: 'sqlite:////var/lib/cinder/cinder.sqlite' + database_idle_timeout: + schema: int! + value: 3600 + database_min_pool_size: + schema: int! + value: 1 + database_max_pool_size: + schema: str! + value: undef + database_max_retries: + schema: int! + value: 10 + database_retry_interval: + schema: int! + value: 10 + database_max_overflow: + schema: str! + value: undef + rpc_backend: + schema: str! + value: 'cinder.openstack.common.rpc.impl_kombu' + control_exchange: + schema: str! + value: 'openstack' + rabbit_host: + schema: str! + value: '127.0.0.1' + rabbit_port: + schema: int! + value: 5672 + rabbit_hosts: + schema: str! + value: false + rabbit_virtual_host: + schema: str! + value: '/' + rabbit_userid: + schema: str! + value: 'guest' + rabbit_password: + schema: str! + value: false + rabbit_use_ssl: + schema: str! + value: false + kombu_ssl_ca_certs: + schema: str! + value: undef + kombu_ssl_certfile: + schema: str! 
+ value: undef + kombu_ssl_keyfile: + schema: str! + value: undef + kombu_ssl_version: + schema: str! + value: 'TLSv1' + amqp_durable_queues: + schema: str! + value: false + qpid_hostname: + schema: str! + value: 'localhost' + qpid_port: + schema: int! + value: 5672 + qpid_username: + schema: str! + value: 'guest' + qpid_password: + schema: str! + value: false + qpid_sasl_mechanisms: + schema: str! + value: false + qpid_reconnect: + schema: str! + value: true + qpid_reconnect_timeout: + schema: int! + value: 0 + qpid_reconnect_limit: + schema: int! + value: 0 + qpid_reconnect_interval_min: + schema: int! + value: 0 + qpid_reconnect_interval_max: + schema: int! + value: 0 + qpid_reconnect_interval: + schema: int! + value: 0 + qpid_heartbeat: + schema: int! + value: 60 + qpid_protocol: + schema: str! + value: 'tcp' + qpid_tcp_nodelay: + schema: str! + value: true + package_ensure: + schema: str! + value: 'present' + use_ssl: + schema: str! + value: false + ca_file: + schema: str! + value: false + cert_file: + schema: str! + value: false + key_file: + schema: str! + value: false + api_paste_config: + schema: str! + value: '/etc/cinder/api-paste.ini' + use_syslog: + schema: str! + value: false + log_facility: + schema: str! + value: 'LOG_USER' + log_dir: + schema: str! + value: '/var/log/cinder' + verbose: + schema: str! + value: false + debug: + schema: str! + value: false + storage_availability_zone: + schema: str! + value: 'nova' + default_availability_zone: + schema: str! + value: false + mysql_module: + schema: str! + value: undef + sql_connection: + schema: str! + value: undef + sql_idle_timeout: + schema: str! + value: undef + + db_user: + schema: str! + value: cinder + db_password: + schema: str! + value: cinder + db_name: + schema: str! + value: cinder + + port: + schema: int! + value: 8776 + + git: + schema: {repository: str!, branch: str!} + value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} + +# forge: +# schema: str! +# value: 'stackforge-cinder' + + ip: + schema: str! + value: + ssh_key: + schema: str! + value: + ssh_user: + schema: str! 
+ value: + +tags: [resource/cinder_service, resources/cinder] diff --git a/resources/cinder_puppet/test.py b/resources/cinder_puppet/test.py new file mode 100644 index 00000000..ec72c3ca --- /dev/null +++ b/resources/cinder_puppet/test.py @@ -0,0 +1,10 @@ +import requests + +from solar.core.log import log + + +def test(resource): + log.debug('Testing cinder_puppet') + requests.get( + 'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value) + ) From 831fb55c865c1c93e44967f23e5848f22f130399 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 14 Jul 2015 18:22:26 +0200 Subject: [PATCH 23/86] Add a composition layer for cinder puppet Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index fee1dcee..aabe1768 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -60,9 +60,6 @@ def deploy(): 'password': 'openstack_password' })[0] - signals.connect(node1, rabbitmq_service1) - signals.connect(rabbitmq_service1, openstack_vhost) - signals.connect(rabbitmq_service1, openstack_rabbitmq_user) signals.connect(openstack_vhost, openstack_rabbitmq_user, { 'vhost_name', }) @@ -224,7 +221,8 @@ def deploy(): # signals.connect(node1, cinder_db) # signals.connect(node1, cinder_db_user) - # signals.connect(node1, cinder_puppet) + cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0] + signals.connect(node1, cinder_puppet) # signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'}) # signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'}) # signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'}) From 58edfdf4bc15397e87208989026912ee303be3e7 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 15 Jul 2015 09:22:53 +0200 Subject: [PATCH 24/86] Fix the cinder puppet parameters schema Signed-off-by: Bogdan Dobrelya --- resources/cinder_puppet/meta.yaml | 102 +++++++++++++++--------------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/resources/cinder_puppet/meta.yaml b/resources/cinder_puppet/meta.yaml index 59e26b7e..dfe6354a 100644 --- a/resources/cinder_puppet/meta.yaml +++ b/resources/cinder_puppet/meta.yaml @@ -4,157 +4,157 @@ puppet_module: cinder version: 1.0.0 input: database_connection: - schema: str! + schema: str value: 'sqlite:////var/lib/cinder/cinder.sqlite' database_idle_timeout: - schema: int! + schema: int value: 3600 database_min_pool_size: - schema: int! + schema: int value: 1 database_max_pool_size: - schema: str! + schema: str value: undef database_max_retries: - schema: int! + schema: int value: 10 database_retry_interval: - schema: int! + schema: int value: 10 database_max_overflow: - schema: str! + schema: str value: undef rpc_backend: - schema: str! + schema: str value: 'cinder.openstack.common.rpc.impl_kombu' control_exchange: - schema: str! + schema: str value: 'openstack' rabbit_host: - schema: str! + schema: str value: '127.0.0.1' rabbit_port: - schema: int! + schema: int value: 5672 rabbit_hosts: - schema: str! + schema: bool value: false rabbit_virtual_host: - schema: str! + schema: str value: '/' rabbit_userid: - schema: str! + schema: str value: 'guest' rabbit_password: schema: str! - value: false + value: 'rabbit' rabbit_use_ssl: - schema: str! + schema: bool value: false kombu_ssl_ca_certs: - schema: str! + schema: str value: undef kombu_ssl_certfile: - schema: str! 
+ schema: str value: undef kombu_ssl_keyfile: - schema: str! + schema: str value: undef kombu_ssl_version: - schema: str! + schema: str value: 'TLSv1' amqp_durable_queues: - schema: str! + schema: bool value: false qpid_hostname: - schema: str! + schema: str value: 'localhost' qpid_port: - schema: int! + schema: int value: 5672 qpid_username: - schema: str! + schema: str value: 'guest' qpid_password: schema: str! - value: false + value: 'qpid' qpid_sasl_mechanisms: - schema: str! + schema: bool value: false qpid_reconnect: - schema: str! + schema: bool value: true qpid_reconnect_timeout: - schema: int! + schema: int value: 0 qpid_reconnect_limit: - schema: int! + schema: int value: 0 qpid_reconnect_interval_min: - schema: int! + schema: int value: 0 qpid_reconnect_interval_max: - schema: int! + schema: int value: 0 qpid_reconnect_interval: - schema: int! + schema: int value: 0 qpid_heartbeat: - schema: int! + schema: int value: 60 qpid_protocol: - schema: str! + schema: str value: 'tcp' qpid_tcp_nodelay: - schema: str! + schema: bool value: true package_ensure: - schema: str! + schema: str value: 'present' use_ssl: - schema: str! + schema: bool value: false ca_file: - schema: str! + schema: bool value: false cert_file: - schema: str! + schema: bool value: false key_file: - schema: str! + schema: bool value: false api_paste_config: - schema: str! + schema: str value: '/etc/cinder/api-paste.ini' use_syslog: - schema: str! + schema: bool value: false log_facility: - schema: str! + schema: str value: 'LOG_USER' log_dir: - schema: str! + schema: str value: '/var/log/cinder' verbose: - schema: str! + schema: bool value: false debug: - schema: str! + schema: bool value: false storage_availability_zone: - schema: str! + schema: str value: 'nova' default_availability_zone: - schema: str! + schema: bool value: false mysql_module: - schema: str! + schema: str value: undef sql_connection: - schema: str! + schema: str value: undef sql_idle_timeout: - schema: str! + schema: str value: undef db_user: From afeb46983c0cf5a958a3fadf390ce3d679069e41 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 15 Jul 2015 11:04:54 +0200 Subject: [PATCH 25/86] Add README for cinder puppet resource Signed-off-by: Bogdan Dobrelya --- resources/cinder_puppet/README.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 resources/cinder_puppet/README.md diff --git a/resources/cinder_puppet/README.md b/resources/cinder_puppet/README.md new file mode 100644 index 00000000..a3c6fb82 --- /dev/null +++ b/resources/cinder_puppet/README.md @@ -0,0 +1,4 @@ +# Cinder resource for puppet handler + +Controlls a live cycle of the cinder entities, +like a main puppet class, user, DB, AMQP, packages. From e4467ddfa0b70134b6b4db0ed3483e910c98cc53 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Wed, 15 Jul 2015 14:29:15 +0200 Subject: [PATCH 26/86] Add keystone specific metadata to cinder The keystone_* params are mandatory for every puppet resource. Signed-off-by: Bogdan Dobrelya --- resources/cinder_puppet/meta.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/resources/cinder_puppet/meta.yaml b/resources/cinder_puppet/meta.yaml index dfe6354a..ba333fea 100644 --- a/resources/cinder_puppet/meta.yaml +++ b/resources/cinder_puppet/meta.yaml @@ -175,6 +175,22 @@ input: schema: {repository: str!, branch: str!} value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} + keystone_host: + schema: str! + value: '' + keystone_port: + schema: int! 
+ value: '' + keystone_user: + schema: str! + value: '' + keystone_password: + schema: str! + value: '' + keystone_tenant: + schema: str! + value: '' + # forge: # schema: str! # value: 'stackforge-cinder' From ec02da0a812ec33ac245ba97177bc86871e0f5f7 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 16 Jul 2015 17:44:52 +0200 Subject: [PATCH 27/86] Update a composition layer for cinder puppet Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 93 +++++++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 40 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index aabe1768..28d1682f 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -200,40 +200,48 @@ def deploy(): 'port': ['admin_port', 'internal_port', 'public_port'], }) - # # CINDER - # cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', { - # 'rabbit_userid': 'guest', 'rabbit_password': 'guest'})[0] - # cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', { - # 'db_name': 'cinder_db', 'login_user': 'root'})[0] - # cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', { - # 'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0] - # cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', { - # 'user_name': 'cinder', 'user_password': 'cinder'})[0] - # cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', { - # 'role_name': 'cinder'})[0] - # cinder_keystone_service_endpoint = vr.create( - # 'cinder_keystone_service_endpoint', 'resources/keystone_service_endpoint', { - # 'adminurl': 'http://{{admin_ip}}:{{admin_port}}', - # 'internalurl': 'http://{{internal_ip}}:{{internal_port}}', - # 'publicurl': 'http://{{public_ip}}:{{public_port}}', - # 'description': 'OpenStack Network Service', 'type': 'network'})[0] + # CINDER + cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', { + 'rabbit_userid': 'guest', 'rabbit_password': 'guest'})[0] + cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', { + 'db_name': 'cinder_db', 'login_user': 'root'})[0] + cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', { + 'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0] + cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', { + 'user_name': 'cinder', 'user_password': 'cinder'})[0] + cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', { + 'role_name': 'cinder'})[0] + cinder_keystone_service_endpoint = vr.create( + 'cinder_keystone_service_endpoint', + 'resources/keystone_service_endpoint', { + 'endpoint_name': 'cinder', + 'adminurl': 'http://{{admin_ip}}:{{admin_port}}', + 'internalurl': 'http://{{internal_ip}}:{{internal_port}}', + 'publicurl': 'http://{{public_ip}}:{{public_port}}', + 'description': 'OpenStack Block Storage Service', 'type': 'volume'})[0] - - # signals.connect(node1, cinder_db) - # signals.connect(node1, cinder_db_user) - cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0] + signals.connect(node1, cinder_db) + signals.connect(node1, cinder_db_user) signals.connect(node1, cinder_puppet) - # signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'}) - # signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'}) - # signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'}) - # 
signals.connect(mariadb_service1, cinder_db, { - # 'port': 'login_port', 'root_password': 'login_password'}) - # signals.connect(mariadb_service1, cinder_db_user, { - # 'port': 'login_port', 'root_password': 'login_password'}) - # signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'}) - - # signals.connect(services_tenant, cinder_keystone_user) - # signals.connect(cinder_keystone_user, cinder_keystone_role) + signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'}) + signals.connect(admin_user, cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #? + signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'}) + #signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'}) + signals.connect(mariadb_service1, cinder_db, { + 'port': 'login_port', 'root_password': 'login_password'}) + signals.connect(mariadb_service1, cinder_db_user, { + 'port': 'login_port', 'root_password': 'login_password'}) + signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'}) + signals.connect(cinder_db, cinder_puppet, {'db_name': 'db_name'}) + signals.connect(cinder_db_user, cinder_puppet, {'login_user': 'db_user', 'login_password': 'db_password'}) + signals.connect(keystone_puppet, cinder_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'}) #? + signals.connect(services_tenant, cinder_keystone_user) + signals.connect(cinder_keystone_user, cinder_keystone_role) + signals.connect(keystone_puppet, cinder_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'}) #? + signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'admin_ip', 'port': 'admin_port'}) + signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'internal_ip', 'port': 'internal_port'}) + signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'public_ip', 'port': 'public_port'}) + signals.connect(keystone_puppet, cinder_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'}) #? 
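The mapping argument used throughout these connect calls is either a set of shared input names, a one-to-one rename, or a single source input fanned out to a list of destination inputs; apply_mapping below is an illustrative stand-in for that behaviour, not the signals implementation:

def apply_mapping(src_inputs, mapping):
    """Resolve a connect-style mapping into {destination_input: value} pairs.

    Accepts a set of shared names ({'vhost_name'}), a rename
    ({'port': 'rabbit_port'}) or a fan-out
    ({'ip': ['admin_ip', 'internal_ip', 'public_ip']}).
    """
    if isinstance(mapping, set):
        mapping = dict((name, name) for name in mapping)
    resolved = {}
    for src, dst in mapping.items():
        for name in (dst if isinstance(dst, list) else [dst]):
            resolved[name] = src_inputs[src]
    return resolved

# Example: one keystone ip value feeds three endpoint inputs.
# apply_mapping({'ip': '10.0.0.3'}, {'ip': ['admin_ip', 'internal_ip', 'public_ip']})
# -> {'admin_ip': '10.0.0.3', 'internal_ip': '10.0.0.3', 'public_ip': '10.0.0.3'}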
# NOVA # #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0] @@ -291,20 +299,19 @@ def deploy(): actions.resource_action(admin_role, 'run') actions.resource_action(keystone_service_endpoint, 'run') - actions.resource_action(services_tenant, 'run') + actions.resource_action(neutron_keystone_user, 'run') actions.resource_action(neutron_keystone_role, 'run') - actions.resource_action(neutron_puppet, 'run') actions.resource_action(neutron_keystone_service_endpoint, 'run') - # actions.resource_action(cinder_db, 'run') - # actions.resource_action(cinder_db_user, 'run') - # actions.resource_action(cinder_keystone_user, 'run') - # actions.resource_action(cinder_keystone_role, 'run') - - # actions.resource_action(cinder_puppet, 'run') + actions.resource_action(cinder_db, 'run') + actions.resource_action(cinder_db_user, 'run') + actions.resource_action(cinder_keystone_user, 'run') + actions.resource_action(cinder_keystone_role, 'run') + actions.resource_action(cinder_puppet, 'run') + actions.resource_action(cinder_keystone_service_endpoint, 'run') # actions.resource_action(nova_keystone_user, 'run') # actions.resource_action(nova_keystone_role, 'run') @@ -320,6 +327,12 @@ def undeploy(): db = get_db() to_remove = [ + 'cinder_keystone_service_endpoint', + 'cinder_puppet', + 'cinder_keystone_role', + 'cinder_keystone_user', + 'cinder_db_user', + 'cinder_db', 'neutron_keystone_service_endpoint', 'neutron_puppet', 'neutron_keystone_role', From 74abd99f95d12d9e19fcf4c45c27aa55dfa9601c Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 17 Jul 2015 11:21:52 +0200 Subject: [PATCH 28/86] Fix undef db conneciton for cinder Do not pass deprecated param for cinder Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 6 +++--- resources/cinder_puppet/actions/run.pp | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index 28d1682f..11703b7f 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -231,9 +231,9 @@ def deploy(): 'port': 'login_port', 'root_password': 'login_password'}) signals.connect(mariadb_service1, cinder_db_user, { 'port': 'login_port', 'root_password': 'login_password'}) - signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'}) - signals.connect(cinder_db, cinder_puppet, {'db_name': 'db_name'}) - signals.connect(cinder_db_user, cinder_puppet, {'login_user': 'db_user', 'login_password': 'db_password'}) + signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name', 'login_user': 'login_user', 'login_password': 'login_password'}) #? + #signals.connect(cinder_db, cinder_puppet, {'db_name': 'db_name'}) + signals.connect(cinder_db_user, cinder_puppet, {'db_name': 'db_name', 'login_user': 'db_user', 'login_password': 'db_password'}) #? signals.connect(keystone_puppet, cinder_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'}) #? 
signals.connect(services_tenant, cinder_keystone_user) signals.connect(cinder_keystone_user, cinder_keystone_role) diff --git a/resources/cinder_puppet/actions/run.pp b/resources/cinder_puppet/actions/run.pp index 35d3caf7..851e3945 100644 --- a/resources/cinder_puppet/actions/run.pp +++ b/resources/cinder_puppet/actions/run.pp @@ -55,7 +55,8 @@ $debug = $resource['input']['debug']['value'] $storage_availability_zone = $resource['input']['storage_availability_zone']['value'] $default_availability_zone = $resource['input']['default_availability_zone']['value'] $mysql_module = $resource['input']['mysql_module']['value'] -$sql_connection = $resource['input']['sql_connection']['value'] +# Do not apply the legacy stuff +#$sql_connection = $resource['input']['sql_connection']['value'] $sql_idle_timeout = $resource['input']['sql_idle_timeout']['value'] class {'cinder': From 143d17c6e6f05bdfcee170b18050615ee32666d6 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 17 Jul 2015 13:10:02 +0200 Subject: [PATCH 29/86] Align cinder connections Align cinder connections to follow the nova template https://github.com/Mirantis/solar/pull/27 Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index 11703b7f..9137b97c 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -220,29 +220,30 @@ def deploy(): 'publicurl': 'http://{{public_ip}}:{{public_port}}', 'description': 'OpenStack Block Storage Service', 'type': 'volume'})[0] + signals.connect(node1, cinder_puppet) signals.connect(node1, cinder_db) signals.connect(node1, cinder_db_user) - signals.connect(node1, cinder_puppet) signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'}) signals.connect(admin_user, cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #? signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'}) #signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'}) - signals.connect(mariadb_service1, cinder_db, { - 'port': 'login_port', 'root_password': 'login_password'}) - signals.connect(mariadb_service1, cinder_db_user, { - 'port': 'login_port', 'root_password': 'login_password'}) - signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name', 'login_user': 'login_user', 'login_password': 'login_password'}) #? - #signals.connect(cinder_db, cinder_puppet, {'db_name': 'db_name'}) - signals.connect(cinder_db_user, cinder_puppet, {'db_name': 'db_name', 'login_user': 'db_user', 'login_password': 'db_password'}) #? - signals.connect(keystone_puppet, cinder_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'}) #? + signals.connect(mariadb_service1, cinder_db, {'port': 'login_port', 'root_password': 'login_password'}) + signals.connect(mariadb_service1, cinder_db_user, {'port': 'login_port', 'root_password': 'login_password'}) + signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'}) + signals.connect(cinder_db_user, cinder_puppet, {'user_name':'db_user', 'db_name':'db_name', 'user_password':'db_password'}) + signals.connect(keystone_puppet, cinder_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'}) #or non admin port? 
signals.connect(services_tenant, cinder_keystone_user) signals.connect(cinder_keystone_user, cinder_keystone_role) - signals.connect(keystone_puppet, cinder_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'}) #? + signals.connect(cinder_keystone_user, cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'}) + signals.connect(mariadb_service1, cinder_puppet, {'ip':'ip'}) + signals.connect(cinder_puppet, cinder_keystone_service_endpoint, { + 'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user',}) + signals.connect(keystone_puppet, cinder_keystone_service_endpoint, { + 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'}) signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'admin_ip', 'port': 'admin_port'}) signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'internal_ip', 'port': 'internal_port'}) signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'public_ip', 'port': 'public_port'}) - signals.connect(keystone_puppet, cinder_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'}) #? - + # NOVA # #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0] # # TODO: fix rabbitmq user/password From c438cd43d5dfaa7555ceef96699567d3f1a7cc41 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 20 Jul 2015 09:45:07 +0200 Subject: [PATCH 30/86] Fix rabbit connections after rebase Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/example-puppet.py b/example-puppet.py index 9137b97c..0dc97be2 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -60,6 +60,9 @@ def deploy(): 'password': 'openstack_password' })[0] + signals.connect(node1, rabbitmq_service1) + signals.connect(rabbitmq_service1, openstack_vhost) + signals.connect(rabbitmq_service1, openstack_rabbitmq_user) signals.connect(openstack_vhost, openstack_rabbitmq_user, { 'vhost_name', }) From 7d7fb4f4279fe1b68451c3c157e78030fb0fb66a Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 20 Jul 2015 09:52:37 +0200 Subject: [PATCH 31/86] Fix keystone_host connection for cinder endp Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index 0dc97be2..71d01a00 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -240,12 +240,11 @@ def deploy(): signals.connect(cinder_keystone_user, cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'}) signals.connect(mariadb_service1, cinder_puppet, {'ip':'ip'}) signals.connect(cinder_puppet, cinder_keystone_service_endpoint, { - 'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user',}) + 'ip': ['ip', 'keystone_host'], 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user',}) signals.connect(keystone_puppet, cinder_keystone_service_endpoint, { - 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'}) - signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'admin_ip', 'port': 'admin_port'}) - signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'internal_ip', 'port': 'internal_port'}) - signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {'ip': 'public_ip', 'port': 
'public_port'}) + 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token', + 'ip': ['admin_ip', 'internal_ip', 'public_ip'], + 'port': ['admin_port', 'internal_port', 'public_port'],}) # NOVA # #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0] From 80f8c08e9726714c5ead744c7e0d17bbe2160255 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 20 Jul 2015 10:23:30 +0200 Subject: [PATCH 32/86] Fix cinder endpoint port connection Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index 71d01a00..3eb815a5 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -240,11 +240,11 @@ def deploy(): signals.connect(cinder_keystone_user, cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'}) signals.connect(mariadb_service1, cinder_puppet, {'ip':'ip'}) signals.connect(cinder_puppet, cinder_keystone_service_endpoint, { - 'ip': ['ip', 'keystone_host'], 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user',}) - signals.connect(keystone_puppet, cinder_keystone_service_endpoint, { - 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token', - 'ip': ['admin_ip', 'internal_ip', 'public_ip'], + 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user', + 'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'], 'port': ['admin_port', 'internal_port', 'public_port'],}) + signals.connect(keystone_puppet, cinder_keystone_service_endpoint, { + 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'}) # NOVA # #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0] From 7dfd32eb1a002e12db8cef05b371b5d28f3b2fc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Thu, 16 Jul 2015 22:59:08 +0000 Subject: [PATCH 33/86] nova-api resource --- example-puppet.py | 97 ++++++++++++++++++++++------ resources/nova/actions/remove.yml | 6 -- resources/nova/actions/run.yml | 6 -- resources/nova/meta.yaml | 13 ---- resources/nova_api/actions/remove.pp | 29 +++++++++ resources/nova_api/actions/run.pp | 33 ++++++++++ resources/nova_api/meta.yaml | 59 +++++++++++++++++ 7 files changed, 197 insertions(+), 46 deletions(-) delete mode 100644 resources/nova/actions/remove.yml delete mode 100644 resources/nova/actions/run.yml delete mode 100644 resources/nova/meta.yaml create mode 100644 resources/nova_api/actions/remove.pp create mode 100644 resources/nova_api/actions/run.pp create mode 100644 resources/nova_api/meta.yaml diff --git a/example-puppet.py b/example-puppet.py index 72d4f431..690b343a 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -238,25 +238,75 @@ def deploy(): # signals.connect(cinder_keystone_user, cinder_keystone_role) # NOVA - # #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0] - # # TODO: fix rabbitmq user/password - # nova_network_puppet = vr.create('nova_network_puppet', 'resources/nova_network_puppet', {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0] + nova_api = vr.create('nova_api', 'resources/nova_api', {})[0] + nova_db = vr.create('nova_db', 'resources/mariadb_db/', { + 'db_name': 'nova_db', + 'login_user': 'root'})[0] + 
nova_db_user = vr.create('nova_db_user', 'resources/mariadb_user/', { + 'user_name': 'nova', + 'user_password': 'nova', + 'login_user': 'root'})[0] + nova_keystone_user = vr.create('nova_keystone_user', 'resources/keystone_user', { + 'user_name': 'nova', + 'user_password': 'nova'})[0] + nova_keystone_role = vr.create('nova_keystone_role', 'resources/keystone_role', { + 'role_name': 'admin'})[0] + nova_keystone_service_endpoint = vr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', { + 'endpoint_name': 'nova', + 'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s', + 'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s', + 'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s', + 'description': 'OpenStack Compute Service', + 'type': 'compute', + 'public_port': 8774, + 'internal_port': 8774, + 'admin_port': 8774})[0] - # nova_keystone_user = vr.create('nova_keystone_user', 'resources/keystone_user', {'user_name': 'nova', 'user_password': 'nova'})[0] - # nova_keystone_role = vr.create('nova_keystone_role', 'resources/keystone_role', {'role_name': 'nova'})[0] + signals.connect(node1, nova_api) + signals.connect(node1, nova_db) + signals.connect(node1, nova_db_user) + signals.connect(mariadb_service1, nova_db, { + 'port': 'login_port', + 'root_password': 'login_password'}) + signals.connect(mariadb_service1, nova_db_user, { + 'port': 'login_port', + 'root_password': 'login_password'}) + signals.connect(nova_db, nova_db_user, { + 'db_name': 'db_name'}) + signals.connect(services_tenant, nova_keystone_user) + signals.connect(nova_keystone_user, nova_keystone_role) + signals.connect(keystone_puppet, nova_api, { + 'ip': 'keystone_host', + 'admin_port': 'keystone_port'}) + signals.connect(nova_keystone_user, nova_api, { + 'user_name': 'keystone_user_name', + 'tenant_name': 'keystone_tenant_name', + 'user_password': 'keystone_password'}) + signals.connect(rabbitmq_service1, nova_api, { + 'ip': 'rabbitmq_host'}) + signals.connect(openstack_rabbitmq_user, nova_api, { + 'user_name': 'rabbitmq_user', + 'password': 'rabbitmq_password'}) + signals.connect(keystone_puppet, nova_keystone_service_endpoint, { + 'ip': 'keystone_host', + 'admin_port': 'keystone_admin_port', + 'admin_token': 'admin_token'}) + signals.connect(mariadb_service1, nova_api, { + 'ip':'db_host'}) + signals.connect(nova_db_user, nova_api, { + 'user_name':'db_user', + 'db_name':'db_name', + 'user_password':'db_password'}) + signals.connect(nova_api, nova_keystone_service_endpoint, { + 'ip': ['ip', 'public_ip', 'internal_ip', 'admin_ip'], + 'ssh_key': 'ssh_key', + 'ssh_user': 'ssh_user'}) + signals.connect(nova_api, nova_keystone_service_endpoint, { + 'ip': 'ip', + 'ssh_key': 'ssh_key', + 'ssh_user': 'ssh_user'}) - # TODO: 'services' tenant-id is hardcoded - # nova_keystone_service_endpoint = vr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', {'adminurl': 'http://{{ip}}:{{admin_port}}/v2/services', 'internalurl': 'http://{{ip}}:{{public_port}}/v2/services', 'publicurl': 'http://{{ip}}:{{port}}/v2/services', 'description': 'OpenStack Compute Service', 'type': 'compute', 'port': 8776, 'admin_port': 8776})[0] - - # signals.connect(node1, nova_network_puppet) - - # signals.connect(services_tenant, nova_keystone_user) - # signals.connect(neutron_keystone_user, nova_keystone_role) - - # signals.connect(nova_keystone_user, nova_network_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 
'keystone_tenant'}) # signals.connect(keystone_puppet, nova_network_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'}) - - # signals.connect(nova_network_puppet, nova_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'}) # signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'}) # signals.connect(rabbitmq_service1, nova_network_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'}) @@ -308,11 +358,12 @@ def deploy(): # actions.resource_action(cinder_puppet, 'run') - # actions.resource_action(nova_keystone_user, 'run') - # actions.resource_action(nova_keystone_role, 'run') - - # actions.resource_action(nova_network_puppet, 'run') - #actions.resource_action(nova_keystone_service_endpoint, 'run') + actions.resource_action(nova_db, 'run') + actions.resource_action(nova_db_user, 'run') + actions.resource_action(nova_keystone_user, 'run') + actions.resource_action(nova_keystone_role, 'run') + actions.resource_action(nova_api, 'run') + actions.resource_action(nova_keystone_service_endpoint, 'run') time.sleep(10) @@ -322,6 +373,10 @@ def undeploy(): db = get_db() to_remove = [ + 'nova_db', + 'nova_db_user', + 'nova_keystone_service_endpoint', + 'nova_api', 'neutron_keystone_service_endpoint', 'neutron_puppet', 'neutron_keystone_role', diff --git a/resources/nova/actions/remove.yml b/resources/nova/actions/remove.yml deleted file mode 100644 index 76142acf..00000000 --- a/resources/nova/actions/remove.yml +++ /dev/null @@ -1,6 +0,0 @@ -# TODO -- hosts: [{{ ip }}] - sudo: yes - tasks: - - shell: docker stop {{ name }} - - shell: docker rm {{ name }} diff --git a/resources/nova/actions/run.yml b/resources/nova/actions/run.yml deleted file mode 100644 index e223fe8f..00000000 --- a/resources/nova/actions/run.yml +++ /dev/null @@ -1,6 +0,0 @@ -# TODO -- hosts: [{{ ip }}] - sudo: yes - tasks: - - shell: docker run -d --net="host" --privileged \ - --name {{ name }} {{ image }} diff --git a/resources/nova/meta.yaml b/resources/nova/meta.yaml deleted file mode 100644 index 5c48b0b9..00000000 --- a/resources/nova/meta.yaml +++ /dev/null @@ -1,13 +0,0 @@ -id: nova -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - port: - schema: int! - value: 8774 - image: # TODO - schema: str! 
- value: diff --git a/resources/nova_api/actions/remove.pp b/resources/nova_api/actions/remove.pp new file mode 100644 index 00000000..4d6c4864 --- /dev/null +++ b/resources/nova_api/actions/remove.pp @@ -0,0 +1,29 @@ +$resource = hiera($::resource_name) + +$rabbitmq_user = $resource['input']['rabbitmq_user']['value'] +$rabbitmq_password = $resource['input']['rabbitmq_password']['value'] +$rabbitmq_host = $resource['input']['rabbitmq_host']['value'] +$db_user = $resource['input']['db_user']['value'] +$db_password = $resource['input']['db_password']['value'] +$db_name = $resource['input']['db_name']['value'] +$db_host = $resource['input']['db_host']['value'] +$keystone_password = $resource['input']['keystone_password']['value'] +$keystone_host = $resource['input']['keystone_host']['value'] +$keystone_port = $resource['input']['keystone_port']['value'] +$keystone_tenant_name = $resource['input']['keystone_tenant_name']['value'] +$keystone_user = $resource['input']['keystone_user_name']['value'] + +class { 'nova': + database_connection => "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8", + rabbit_userid => $rabbitmq_user, + rabbit_password => $rabbitmq_password, + rabbit_host => $rabbitmq_host, + image_service => 'nova.image.glance.GlanceImageService', + glance_api_servers => 'localhost:9292', + verbose => false, +} + +class { 'nova::api': + admin_password => $keystone_password, + ensure_package => 'absent' +} diff --git a/resources/nova_api/actions/run.pp b/resources/nova_api/actions/run.pp new file mode 100644 index 00000000..c1384f8b --- /dev/null +++ b/resources/nova_api/actions/run.pp @@ -0,0 +1,33 @@ +$resource = hiera($::resource_name) + +$rabbitmq_user = $resource['input']['rabbitmq_user']['value'] +$rabbitmq_password = $resource['input']['rabbitmq_password']['value'] +$rabbitmq_host = $resource['input']['rabbitmq_host']['value'] +$db_user = $resource['input']['db_user']['value'] +$db_password = $resource['input']['db_password']['value'] +$db_name = $resource['input']['db_name']['value'] +$db_host = $resource['input']['db_host']['value'] +$keystone_password = $resource['input']['keystone_password']['value'] +$keystone_host = $resource['input']['keystone_host']['value'] +$keystone_port = $resource['input']['keystone_port']['value'] +$keystone_tenant_name = $resource['input']['keystone_tenant_name']['value'] +$keystone_user = $resource['input']['keystone_user_name']['value'] + +class { 'nova': + database_connection => "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8", + rabbit_userid => $rabbitmq_user, + rabbit_password => $rabbitmq_password, + rabbit_host => $rabbitmq_host, + image_service => 'nova.image.glance.GlanceImageService', + glance_api_servers => 'localhost:9292', + verbose => false, +} + +class { 'nova::api': + enabled => true, + admin_user => $keystone_user, + admin_password => $keystone_password, + auth_host => $keystone_host, + auth_port => $keystone_port, + admin_tenant_name => $keystone_tenant_name, +} diff --git a/resources/nova_api/meta.yaml b/resources/nova_api/meta.yaml new file mode 100644 index 00000000..55f033ec --- /dev/null +++ b/resources/nova_api/meta.yaml @@ -0,0 +1,59 @@ +id: nova +handler: puppet +puppet_module: nova +version: 1.0.0 +input: + db_user: + schema: str! + value: nova + db_password: + schema: str! + value: + db_name: + schema: str! + value: + db_host: + schema: str! + value: + + rabbitmq_user: + schema: str! + value: + rabbitmq_password: + schema: str! + value: + rabbitmq_host: + schema: str! 
+ value: + + keystone_password: + schema: str! + value: + keystone_port: + schema: int! + value: + keystone_host: + schema: str! + value: + keystone_tenant_name: + schema: str! + value: + keystone_user_name: + schema: str! + value: + + git: + schema: {repository: str!, branch: str!} + value: {repository: 'https://github.com/openstack/puppet-nova', branch: 'stable/juno'} + + ip: + schema: str! + value: + ssh_key: + schema: str! + value: + ssh_user: + schema: str! + value: + +tags: [resource/nova_service, resources/nova] From 21f18a5f75e13780d4950fce072e80e7db516417 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Fri, 17 Jul 2015 12:46:53 +0000 Subject: [PATCH 34/86] add tag --- resources/nova_api/meta.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/nova_api/meta.yaml b/resources/nova_api/meta.yaml index 55f033ec..96ff21dc 100644 --- a/resources/nova_api/meta.yaml +++ b/resources/nova_api/meta.yaml @@ -56,4 +56,4 @@ input: schema: str! value: -tags: [resource/nova_service, resources/nova] +tags: [resource/nova_service, resources/nova, resource/nova-api] From 674dbb5709010c3a3288019463a5e581f31eff8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Fri, 17 Jul 2015 12:52:50 +0000 Subject: [PATCH 35/86] change path --- example-puppet.py | 2 +- resources/{nova_api => nova_api_puppet}/actions/remove.pp | 0 resources/{nova_api => nova_api_puppet}/actions/run.pp | 0 resources/{nova_api => nova_api_puppet}/meta.yaml | 0 4 files changed, 1 insertion(+), 1 deletion(-) rename resources/{nova_api => nova_api_puppet}/actions/remove.pp (100%) rename resources/{nova_api => nova_api_puppet}/actions/run.pp (100%) rename resources/{nova_api => nova_api_puppet}/meta.yaml (100%) diff --git a/example-puppet.py b/example-puppet.py index 690b343a..f83a5149 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -238,7 +238,7 @@ def deploy(): # signals.connect(cinder_keystone_user, cinder_keystone_role) # NOVA - nova_api = vr.create('nova_api', 'resources/nova_api', {})[0] + nova_api = vr.create('nova_api', 'resources/nova_api_puppet', {})[0] nova_db = vr.create('nova_db', 'resources/mariadb_db/', { 'db_name': 'nova_db', 'login_user': 'root'})[0] diff --git a/resources/nova_api/actions/remove.pp b/resources/nova_api_puppet/actions/remove.pp similarity index 100% rename from resources/nova_api/actions/remove.pp rename to resources/nova_api_puppet/actions/remove.pp diff --git a/resources/nova_api/actions/run.pp b/resources/nova_api_puppet/actions/run.pp similarity index 100% rename from resources/nova_api/actions/run.pp rename to resources/nova_api_puppet/actions/run.pp diff --git a/resources/nova_api/meta.yaml b/resources/nova_api_puppet/meta.yaml similarity index 100% rename from resources/nova_api/meta.yaml rename to resources/nova_api_puppet/meta.yaml From 7158c33f4d09792e368737ffd5672bbfebf06e22 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 20 Jul 2015 12:05:15 +0200 Subject: [PATCH 36/86] Fix admin role for cinder in services tenant Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example-puppet.py b/example-puppet.py index 3eb815a5..62b6babf 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -213,7 +213,7 @@ def deploy(): cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', { 'user_name': 'cinder', 'user_password': 'cinder'})[0] cinder_keystone_role = vr.create('cinder_keystone_role', 
'resources/keystone_role', { - 'role_name': 'cinder'})[0] + 'role_name': 'admin'})[0] cinder_keystone_service_endpoint = vr.create( 'cinder_keystone_service_endpoint', 'resources/keystone_service_endpoint', { From 21112df4f69bf58c79b4bdc21bb18885ddc78c0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Mon, 20 Jul 2015 10:07:02 +0000 Subject: [PATCH 37/86] Remove nodename --- resources/rabbitmq_service/actions/remove.pp | 3 --- resources/rabbitmq_service/actions/run.pp | 1 - resources/rabbitmq_service/meta.yaml | 3 --- 3 files changed, 7 deletions(-) diff --git a/resources/rabbitmq_service/actions/remove.pp b/resources/rabbitmq_service/actions/remove.pp index 1047bc46..f1121da5 100644 --- a/resources/rabbitmq_service/actions/remove.pp +++ b/resources/rabbitmq_service/actions/remove.pp @@ -1,11 +1,8 @@ $resource = hiera($::resource_name) -$node_name = $resource['input']['node_name']['value'] - class { '::rabbitmq': package_ensure => 'absent', environment_variables => { - 'RABBITMQ_NODENAME' => $node_name, 'RABBITMQ_SERVICENAME' => 'RabbitMQ' } } diff --git a/resources/rabbitmq_service/actions/run.pp b/resources/rabbitmq_service/actions/run.pp index befa93d9..2ea22054 100644 --- a/resources/rabbitmq_service/actions/run.pp +++ b/resources/rabbitmq_service/actions/run.pp @@ -2,7 +2,6 @@ $resource = hiera($::resource_name) $port = "${resource['input']['port']['value']}" $management_port = "${resource['input']['management_port']['value']}" -$node_name = $resource['input']['node_name']['value'] class { '::rabbitmq': service_manage => true, diff --git a/resources/rabbitmq_service/meta.yaml b/resources/rabbitmq_service/meta.yaml index 69ef8a08..010753fc 100644 --- a/resources/rabbitmq_service/meta.yaml +++ b/resources/rabbitmq_service/meta.yaml @@ -11,9 +11,6 @@ input: schema: str! value: '' - node_name: - schema: str! - value: 'node1' port: schema: int! value: '' From aefa3c78b7fa8d1bf024c4e88779483bf2e48a0f Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 20 Jul 2015 12:43:49 +0200 Subject: [PATCH 38/86] Fix README.md Signed-off-by: Bogdan Dobrelya --- resources/cinder_puppet/README.md | 110 +++++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 1 deletion(-) diff --git a/resources/cinder_puppet/README.md b/resources/cinder_puppet/README.md index a3c6fb82..0f288ca8 100644 --- a/resources/cinder_puppet/README.md +++ b/resources/cinder_puppet/README.md @@ -1,4 +1,112 @@ # Cinder resource for puppet handler Controlls a live cycle of the cinder entities, -like a main puppet class, user, DB, AMQP, packages. +like the main puppet class, auth, DB, AMQP, packages, +keystone user, role and endpoint. + +# Parameters + +source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/init.pp + + ``database_connection`` + Url used to connect to database. + (Optional) Defaults to + 'sqlite:////var/lib/cinder/cinder.sqlite' + + ``database_idle_timeout`` + Timeout when db connections should be reaped. + (Optional) Defaults to 3600. + + ``database_min_pool_size`` + Minimum number of SQL connections to keep open in a pool. + (Optional) Defaults to 1. + + ``database_max_pool_size`` + Maximum number of SQL connections to keep open in a pool. + (Optional) Defaults to undef. + + ``database_max_retries`` + Maximum db connection retries during startup. + Setting -1 implies an infinite retry count. + (Optional) Defaults to 10. + + ``database_retry_interval`` + Interval between retries of opening a sql connection. + (Optional) Defaults to 10. 
+ + ``database_max_overflow`` + If set, use this value for max_overflow with sqlalchemy. + (Optional) Defaults to undef. + + ``rabbit_use_ssl`` + (optional) Connect over SSL for RabbitMQ + Defaults to false + + ``kombu_ssl_ca_certs`` + (optional) SSL certification authority file (valid only if SSL enabled). + Defaults to undef + + ``kombu_ssl_certfile`` + (optional) SSL cert file (valid only if SSL enabled). + Defaults to undef + + ``kombu_ssl_keyfile`` + (optional) SSL key file (valid only if SSL enabled). + Defaults to undef + + ``kombu_ssl_version`` + (optional) SSL version to use (valid only if SSL enabled). + Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be + available on some distributions. + Defaults to 'TLSv1' + + ``amqp_durable_queues`` + Use durable queues in amqp. + (Optional) Defaults to false. + + ``use_syslog`` + Use syslog for logging. + (Optional) Defaults to false. + + ``log_facility`` + Syslog facility to receive log lines. + (Optional) Defaults to LOG_USER. + + ``log_dir`` + (optional) Directory where logs should be stored. + If set to boolean false, it will not log to any directory. + Defaults to '/var/log/cinder' + + ``use_ssl`` + (optional) Enable SSL on the API server + Defaults to false, not set + + ``cert_file`` + (optinal) Certificate file to use when starting API server securely + Defaults to false, not set + + ``key_file`` + (optional) Private key file to use when starting API server securely + Defaults to false, not set + + ``ca_file`` + (optional) CA certificate file to use to verify connecting clients + Defaults to false, not set_ + + ``mysql_module`` + (optional) Deprecated. Does nothing. + + ``storage_availability_zone`` + (optional) Availability zone of the node. + Defaults to 'nova' + + ``default_availability_zone`` + (optional) Default availability zone for new volumes. + If not set, the storage_availability_zone option value is used as + the default for new volumes. + Defaults to false + + ``sql_connection`` + DEPRECATED + ``sql_idle_timeout`` + DEPRECATED \ No newline at end of file From 12806c7efef8becffc3e68558d7f6a637fbdf851 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 20 Jul 2015 15:26:08 +0200 Subject: [PATCH 39/86] Replace undef to empty strings for cinder Signed-off-by: Bogdan Dobrelya --- resources/cinder_puppet/meta.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/resources/cinder_puppet/meta.yaml b/resources/cinder_puppet/meta.yaml index ba333fea..611b3eac 100644 --- a/resources/cinder_puppet/meta.yaml +++ b/resources/cinder_puppet/meta.yaml @@ -14,7 +14,7 @@ input: value: 1 database_max_pool_size: schema: str - value: undef + value: '' database_max_retries: schema: int value: 10 @@ -23,7 +23,7 @@ input: value: 10 database_max_overflow: schema: str - value: undef + value: '' rpc_backend: schema: str value: 'cinder.openstack.common.rpc.impl_kombu' @@ -53,13 +53,13 @@ input: value: false kombu_ssl_ca_certs: schema: str - value: undef + value: '' kombu_ssl_certfile: schema: str - value: undef + value: '' kombu_ssl_keyfile: schema: str - value: undef + value: '' kombu_ssl_version: schema: str value: 'TLSv1' @@ -149,13 +149,13 @@ input: value: false mysql_module: schema: str - value: undef + value: '' sql_connection: schema: str - value: undef + value: '' sql_idle_timeout: schema: str - value: undef + value: '' db_user: schema: str! 
From 2a7f9587f568b84a3e0cd45bcd6cc164af119c6d Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Mon, 20 Jul 2015 16:31:13 +0200 Subject: [PATCH 40/86] Fix cinder endpoint Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index 62b6babf..78d3cc83 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -218,9 +218,9 @@ def deploy(): 'cinder_keystone_service_endpoint', 'resources/keystone_service_endpoint', { 'endpoint_name': 'cinder', - 'adminurl': 'http://{{admin_ip}}:{{admin_port}}', - 'internalurl': 'http://{{internal_ip}}:{{internal_port}}', - 'publicurl': 'http://{{public_ip}}:{{public_port}}', + 'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s', + 'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s', + 'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s', 'description': 'OpenStack Block Storage Service', 'type': 'volume'})[0] signals.connect(node1, cinder_puppet) From 2e6c8ba67655c7b5541204efcd4198be4fc95684 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Mon, 20 Jul 2015 21:48:32 +0000 Subject: [PATCH 41/86] Use default nodename when removing --- resources/rabbitmq_service/actions/remove.pp | 3 --- 1 file changed, 3 deletions(-) diff --git a/resources/rabbitmq_service/actions/remove.pp b/resources/rabbitmq_service/actions/remove.pp index 1047bc46..f1121da5 100644 --- a/resources/rabbitmq_service/actions/remove.pp +++ b/resources/rabbitmq_service/actions/remove.pp @@ -1,11 +1,8 @@ $resource = hiera($::resource_name) -$node_name = $resource['input']['node_name']['value'] - class { '::rabbitmq': package_ensure => 'absent', environment_variables => { - 'RABBITMQ_NODENAME' => $node_name, 'RABBITMQ_SERVICENAME' => 'RabbitMQ' } } From 322e1bb8a019079f047276fece775cbc7e6c42e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Mon, 20 Jul 2015 22:30:33 +0000 Subject: [PATCH 42/86] Neutron fixes - use correct role name - use correct rabbitmq user/password - set vhost name --- example-puppet.py | 12 +++++++----- resources/neutron_puppet/actions/run.pp | 4 +++- resources/neutron_puppet/meta.yaml | 3 +++ 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index 72d4f431..7f344aad 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -154,17 +154,14 @@ def deploy(): # NEUTRON # TODO: vhost cannot be specified in neutron Puppet manifests so this user has to be admin anyways - neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', { - 'rabbitmq_user': 'guest', - 'rabbitmq_password': 'guest' - })[0] + neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {})[0] neutron_keystone_user = vr.create('neutron_keystone_user', 'resources/keystone_user', { 'user_name': 'neutron', 'user_password': 'neutron' })[0] neutron_keystone_role = vr.create('neutron_keystone_role', 'resources/keystone_role', { - 'role_name': 'neutron' + 'role_name': 'admin' })[0] neutron_keystone_service_endpoint = vr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', { 'endpoint_name': 'neutron', @@ -180,6 +177,11 @@ def deploy(): 'ip': 'rabbitmq_host', 'port': 'rabbitmq_port' }) + signals.connect(openstack_rabbitmq_user, neutron_puppet, { + 'user_name': 'rabbitmq_user', + 'password': 'rabbitmq_password'}) + signals.connect(openstack_vhost, neutron_puppet, { + 'vhost_name': 'rabbitmq_virtual_host'}) 
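The cinder endpoint fix in PATCH 40 above mixes two placeholder styles in one URL: the `{{admin_ip}}`/`{{admin_port}}` parts and the literal `%(tenant_id)s` suffix. Below is a minimal sketch of how the two layers might coexist, assuming the `{{...}}` parts are rendered from connected inputs (something these patches do not show) while `%(tenant_id)s` is left for Keystone to expand per request; the host and port are made-up example values.

```
# Illustrative sketch only, not Solar code: two-stage substitution of an endpoint URL.
template = 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s'

inputs = {'admin_ip': '10.0.0.2', 'admin_port': 8776}   # assumed example values
rendered = template
for name, value in inputs.items():
    rendered = rendered.replace('{{%s}}' % name, str(value))

print(rendered)                          # http://10.0.0.2:8776/v2/%(tenant_id)s
print(rendered % {'tenant_id': 'demo'})  # how Keystone fills in the tenant per request
```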
signals.connect(admin_user, neutron_puppet, { 'user_name': 'keystone_user', 'user_password': 'keystone_password', diff --git a/resources/neutron_puppet/actions/run.pp b/resources/neutron_puppet/actions/run.pp index c57f3af8..299636bc 100644 --- a/resources/neutron_puppet/actions/run.pp +++ b/resources/neutron_puppet/actions/run.pp @@ -1,4 +1,4 @@ -$resource = hiera('{{ resource_name }}') +$resource = hiera($::resource_name) $ip = $resource['input']['ip']['value'] @@ -6,6 +6,7 @@ $rabbitmq_user = $resource['input']['rabbitmq_user']['value'] $rabbitmq_password = $resource['input']['rabbitmq_password']['value'] $rabbitmq_host = $resource['input']['rabbitmq_host']['value'] $rabbitmq_port = $resource['input']['rabbitmq_port']['value'] +$rabbitmq_virtual_host = $resource['input']['rabbitmq_virtual_host']['value'] $keystone_host = $resource['input']['keystone_host']['value'] $keystone_port = $resource['input']['keystone_port']['value'] @@ -23,6 +24,7 @@ class { 'neutron': rabbit_password => $rabbitmq_password, rabbit_host => $rabbitmq_host, rabbit_port => $rabbitmq_port, + rabbit_virtual_host => $rabbitmq_virtual_host, service_plugins => ['metering'] } diff --git a/resources/neutron_puppet/meta.yaml b/resources/neutron_puppet/meta.yaml index a6bc77a5..ca43ceb0 100644 --- a/resources/neutron_puppet/meta.yaml +++ b/resources/neutron_puppet/meta.yaml @@ -24,6 +24,9 @@ input: rabbitmq_password: schema: str! value: '' + rabbitmq_virtual_host: + schema: str! + value: '' git: schema: {repository: str!, branch: str!} From 92a3bf21952ca3d0e2f761f4c2fa679a2064f7ce Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 21 Jul 2015 12:13:22 +0200 Subject: [PATCH 43/86] Fix rabbitmq connection for cinder resource Align with neutron fixes in https://github.com/Mirantis/solar/pull/41 Signed-off-by: Bogdan Dobrelya --- example-puppet.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/example-puppet.py b/example-puppet.py index 78d3cc83..8089312d 100644 --- a/example-puppet.py +++ b/example-puppet.py @@ -204,8 +204,7 @@ def deploy(): }) # CINDER - cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', { - 'rabbit_userid': 'guest', 'rabbit_password': 'guest'})[0] + cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0] cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', { 'db_name': 'cinder_db', 'login_user': 'root'})[0] cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', { @@ -229,7 +228,7 @@ def deploy(): signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'}) signals.connect(admin_user, cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #? 
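The `signals.connect` calls in these hunks take a mapping whose keys are value names on the emitting resource and whose values are input names on the receiving resource, for example `user_name` to `rabbit_userid`. The toy model below shows the intent of such a mapping; it is a simplified sketch, not Solar's actual signals implementation.

```
# Simplified sketch of what an input mapping expresses; not the real signals module.
def connect(emitter, receiver, mapping=None):
    """Copy values from the emitter into the receiver according to mapping."""
    for src_name, dst_name in (mapping or {}).items():
        receiver[dst_name] = emitter[src_name]

openstack_rabbitmq_user = {'user_name': 'openstack', 'password': 'secret'}
cinder_puppet = {'rabbit_userid': '', 'rabbit_password': ''}

connect(openstack_rabbitmq_user, cinder_puppet,
        {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})

print(cinder_puppet)  # {'rabbit_userid': 'openstack', 'rabbit_password': 'secret'}
```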
signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'}) - #signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'}) + signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'}) signals.connect(mariadb_service1, cinder_db, {'port': 'login_port', 'root_password': 'login_password'}) signals.connect(mariadb_service1, cinder_db_user, {'port': 'login_port', 'root_password': 'login_password'}) signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'}) From b01f370de8b02109e6738532fc17b2c7f8581090 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Tue, 21 Jul 2015 14:59:17 +0300 Subject: [PATCH 44/86] Add basic documentation for orchestration/system log usage --- docs/orchestration.md | 65 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 docs/orchestration.md diff --git a/docs/orchestration.md b/docs/orchestration.md new file mode 100644 index 00000000..d4a8b029 --- /dev/null +++ b/docs/orchestration.md @@ -0,0 +1,65 @@ +# Overview of orchestration commands and system log integration + +After the user has created all required resources, it is possible to automatically +detect which resources require changes with + +``` +solar changes stage +``` + +After changes are staged, they are used to populate the history, which can be seen +with the following command (the *n* option limits the number of items; -1 returns all changes) + +``` +solar changes history -n 5 +``` + +The user can generate a deployment scenario based on the changes found by the system log. +``` +solar changes process +``` + +This command prepares the deployment graph and returns the uid of the deployment graph to +work with. + +All commands that manipulate the deployment graph are located in the +*orch* namespace. + +The report prints all deployment tasks in topological order, with their status +and an error message if a task's status is *ERROR* +``` +solar orch report +``` + +To see a picture of the deployment dependencies, use the following command +``` +solar orch dg +``` +Keep in mind that this is not a representation of all the edges kept in the graph; +transitive reduction is used to leave only the edges that are important for the +order of traversal.
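The note on transitive reduction can be made concrete with a small NetworkX sketch. This illustrates the general technique used when drawing the picture, not the code behind `solar orch dg`.

```
import networkx as nx

def transitive_reduction(dg):
    """Drop edges of a DAG that are implied by longer paths."""
    reduced = dg.copy()
    for u, v in dg.edges():
        reduced.remove_edge(u, v)
        if not nx.has_path(reduced, u, v):
            reduced.add_edge(u, v)  # the edge was essential, restore it
    return reduced

dg = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'c')])
print(list(transitive_reduction(dg).edges()))  # [('a', 'b'), ('b', 'c')], 'a'->'c' is implied
```

Recent NetworkX releases also ship `nx.transitive_reduction`, which could replace the helper above.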
+ +Execute deployment +``` +solar orch run-once +``` + +Gracefully stop deployment, after all already scheduled tasks are finished +``` +solar orch stop +``` + +Continue deployment execution for all tasks that are SKIPPED +``` +solar orch resume +``` + +All tasks will be returned to PENDING state, and deployment will be restarted +``` +solar orch restart +``` + +Orchestrator will retry tasks in ERROR state and continue execution +``` +solar orch retry +``` From 0e16ac9517c23ff67d0af5bc508afc731c62e98f Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Tue, 21 Jul 2015 14:19:46 +0200 Subject: [PATCH 45/86] Fix remove and resource name ref for cinder Signed-off-by: Bogdan Dobrelya --- resources/cinder_puppet/actions/remove.pp | 4 ++-- resources/cinder_puppet/actions/run.pp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/resources/cinder_puppet/actions/remove.pp b/resources/cinder_puppet/actions/remove.pp index 7d990fec..21f5561d 100644 --- a/resources/cinder_puppet/actions/remove.pp +++ b/resources/cinder_puppet/actions/remove.pp @@ -1,4 +1,4 @@ class {'cinder': - enabled => false, - package_ensure => 'absent' + package_ensure => 'absent', + rabbit_password => 'not important as removed', } diff --git a/resources/cinder_puppet/actions/run.pp b/resources/cinder_puppet/actions/run.pp index 851e3945..1be5df47 100644 --- a/resources/cinder_puppet/actions/run.pp +++ b/resources/cinder_puppet/actions/run.pp @@ -1,4 +1,4 @@ -$resource = hiera('{{ resource_name }}') +$resource = hiera($::resource_name) $ip = $resource['input']['ip']['value'] From e2d4eca435728d1032e38d87902d7d039a949fe3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 21 Jul 2015 13:38:49 +0000 Subject: [PATCH 46/86] Update virtual resource for current branch --- resources/keystone_puppet/actions/run.pp | 4 ++- resources/keystone_puppet/meta.yaml | 3 ++ templates/controller.yml | 39 ++++++++---------------- templates/keystone_api.yml | 19 ++++-------- templates/openstack_base.yml | 15 ++++++++- 5 files changed, 38 insertions(+), 42 deletions(-) diff --git a/resources/keystone_puppet/actions/run.pp b/resources/keystone_puppet/actions/run.pp index 9724fb2f..7845a793 100644 --- a/resources/keystone_puppet/actions/run.pp +++ b/resources/keystone_puppet/actions/run.pp @@ -3,6 +3,7 @@ $resource = hiera($::resource_name) $ip = $resource['input']['ip']['value'] $admin_token = $resource['input']['admin_token']['value'] $db_user = $resource['input']['db_user']['value'] +$db_host = $resource['input']['db_host']['value'] $db_password = $resource['input']['db_password']['value'] $db_name = $resource['input']['db_name']['value'] $admin_port = $resource['input']['admin_port']['value'] @@ -13,8 +14,9 @@ class {'keystone': verbose => true, catalog_type => 'sql', admin_token => $admin_token, - database_connection => "mysql://$db_user:$db_password@$ip/$db_name", + database_connection => "mysql://$db_user:$db_password@$db_host/$db_name", public_port => "$port", + admin_port => "$admin_port", token_driver => 'keystone.token.backends.kvs.Token' } diff --git a/resources/keystone_puppet/meta.yaml b/resources/keystone_puppet/meta.yaml index 8ee657ad..cab4e1ae 100644 --- a/resources/keystone_puppet/meta.yaml +++ b/resources/keystone_puppet/meta.yaml @@ -15,6 +15,9 @@ input: db_name: schema: str! value: keystone + db_host: + schema: str! + value: keystone admin_port: schema: int! 
diff --git a/templates/controller.yml b/templates/controller.yml index 194fd319..9376d04b 100644 --- a/templates/controller.yml +++ b/templates/controller.yml @@ -35,19 +35,18 @@ resources: from: templates/keystone_api.yml values: idx: 1 - image: 'kollaglue/centos-rdo-k-keystone' - config_dir: '/etc/solar/keystone_config_1' db_password: 'keystone_db_user::user_password' db_user: 'keystone_db_user::user_name' db_port: 'keystone_db_user::login_port' db_name: 'keystone_db_user::db_name' db_host: 'mariadb_service::ip' + admin_token: 132fdsfwqee + admin_port: 35357 + port: 5000 ip: '{{ip}}' ssh_user: '{{ssh_user}}' ssh_key: '{{ssh_key}}' - # TODO: HAproxy - - id: openstack_base from: templates/openstack_base.yml values: @@ -57,34 +56,20 @@ resources: keystone_ip: 'keystone_service_1::ip' keystone_admin_port: 'keystone_service_1::admin_port' keystone_port: 'keystone_service_1::port' - admin_token: 'keystone_config_1::admin_token' + admin_token: 'keystone_service_1::admin_token' - - id: glance_base - from: templates/glance_base.yml + - id: openrc_file + from: resources/openrc_file values: - login_user: root - login_password: 'mariadb_service::root_password' - login_port: 'mariadb_service::port' - db_name: 'glance' - user_password: 'glance' - user_name: 'glance' + keystone_host: 'keystone_service_1::ip' + keystone_port: 'keystone_service_1::admin_port' + tenant: 'admin_user::tenant_name' + user_name: 'admin_user::user_name' + password: 'admin_user::user_password' + ip: '{{ip}}' - ssh_user: '{{ssh_user}}' ssh_key: '{{ssh_key}}' - - - id: glance_registry_1 - from: templates/glance_registry.yml - values: - idx: 1 - keystone_admin_port: 'keystone_service_1::admin_port' - keystone_ip: 'keystone_service_1::ip' - mysql_password: 'glance_db_user::user_password' - mysql_user: 'keystone_db_user::user_name' - mysql_db: 'keystone_db_user::db_name' - mysql_ip: 'mariadb_service::ip' - ip: '{{ip}}' ssh_user: '{{ssh_user}}' - ssh_key: '{{ssh_key}}' tags: ['resources/controller', 'resource/primary_controller'] diff --git a/templates/keystone_api.yml b/templates/keystone_api.yml index a7e2efa4..381f7093 100644 --- a/templates/keystone_api.yml +++ b/templates/keystone_api.yml @@ -1,27 +1,20 @@ id: keystone_api_{{idx}} resources: - - id: keystone_config_{{idx}} - from: resources/keystone_config + - id: keystone_service_{{idx}} + from: resources/keystone_puppet values: - config_dir: '/etc/solar/keystone_{{idx}}' + admin_token: '{{admin_token}}' db_host: '{{db_host}}' - db_port: '{{db_port}}' db_name: '{{db_name}}' db_user: '{{db_user}}' db_password: '{{db_password}}' + + admin_port: '{{admin_port}}' + port: '{{port}}' ip: '{{ip}}' ssh_user: '{{ssh_user}}' ssh_key: '{{ssh_key}}' - - id: keystone_service_{{idx}} - from: resources/keystone_service - values: - image: 'kollaglue/centos-rdo-j-keystone' - config_dir: 'keystone_config_{{idx}}::config_dir' - ip: 'keystone_config_{{idx}}::ip' - ssh_user: 'keystone_config_{{idx}}::ssh_user' - ssh_key: 'keystone_config_{{idx}}::ssh_key' - tags: ['resources/keystone', 'resource/keystone_api'] diff --git a/templates/openstack_base.yml b/templates/openstack_base.yml index 73268028..db34acf8 100644 --- a/templates/openstack_base.yml +++ b/templates/openstack_base.yml @@ -25,6 +25,19 @@ resources: ssh_user: '{{ssh_user}}' ssh_key: '{{ssh_key}}' + - id: admin_role + from: resources/keystone_role + values: + role_name: 'admin' + user_name: 'admin_user::user_name' + tenant_name: 'admin_user::tenant_name' + keystone_port: '{{keystone_admin_port}}' + keystone_host: '{{keystone_ip}}' + 
admin_token: '{{admin_token}}' + ip: '{{ip}}' + ssh_user: '{{ssh_user}}' + ssh_key: '{{ssh_key}}' + - id: keystone_service_endpoint from: resources/keystone_service_endpoint values: @@ -35,7 +48,7 @@ resources: {% endraw %} description: 'OpenStack Identity Service' type: 'identity' - name: 'keystone' + endpoint_name: 'keystone' admin_port: '{{keystone_admin_port}}' public_port: '{{keystone_port}}' internal_port: '{{keystone_port}}' From c0d2bfb27a477f12857c208eecc58efe2021f196 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 21 Jul 2015 22:14:43 +0000 Subject: [PATCH 47/86] Use correct port numbers --- resources/rabbitmq_service/meta.yaml | 4 ++-- templates/keystone_api.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/resources/rabbitmq_service/meta.yaml b/resources/rabbitmq_service/meta.yaml index 69ef8a08..a1535450 100644 --- a/resources/rabbitmq_service/meta.yaml +++ b/resources/rabbitmq_service/meta.yaml @@ -16,10 +16,10 @@ input: value: 'node1' port: schema: int! - value: '' + value: 5672 management_port: schema: int! - value: '' + value: 15672 git: schema: {repository: str!, branch: str!} value: {repository: 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git', branch: '5.1.0'} diff --git a/templates/keystone_api.yml b/templates/keystone_api.yml index 381f7093..a37804cf 100644 --- a/templates/keystone_api.yml +++ b/templates/keystone_api.yml @@ -10,8 +10,8 @@ resources: db_user: '{{db_user}}' db_password: '{{db_password}}' - admin_port: '{{admin_port}}' - port: '{{port}}' + admin_port: {{admin_port}} + port: {{port}} ip: '{{ip}}' ssh_user: '{{ssh_user}}' ssh_key: '{{ssh_key}}' From b14dfc89446b4950b49dd2b1cca6327bf56a034a Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 22 Jul 2015 12:53:08 +0300 Subject: [PATCH 48/86] Take into account only predecessors that are changed Processing nodes in topological order will guarantee that all changed predecessors of a given node are already in a orchestration graph --- solar/solar/system_log/change.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/solar/solar/system_log/change.py b/solar/solar/system_log/change.py index 16588c46..34a74589 100644 --- a/solar/solar/system_log/change.py +++ b/solar/solar/system_log/change.py @@ -89,7 +89,7 @@ def send_to_orchestration(): for r in resource.load_all().values()} commited = data.CD() - for res_uid in conn_graph: + for res_uid in nx.topological_sort(conn_graph): commited_data = commited.get(res_uid, {}) staged_data = staged.get(res_uid, {}) @@ -101,7 +101,8 @@ def send_to_orchestration(): errmsg=None, **parameters(res_uid, guess_action(commited_data, staged_data))) for pred in conn_graph.predecessors(res_uid): - dg.add_edge(pred, res_uid) + if pred in dg: + dg.add_edge(pred, res_uid) # what it should be? 
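PATCH 48 leans on a simple property of topological order: when the connection graph is walked that way, every changed predecessor of a node has already been added to the deployment graph by the time the node itself is processed, so edges only need to be drawn to predecessors that are already present. A self-contained illustration of that idea (not the actual `change.py` code):

```
import networkx as nx

# Connection graph: 'db' and 'api' changed, 'keystone' did not.
conn_graph = nx.DiGraph([('db', 'keystone'), ('keystone', 'api'), ('db', 'api')])
changed = {'db': 'update', 'api': 'update'}   # assumed staged changes

dg = nx.DiGraph()
for res in nx.topological_sort(conn_graph):
    if res not in changed:
        continue  # unchanged resources never enter the deployment graph
    dg.add_node(res, status='PENDING', action=changed[res])
    for pred in conn_graph.predecessors(res):
        if pred in dg:  # only predecessors that are themselves changed
            dg.add_edge(pred, res)

print(list(dg.edges()))  # [('db', 'api')], 'keystone' is skipped entirely
```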
dg.graph['name'] = 'system_log' From 6afb6a17bfdc2fd3d4b1e1c7ae178252aa9bb083 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak Date: Wed, 22 Jul 2015 14:53:42 +0300 Subject: [PATCH 49/86] Fix system_log commands in README.md --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5c8190d5..680c26f7 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,9 @@ solar connect mariadb_service keystone_db '{"root_password": "login_password", " solar connect keystone_db keystone_db_user solar changes stage -solar changes commit +solar changes process + +solar orch run-once ``` You can fiddle with the above configuration like this: ``` solar resource update keystone_db_user '{"user_password": "new_keystone_password"}' solar resource update keystone_db_user user_password=new_keystone_password # another valid format solar changes stage -solar changes commit +solar changes process + +solar orch run-once ``` * Show the connections/graph: From dad6d3ffab4c07294b96e98dcef3ba4f35f9a584 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 22 Jul 2015 13:50:54 +0000 Subject: [PATCH 50/86] Create cinder-volume on all slaves. --- Vagrantfile | 2 +- slave_cinder.yml | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 slave_cinder.yml diff --git a/Vagrantfile b/Vagrantfile index 762451aa..c00dd04a 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -16,7 +16,7 @@ slave_script = <