Original schema

Dmitry Shulyak 2015-03-26 17:10:04 -07:00
parent de87eebc4d
commit 77f90fa558
31 changed files with 766 additions and 52 deletions

.gitignore vendored

@@ -1,54 +1,4 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
.vagrant
*.pyc
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/

Vagrantfile vendored Normal file

@@ -0,0 +1,19 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "utopic"

  config.vm.provider "virtualbox" do |v|
    v.customize ["modifyvm", :id, "--memory", 2048]
  end

  config.vm.provision "ansible" do |ansible|
    ansible.playbook = 'main.yml'
  end
end

compose.yml Normal file

@@ -0,0 +1,11 @@
---
- hosts: all
  sudo: yes
  tasks:
    - shell: docker-compose --version
      register: compose
      ignore_errors: true
    - shell: curl -L https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
      when: compose|failed
    - shell: chmod +x /usr/local/bin/docker-compose

main.yml Normal file

@@ -0,0 +1,19 @@
---
- hosts: all
  sudo: yes
  tasks:
    - shell: docker --version
      ignore_errors: true
      register: docker_version
    - shell: curl -sSL https://get.docker.com/ubuntu/ | sudo sh
      when: docker_version|failed
    - shell: docker pull ubuntu
    - shell: apt-get install -y python-pip
    # pip has no -y flag; the PyPI packages are the python-* client names
    - shell: pip install {{item}}
      with_items:
        - python-keystoneclient
        - python-novaclient
        - python-glanceclient
- include: compose.yml
- include: openstack.yml

openstack.yml Normal file

@@ -0,0 +1,19 @@
---
- hosts: all
  sudo: yes
  tasks:
    - shell: sh /vagrant/kolla/tools/genenv
    - shell: docker pull {{item}}
      with_items:
        - kollaglue/centos-rdo-glance-registry
        - kollaglue/centos-rdo-glance-api
        - kollaglue/centos-rdo-keystone
        - kollaglue/centos-rdo-mariadb-data
        - kollaglue/centos-rdo-mariadb-app
        - kollaglue/centos-rdo-nova-conductor:latest
        - kollaglue/centos-rdo-nova-api:latest
        - kollaglue/centos-rdo-nova-scheduler:latest
        - kollaglue/centos-rdo-nova-libvirt
        - kollaglue/centos-rdo-nova-network
        - kollaglue/centos-rdo-nova-compute
        - kollaglue/centos-rdo-rabbitmq

schema/ha.yml Normal file

@@ -0,0 +1,4 @@
---
# how do we control other services with pacemaker/corosync if they
# are installed in containers?

schema/ha_keepalived.yml Normal file

@@ -0,0 +1,23 @@
---
# keepalived ha configuration can be easily containerized
version: v1
# each orchestration item should have an identity
id: puppet_ha
# description should be optional
description: "The basic openstack installation, everything on one node, without ha"
type: profile
# how to approach life cycle management? one way to do it is to add a notion
# of event (deployment, maintenance, upgrade, patch)
# or maybe there will be no difference between them and we can
# drop this field
# event: deployment
# this is a custom executable that will prepare our definition for
# deployment by this handler
driver: puppet_master
# a profile should be able to define resources that are not connected
# to any role
resources:
  - ref: docker

@@ -0,0 +1,12 @@
---
id: compose
type: resource
handler: playbook
version: v1
run:
  # docker-compose has no bare -d flag; detached start is `up -d`
  - shell: docker-compose -f {{compose.item}} up -d
parameters:
  item:
    required: true
    default: /some/path.yml

@@ -0,0 +1,34 @@
---
# ansible playbook to install all docker requirements,
# wrapped in our resource entity
id: docker
type: resource
handler: playbook
version: v1
run:
  # all of this should be moved to a regular ansible playbook;
  # we need to reuse as much of ansible's goodness as possible
  - shell: docker --version
    ignore_errors: true
    register: docker_version
  - shell: curl -sSL https://get.docker.com/ubuntu/ | sudo sh
    when: docker_version|failed
  - shell: docker pull ubuntu
  - shell: apt-get install -y python-pip
  # pip has no -y flag
  - shell: pip install {{item}}
    with_items: docker.openstack_clients
  - shell: docker-compose --version
    register: compose
    ignore_errors: true
  - shell: curl -L https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
    when: compose|failed
  - shell: chmod +x /usr/local/bin/docker-compose
parameters:
  openstack_clients:
    default:
      - python-keystoneclient
      - python-novaclient
      - python-glanceclient
    required: true

@@ -0,0 +1,14 @@
---
id: images
type: resource
driver: playbook
version: v1
run:
  - shell: docker pull {{item}}
    with_items: images.list
parameters:
  list:
    required: true
  another:
    default: 1

@@ -0,0 +1,11 @@
---
id: networks
type: resource
handler: custom_network_schema
version: v1
# run part will be generated
# what kind of parameters?

@@ -0,0 +1,25 @@
---
id: network_eth
handler: network_schema
parameters:
  networks:
    - name: bbc01
      cidr: 10.0.0.4/24
      ip_ranges: []
provides: [network.ips]
---
id: network_infiniband
handler: playbook  # or a dedicated infiniband_schema
---
id: network_check
handler: playbook
run:
  # -c 1 so that the check terminates
  - shell: ping -c 1 {{item}}
    with_items: network.ips

@@ -0,0 +1,21 @@
---
id: mariadb
type: service
version: v1
handler: ansible
resources:
  - id: mariadb_settings
    type: service
    handler: ansible
    run:
      - template: src=mariadb dest=mariadb.conf
    parameters:
      port: 8000
      host: localhost
      password: admin
      user: admin
  - ref: compose
    overwrite:
      item: mariadb.yml
      endpoint: "{{node.management.ip}}:4324"

schema/simple.yml Normal file

@@ -0,0 +1,78 @@
---
# we need a strategy around versioning, because right now there are
# too many moving components, and such an approach doesn't scale
version: v1
# each orchestration item should have an identity
id: simple_profile
# description should be optional
description: "The basic openstack installation, everything on one node, without ha"
type: profile
# how to approach life cycle management? one way to do it is to add a notion
# of event (deployment, maintenance, upgrade, patch)
# or maybe there will be no difference between them and we can
# drop this field
# event: deployment
# this is a custom executable that will prepare our definition for
# deployment by this handler
driver: ansible
# a profile should be able to define resources that are not connected
# to any role
# resources:
#   - ref: docker
roles:
  # id is used to specify that the current entity is created right here
  - id: localhost
    type: role
    resources:
      - id: keys
  - id: controller
    type: role
    # version: v1
    resources:
      # ref is used to specify that the resource is created externally
      - ref: images
        overwrite:
          list:
            - kollaglue/centos-rdo-glance-registry
            - kollaglue/centos-rdo-glance-api
            - kollaglue/centos-rdo-keystone
            - kollaglue/centos-rdo-mariadb-data
            - kollaglue/centos-rdo-mariadb-app
            - kollaglue/centos-rdo-nova-conductor:latest
            - kollaglue/centos-rdo-nova-api:latest
            - kollaglue/centos-rdo-nova-scheduler:latest
            - kollaglue/centos-rdo-nova-libvirt
            - kollaglue/centos-rdo-nova-network
            - kollaglue/centos-rdo-nova-compute
            - kollaglue/centos-rdo-rabbitmq
    services:
      - id: compute
        type: service
        resources:
          # the ebtables module must be loaded for the nova-compute
          # service, and it is not possible to load additional modules
          # from the container itself
          - id: enable_ebtables
            type: resource
            handler: playbook
            run:
              - shell: modprobe ebtables
          - ref: compose
            # overwrite is used to change default settings on a resource
            overwrite:
              # how to find them?
              # 1. relative path
              item: nova-compute-network.yml
        # required to make roles and services loosely coupled
        provides:
          # how to fetch information for the node?
          endpoint: "{{network.management.ip}}:{{compute.api.port}}"
      - id: rabbitmq
        type: service
        resources:
          - ref: compose
            overwrite:
              item: rabbitmq.yml
              endpoint: "{{node.management.ip}}:5572"
      - ref: mariadb

spec/base.spec Normal file

@@ -0,0 +1,86 @@
First draft of a fully functional specification for the new deployment tool.
This specification proposes that the new solution should be fully
pluggable right from the start.
Fuel should be split into at least several repositories.
The first would be fuel-api, holding only the core api functionality
of the new tool.
The second should be named something like fuel-core
(similar to ansible-core-modules). This will hold our verified
tooling for deploying openstack the way we want to deploy it.
Inventory api
==============
Do we want to reuse the existing entities, but in a new way? Or do we need to
reconsider them? I am sure that we can do better with networking;
the current solution just masks bad decisions that were made in the past.
Orchestration api
====================
Resources
---------
Each resource should define deployment logic in a tool known to fuel,
and parameters that can be modified by the user or by another layer of the inventory.
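For example, a minimal resource definition (a sketch mirroring the images
resource under schema/ in this commit):
::

    ---
    id: images
    type: resource
    driver: playbook
    version: v1
    run:
      - shell: docker pull {{item}}
        with_items: images.list
    parameters:
      list:
        required: true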
Services
--------
A single resource, or several of them, wrapped in an additional data structure.
The primary purpose of a service is to provide loose coupling between
roles and resources, which can be achieved with additional
parameters like endpoints.
::

    endpoints: {{node.management.ip}}:5000

Installing a fully functional service is not only a matter of running
docker-compose; we also need to execute some additional
tasks on the host system.
One more challenge is verifying that the service actually started working
(how and when should that be performed?)
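A minimal service might look like this (a sketch condensed from the rabbitmq
service in schema/simple.yml):
::

    - id: rabbitmq
      type: service
      resources:
        - ref: compose
          overwrite:
            item: rabbitmq.yml
            endpoint: "{{node.management.ip}}:5572"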
Role
-----
A combination of services and resources.
The main reason to introduce one is to give the user an easy way
to map nodes in the cluster to the desired functionality.
How do we handle the difference between primary-{{role}} and {{role}}?
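A role then simply groups the two (a sketch condensed from the controller
role in schema/simple.yml):
::

    - id: controller
      type: role
      resources:
        - ref: images
      services:
        - ref: mariadb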
Profiles
--------
The profile is the main entity that the user will work with.
Several opinionated profiles will be provided by us, and they should
be valuable mainly because of our expertise in deploying openstack.
Each of these entities will have a handler parameter that processes the
provided data through a pluggable pythonic architecture.
::

    # profile handler
    handler: ansible

::

    # network_schema will require some amount of modification and
    # transformation, which is easier to accomplish within python code
    handler: network_schema
Modular architecture
====================
Every piece of fuel-core will be developed in a modular style,
and within a module the developer should be able to add or change
entities like:
- deployment logic (ansible or other deployment code)
- fuel pythonic handlers or other interfaces for pythonic plugins
- resources
- profiles
- services
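For illustration, a module could be laid out like this (the directory layout
below is an assumption, not something this spec fixes):
::

    mymodule/
        playbooks/      # deployment logic (ansible)
        handlers/       # pythonic plugins
        resources/      # resource definitions (yaml)
        profiles/
        services/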

spec/inventory.spec Normal file

@@ -0,0 +1,14 @@
The inventory mechanism should provide an easy way for the user to change any
piece of the deployment configuration.
This means several things:
1. When writing modules, developers should take into account the possibility
that users will modify them. Development may take a little bit longer, but we
are developing a tool that will cover not one particular use case
but a broad range of customized production deployments.
2. Each resource should define what is changeable.
Then, before deployment, we will be able to know which resources
are used at the node/cluster level and modify them the way we want.
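For example, following the parameters convention used by the resources under
schema/ (the port and password fields here are illustrative), a resource marks
its changeable knobs with required/default:
::

    parameters:
      port:
        default: 8000
      password:
        required: true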

spec/networking.spec Normal file

@@ -0,0 +1,81 @@
We should make the network a separate resource, for which we should be
able to add custom handlers.
This resource will actually serialize tasks and provide inventory
information.
Input:
Different entities in a custom database, like networks and nodes, and maybe
interfaces and other things.
Another input is parameters, like ovs/linux (these may be parameters or
different tasks).
Output:
A list of ansible tasks for the orchestrator to execute, like
::

    shell: ovs-vsctl add-br {{networks.management.bridge}}

and data for the inventory.
Networking entities
-----------------------
A network can have a list of subnets that are attached to different node racks.
Each subnet stores L3 parameters, such as cidr and ip ranges.
L2 parameters, such as vlans, can be stored on the network.
Roles should be attached to the network, and different subnets cannot
be used as different roles per rack.
How it should work:
1. An untagged network is created with some l2 parameters, like a vlan
2. A subnet is created for this network with params (10.0.0.0/24)
3. The user attaches the network to a cluster with the roles public/management/storage
4. A role can also store l2 parameters (bridge, mtu)
5. The user creates a rack and uses this subnet
6. IPs are assigned to each node in this rack from each subnet
7. During deployment we create bridges based on the roles.
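A sketch of how these entities might be serialized (name, cidr, and ip_ranges
follow the network_schema example in this commit; the vlan, roles, and rack
fields are assumptions):
::

    - name: bbc01
      vlan: 100
      roles: [public, management]
      subnets:
        - cidr: 10.0.0.0/24
          ip_ranges: []
          rack: rack-1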
URIs
-------
/networks/
    vlan
    mtu
/networks/<network_id>/subnets
    cidr
    ip ranges
    gateway
/clusters/<cluster_id>/networks/
    subset of networks attached to the cluster
/clusters/<cluster_id>/networks/<network_id>/network_roles
    roles attached to a particular network
/network_roles/
    bridge
/clusters/<cluster_id>/racks/<rack_id>/subnets
/clusters/<cluster_id>/racks/<rack_id>/nodes

spec/questions Normal file

@@ -0,0 +1,52 @@
Entities
------------
We clearly need orchestration entities like:
1. resources/roles/services/profiles
We also need inventory entities:
2. nodes/networks/ifaces/cluster/release ?
Q: how do we allow developers to extend these entities via modules?
Options:
1. Use a completely schema-less data model
(I am personally more comfortable with sql-like data models)
2. Don't allow anything except the standard entities; if a developer needs
to manage custom data, they can create their own micro-service and
then integrate it via a custom type of resource
(one which performs a query to a third-party service)
Identities and namespaces
---------------------------
Identities are required for several reasons:
- to allow reuse of entities that were created once
- to provide a clear api to operate on entities
- to specify dependencies via identities
Will the root namespace be polluted with those entities?
Options:
1. We can create some variable namespace explicitly
2. Or use something like namespace/entity (for example contrail/network)
Multiple options for configuration
----------------------------------
If the same parameter is defined in different
modules, how should this behave?
1. The first option is to concatenate the options and make a list of choices.
2. Raise a validation error that a certain thing cannot be enabled together with another.
It looks like both should be supported, as sketched below.
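For instance (dns_list and its values are hypothetical, shown only to
illustrate option 1):
::

    # module A
    dns_list: [8.8.8.8]
    # module B
    dns_list: [172.18.0.2]
    # concatenated into a list of choices offered to the user
    dns_list: [8.8.8.8, 172.18.0.2]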
Deployment code
----------------
We need to be able to expose all the functionality of any
particular deployment tool.
Current challenge: how do we specify the path to some deployment logic?

tool/setup.py Normal file

@@ -0,0 +1,13 @@
from setuptools import setup, find_packages

setup(
    name='tool',
    version='0.1',
    license='Apache License 2.0',
    packages=find_packages(),
    include_package_data=True,
    install_requires=['ansible', 'networkx', 'pyyaml', 'argparse'],
    entry_points="""
    [console_scripts]
    tool=tool.main:main
    """,
)

tool/tool/__init__.py Normal file

tool/tool/main.py Normal file

@@ -0,0 +1,35 @@
import argparse

import yaml

from tool import storage
from tool.profile_handlers import process


def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p',
        '--profile',
        help='profile file',
        required=True)
    parser.add_argument(
        '-r',
        '--resources',
        help='resources dir',
        required=True)
    return parser.parse_args()


def main():
    args = parse()
    with open(args.profile) as f:
        profile = yaml.safe_load(f)
    # load all resource/service/profile definitions from the resources dir
    resources = storage.Storage.from_files(args.resources)
    return process(profile, resources)

@@ -0,0 +1,9 @@
from tool.profile_handlers import ansible


def process(profile, resources):
    # it should be a factory dispatching on the profile's driver
    return ansible.process(profile, resources)

@@ -0,0 +1,16 @@
from tool.resource_handlers import playbook
from tool.service_handlers import base


class AnsibleProfile(object):
    """This profile handler should just serialize the profile
    for ansible to execute.
    """

    def __init__(self, storage, config):
        self.storage = storage
        self.config = config

    def inventory(self):
        pass

@@ -0,0 +1,32 @@
"""The point of different ansible graph handlers is that graph data model
allows to decidy which tasks to run in background.
"""
import networkx as nx
class ProfileGraph(nx.DiGraph):
def __init__(self, profile):
super(ProfileGraph, self).__init__()
def add_resources(self, entity):
resources = entity.get('resources', [])
for res in resources:
self.add_resource(res)
def add_resource(self, resource):
self.add_node(resource['id'])
for dep in resource.get('requires', []):
self.add_edge(dep, resource['id'])
for dep in resource.get('required_for', []):
self.add_edge(resource['id'], dep)
def process(profile, resources):
# here we should know how to traverse profile data model
# that is specific to ansible
graph = nx.DiGraph(profile)

@@ -0,0 +1,34 @@
class BaseResource(object):

    def __init__(self, config, hosts='all'):
        """
        config - data described in configuration files
        hosts - can be overwritten if the resource is inside of a role,
        or maybe changed for some resource directly
        """
        self.config = config
        self.uid = config['id']
        self.hosts = hosts

    def prepare(self):
        """Make some changes in database state."""

    def inventory(self):
        """Return data that will be used for inventory."""
        params = self.config.get('parameters', {})
        res = {}
        for param, values in params.items():
            res[param] = values.get('value') or values.get('default')
        return res

    def run(self):
        """Return data that will be used by the orchestration framework."""
        raise NotImplementedError('Mandatory to overwrite')

@@ -0,0 +1,20 @@
"""
This handler required for custom modification for the networks resource.
It will create all required tasks for things like ovs/linux network
entities.
"""
from tool.resoure_handlers import base
class NetworkSchema(base.BaseResource):
def __init__(self, parameters):
pass
def add_bridge(self, bridge):
return 'shell: ovs-vsctl add-br {0}'.format(bridge)
def add_port(self, bridge, port):
return 'shell: ovs-vsctl add-port {0} {1}'.format(bridge, port)

@@ -0,0 +1,14 @@
"""
Just find or create existing playbook.
"""
from tool.resource_handlers import base
class Playbook(base.BaseResource):
def run(self):
return {
'hosts': self.hosts,
'tasks': self.config.get('run', []),
'sudo': 'yes'}

@@ -0,0 +1,14 @@
class BaseService(object):

    def __init__(self, resources, hosts='all'):
        self.hosts = hosts
        self.resources = resources

    def run(self):
        for resource in self.resources:
            yield resource.run()

# how should a service be different from a resource, apart from providing
# additional data?

tool/tool/storage.py Normal file

@@ -0,0 +1,54 @@
import os
from fnmatch import fnmatch

import yaml


def get_files(path, pattern):
    for root, dirs, files in os.walk(path):
        for file_name in files:
            if fnmatch(file_name, pattern):
                yield os.path.join(root, file_name)


class Storage(object):

    def __init__(self):
        self.entities = {}

    def add(self, resource):
        if 'id' in resource:
            self.entities[resource['id']] = resource

    def add_profile(self, profile):
        self.entities[profile['id']] = profile
        for res in profile.get('resources', []):
            self.add_resource(res)

    def add_resource(self, resource):
        if 'id' in resource:
            self.entities[resource['id']] = resource

    def add_service(self, service):
        if 'id' in service:
            self.entities[service['id']] = service
        for resource in service.get('resources', []):
            self.add_resource(resource)

    def get(self, resource_id):
        return self.entities[resource_id]

    @classmethod
    def from_files(cls, path):
        storage = cls()
        for file_path in get_files(path, '*.yml'):
            with open(file_path) as f:
                entity = yaml.safe_load(f)
            if entity['type'] == 'profile':
                storage.add_profile(entity)
            elif entity['type'] == 'resource':
                storage.add_resource(entity)
            elif entity['type'] == 'service':
                storage.add_service(entity)
        return storage