diff --git a/.gitignore b/.gitignore index 83a9627c..043c504a 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,6 @@ .vagrant tmp/ + +#vim +*.swp diff --git a/Vagrantfile b/Vagrantfile index c57d5e05..1d12b1ba 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -13,21 +13,24 @@ SCRIPT Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "rustyrobot/deb-jessie-amd64" + config.vm.box = "deb/jessie-amd64" + #rustyrobot/deb-jessie-amd64" config.vm.define "solar-dev", primary: true do |guest1| guest1.vm.provision "shell", inline: init_script, privileged: true guest1.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private" + guest1.vm.provision "file", source: "ansible.cfg", destination: "/home/vagrant/.ansible.cfg" guest1.vm.network "private_network", ip: "10.0.0.2" guest1.vm.host_name = "solar-dev" guest1.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 2048] + v.customize ["modifyvm", :id, "--memory", 256] v.name = "solar-dev" end end config.vm.define "solar-dev2" do |guest2| + guest2.vm.provision "shell", inline: init_script, privileged: true guest2.vm.network "private_network", ip: "10.0.0.3" guest2.vm.host_name = "solar-dev2" @@ -37,4 +40,25 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end end + config.vm.define "solar-dev3" do |guest3| + guest3.vm.provision "shell", inline: init_script, privileged: true + guest3.vm.network "private_network", ip: "10.0.0.4" + guest3.vm.host_name = "solar-dev3" + + guest3.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 1024] + v.name = "solar-dev3" + end + end + + config.vm.define "solar-dev4" do |guest4| + guest4.vm.provision "shell", inline: init_script, privileged: true + guest4.vm.network "private_network", ip: "10.0.0.5" + guest4.vm.host_name = "solar-dev4" + + guest4.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 1024] + v.name = "solar-dev4" + end + end end diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 00000000..14c80651 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +host_key_checking = False diff --git a/cli.py b/cli.py new file mode 100644 index 00000000..c0f72eef --- /dev/null +++ b/cli.py @@ -0,0 +1,182 @@ +import click +import json +#import matplotlib +#matplotlib.use('Agg') # don't show windows +#import matplotlib.pyplot as plt +import networkx as nx +import os +import subprocess + +from x import actions as xa +from x import deployment as xd +from x import resource as xr +from x import signals as xs + + +@click.group() +def cli(): + pass + + +def init_cli_resource(): + @click.group() + def resource(): + pass + + cli.add_command(resource) + + @click.command() + @click.argument('resource_path') + @click.argument('action_name') + def action(action_name, resource_path): + print 'action', resource_path, action_name + r = xr.load(resource_path) + xa.resource_action(r, action_name) + + resource.add_command(action) + + @click.command() + @click.argument('name') + @click.argument('base_path') + @click.argument('dest_path') + @click.argument('args') + def create(args, dest_path, base_path, name): + print 'create', name, base_path, dest_path, args + args = json.loads(args) + xr.create(name, base_path, dest_path, args) + + resource.add_command(create) + + @click.command() + @click.argument('resource_path') + @click.argument('tag_name') + @click.option('--add/--delete', default=True) + def tag(add, tag_name, resource_path): + print 'Tag', resource_path, tag_name, 
add + r = xr.load(resource_path) + if add: + r.add_tag(tag_name) + else: + r.remove_tag(tag_name) + r.save() + + resource.add_command(tag) + + @click.command() + @click.argument('path') + @click.option('--all/--one', default=False) + @click.option('--tag', default=None) + def show(tag, all, path): + if all or tag: + for name, resource in xr.load_all(path).items(): + show = True + if tag: + if tag not in resource.tags: + show = False + + if show: + print resource + print + else: + print xr.load(path) + + resource.add_command(show) + + @click.command() + @click.argument('path') + @click.argument('args') + def update(args, path): + print 'Update', path, args + args = json.loads(args) + # Need to load all resources for bubbling effect to take place + # TODO: resources can be scattered around, this is a simple + # situation when we assume resources are all in one directory + base_path, name = os.path.split(path) + all = xr.load_all(base_path) + r = all[name] + r.update(args) + + resource.add_command(update) + + +def init_cli_connect(): + @click.command() + @click.argument('emitter') + @click.argument('receiver') + @click.option('--mapping', default=None) + def connect(mapping, receiver, emitter): + print 'Connect', emitter, receiver + emitter = xr.load(emitter) + receiver = xr.load(receiver) + print emitter + print receiver + if mapping is not None: + mapping = json.loads(mapping) + xs.connect(emitter, receiver, mapping=mapping) + + cli.add_command(connect) + + @click.command() + @click.argument('emitter') + @click.argument('receiver') + def disconnect(receiver, emitter): + print 'Disconnect', emitter, receiver + emitter = xr.load(emitter) + receiver = xr.load(receiver) + print emitter + print receiver + xs.disconnect(emitter, receiver) + + cli.add_command(disconnect) + + +def init_cli_connections(): + @click.group() + def connections(): + pass + + cli.add_command(connections) + + @click.command() + def show(): + print json.dumps(xs.CLIENTS, indent=2) + + connections.add_command(show) + + # TODO: this requires graphing libraries + @click.command() + def graph(): + #g = xs.connection_graph() + g = xs.detailed_connection_graph() + + nx.write_dot(g, 'graph.dot') + subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png']) + + # Matplotlib + #pos = nx.spring_layout(g) + #nx.draw_networkx_nodes(g, pos) + #nx.draw_networkx_edges(g, pos, arrows=True) + #nx.draw_networkx_labels(g, pos) + #plt.axis('off') + #plt.savefig('graph.png') + + connections.add_command(graph) + + +def init_cli_deployment_config(): + @click.command() + @click.argument('filepath') + def deploy(filepath): + print 'Deploying from file {}'.format(filepath) + xd.deploy(filepath) + + cli.add_command(deploy) + + +if __name__ == '__main__': + init_cli_resource() + init_cli_connect() + init_cli_connections() + init_cli_deployment_config() + + cli() diff --git a/compose.yml b/compose.yml deleted file mode 100644 index 949b8492..00000000 --- a/compose.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: all - sudo: yes - tasks: - - shell: docker-compose --version - register: compose - ignore_errors: true - - shell: curl -L https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose - when: compose|failed - - shell: chmod +x /usr/local/bin/docker-compose diff --git a/config.yaml b/config.yaml new file mode 100644 index 00000000..29183d5b --- /dev/null +++ b/config.yaml @@ -0,0 +1 @@ +clients-data-file: /vagrant/clients.json diff --git 
a/docs/spec/building_dependencies.md b/docs/spec/building_dependencies.md deleted file mode 100644 index 2a96cd7b..00000000 --- a/docs/spec/building_dependencies.md +++ /dev/null @@ -1,67 +0,0 @@ - - -Problem: Different execution strategies ---------------------------------------- - -We will have different order of execution for different actions -(installation, removal, maintenance) - -1. Installation and removal of resources should be done in different order. -2. Running maintenance tasks may require completely different order -of actions, and this order can not be described one time for resources, -it should be described for each action. - -IMPORTANT: In such case resources are making very little sense, -because we need to define different dependencies and build different -executions graphs for tasks during lifecycle management - - -Dependency between resources ------------------------------ -Several options to manage ordering between executables - -1. Allow user to specify this order -2. Explicitly set requires/require_for in additional entity like profile -3. Deployment flow should reflect data-dependencies between resources - -1st option is pretty clear - and we should provide a way for user -to manage dependencies by himself -(even if they will lead to error during execution) - -2nd is similar to what is done in fuel, and allows explicitly set -what is expected to be executed. However we should -not hardcode those deps on resources/actions itself. Because it will lead to -tight-coupling, and some workarounds to skip unwanted resource execution. - -3rd option is manage dependencies based on what is provided by different -resources. For example input: some_service - -Please note that this format is used only to describe intentions. - -:: - image: - ref: - namespace: docker - value: base_image - -Practically it means that docker resource should be executed before -some_service. And if another_service needs to be connected to some_service - -:: - connect: - ref: - namespace: some_service - value: port - -But what if there is no data-dependencies? - -In such case we can add generic way to extend parameters with its -requirements, like: - -:: - - requires: - - ref: - namespace: node - -# (dshulyak) How to add backward dependency? (required_for) diff --git a/docs/spec/current_flow.spec b/docs/spec/current_flow.spec deleted file mode 100644 index 65b2cab6..00000000 --- a/docs/spec/current_flow.spec +++ /dev/null @@ -1,33 +0,0 @@ - - -1. Discovery (ansible all -m facter) -Read list of ips and store them, and search for different data on those -hosts - -2. Create environment ?? with profile, that provides roles (wraps resources) - -3. Add nodes to the env and distribute services - -Assign roles (partitions of services) from the profiles to the nodes. -Store history of applied resources. -Role only matters as initial template. - -4. Change settings provided by resource. - -Imporant/Non important settings ?? -We need defaults for some settings. -Different templates ?? for different backends of resources ?? - -5. Start management - -Periodicly applying stuff ?? - -6. Stop management - -We need to be able to stop things - -7. Run maintenance - -Resources should added to history and management graph will be changed - -8. 
Start management diff --git a/docs/spec/deployment_blocks.spec b/docs/spec/deployment_blocks.spec deleted file mode 100644 index 03a01ca5..00000000 --- a/docs/spec/deployment_blocks.spec +++ /dev/null @@ -1,60 +0,0 @@ - - -Profile is a global wrapper for all resources in environment. -Profile is versioned and executed by particular driver. -Profile is a container for resources. -Resources can be grouped by roles entities. - -:: - - id: HA - type: profile - version: 0.1 - # adapter for any application that satisfies our requirements - driver: ansible - - -Role is a logical wrapper of resources. -We will provide "opinionated" wrappers, but user should -be able to compose resource in any way. - -:: - - roles: - - id: controller - type: role - resources: [] - - -Resource should have deployment logic for several events: -main deployment, removal of resource, scale up of resource ? -Resource should have list of input parameters that resource provides. -Resources are isolated, and should be executable as long as -required data provided. - -:: - id: rabbitmq - type: resource - driver: ansible_playbook - actions: - run: $install_rabbitmq_playbook - input: - image: fuel/rabbitmq - port: 5572 - # we need to be able to select ip addresses - listen: [{{management.ip}}, {{public.ip}}] - - -:: - id: nova_compute - type: resource - driver: ansible_playbook - actions: - run: $link_to_ansible_playbook - remove: $link_to_another_playbook_that_will_migrate_vms - maintenance: $link_to_playbook_that_will_put_into_maintenance - input: - image: fuel/compute - driver: kvm - rabbitmq_hosts: [] - diff --git a/docs/spec/how_to_do_primary.md b/docs/spec/how_to_do_primary.md deleted file mode 100644 index d8ee2ea0..00000000 --- a/docs/spec/how_to_do_primary.md +++ /dev/null @@ -1,18 +0,0 @@ - -How to approach primary, non-primary resource mangement? --------------------------------------------------------- - -It should be possible to avoid storing primary/non-primary flag -for any particular resource. - -In ansible there is a way to execute particular task from playbook -only once and on concrete host. - -:: - - hosts: [mariadb] - tasks: - - debug: msg="Installing first node" - run_once: true - delegate_to: groups['mariadb'][0] - - debug: msg="Installing all other mariadb nodes" - when: inventory_hostname != groups['mariadb'][0] diff --git a/docs/spec/inventory.spec b/docs/spec/inventory.spec deleted file mode 100644 index 9c5182f2..00000000 --- a/docs/spec/inventory.spec +++ /dev/null @@ -1,14 +0,0 @@ - -Inventory mechanism should provide an easy way for user to change any -piece of deployment configuration. - -It means several things: -1. When writing modules - developer should take into account possibility -of modification it by user. Development may take a little bit longer, but we -are developing tool that will cover not single particular use case, -but a broad range customized production deployments. - -2. Each resource should define what is changeable. - -On the stage before deployment we will be able to know what resources -are used on the level of node/cluster and modify them the way we want. diff --git a/docs/spec/layers.spec b/docs/spec/layers.spec deleted file mode 100644 index 75ee54d3..00000000 --- a/docs/spec/layers.spec +++ /dev/null @@ -1,8 +0,0 @@ - -Layers - -1. REST API of our CORE service // managing basic information -1.1. Extension API // interface for extensions -2. Orchestration // run tasks, periodic tasks, lifecycle management ?? -3. 
Storage - diff --git a/docs/spec/networking.spec b/docs/spec/networking.spec deleted file mode 100644 index 982349f3..00000000 --- a/docs/spec/networking.spec +++ /dev/null @@ -1,81 +0,0 @@ - -We should make network as separate resource for which we should be -able to add custom handlers. - -This resource will actually serialize tasks, and provide inventory -information. - - -Input: - -Different entities in custom database, like networks and nodes, maybe -interfaces and other things. - -Another input is parameters, like ovs/linux (it may be parameters or -different tasks) - -Output: - - -List of ansible tasks for orhestrator to execute, like - -:: - - shell: ovs-vsctl add-br {{networks.management.bridge}} - -And data to inventory - - -Networking entities ------------------------ - -Network can have a list of subnets that are attached to different node racks. - -Each subnets stores l3 parameters, such as cidr/ip ranges. -L2 parameters such as vlans can be stored on network. - -Roles should be attached to network, and different subnets can not -be used as different roles per rack. - -How it should work: - -1. Untagged network created with some l2 parameters like vlan -2. Created subnet for this network with params (10.0.0.0/24) -3. User attaches network to cluster with roles public/management/storage -4. Role can store l2 parameters also (bridge, mtu) -5. User creates rack and uses this subnet -6. IPs assigned for each node in this rack from each subnet -7. During deployment we are creating bridges based on roles. - -URIs -------- - -/networks/ - -vlan -mtu - -/networks//subnets - -cidr -ip ranges -gateway - -/clusters//networks/ - -Subset of network attached to cluster - -/clusters//networks//network_roles - -Roles attached to particular network - -/network_roles/ - -bridge - -/clusters//racks//subnets - -/clusters//racks//nodes - - - diff --git a/docs/spec/orchestration_in_fuel.yaml b/docs/spec/orchestration_in_fuel.yaml deleted file mode 100644 index 7d260eac..00000000 --- a/docs/spec/orchestration_in_fuel.yaml +++ /dev/null @@ -1,94 +0,0 @@ -roles: - role-name: - name: "" - description: "" - conflicts: - - another_role - update_required: - - another_role - update_once: - - another_role - has_primary: true - limits: - min: int OR "<>" - overrides: - - condition: "<>" - max: 1 - - condition: "<>" - reccomended: 3 - message: "" - restrictions: - - condition: "<>" - message: "" - action: "hide" - fault_tolerance: "2%" - -task_groups: - #Stages - - id: stage_name - type: stage - requires: [another_stage] - #Groups - - id: task_group_name - type: group - role: [role_name] - requires: [stage_name_requirement] - required_for: [stage_name_complete_before] - parameters: - strategy: - type: one_by_one - #OR - type: parallel - amount: 6 #Optional concurency limit - -tasks: - - id: task_name_puppet - type: puppet - role: '*' #optional role to filter task on, used when in a pre or post deployment stage - groups: [task_group_name] - required_for: [task_name, stage_name] - requires: [task_name, task_group_name, stage_name] - condition: "<>" - parameters: - puppet_manifest: path_to_manifests - puppet_modules: path_to_modules - timeout: 3600 - cwd: / - test_pre: - cmd: bash style exec of command to run - test_post: - cmd: bash style exec of command to run - - #all have [roles|groups] and requires /// required_for - - id: task_name_shell - type: shell - parameters: - cmd: bash style exec - timeout: 180 - retries: 10 - interval: 2 - - - id: task_name_upload_file - type: upload_file - role: '*' - parameters: 
- path: /etc/hiera/nodes.yaml - - - id: task_name_sync - type: sync - role: '*' - parameters: - src: rsync://{MASTER_IP}:/puppet/version - dst: /etc/puppet - timeout: 180 - - - id: task_name_copy_files - type: copy_files - role: '*' - parameters: - files: - - src: source_file/{CLUSTER_ID}/ - dst: dest/localtion - permissions: '0600' - dir_permissions: '0700' - diff --git a/docs/spec/questions b/docs/spec/questions deleted file mode 100644 index f4cb9f9a..00000000 --- a/docs/spec/questions +++ /dev/null @@ -1,52 +0,0 @@ - -Entities ------------- -We clearly need orchestration entities like: -1. resources/roles/services/profiles - -Also we need inventory entities: -2. nodes/networks/ifaces/cluster/release ? - -Q: how to allow developer to extend this entities by modules? -Options: -1. Use completely schema-less data model -(i personally more comfortable with sql-like data models) -2. Dont allow anything except standart entities, if developer needs -to manage custom data - he can create its own micro-service and -then integrate it via custom type of resource -(one which perform query to third-part service) - - -Identities and namespaces ---------------------------- -Identities required for several reasons: -- allow reusage of created once entities -- provide clear api to operate with entities -- specify dependencies with identities - -Will be root namespace polluted with those entities? - -Options: -1. We can create some variable namespace explicitly -2. Or use something like namepsace/entity (example contrail/network) - - -Multiple options for configuration ----------------------------------- - -If there will be same parameters defined within different -modules, how this should behave? - -1. First option is concatenate several options and make a list of choices. -2. Raise a validation error that certain thing can be enabled with another. - -Looks like both should be supported. - - -Deployment code ----------------- - -We need to be able to expose all functionality of any -particular deployment tool. - -Current challenge: how to specify path to some deployment logic? 
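The removed specs above are superseded by the working prototype under `x/` introduced below. As orientation, its core flow looks like this (a minimal sketch using the `x.resource`, `x.signals` and `x.actions` APIs added in this change; paths and values are illustrative):

```
from x import actions, resource, signals

# create(name, base_path, dest_path, args): instantiate a resource
# template from base_path and save it under dest_path.
node = resource.create('node1', 'x/resources/ro_node/', 'rs/',
                       {'ip': '10.0.0.3',
                        'ssh_key': '/vagrant/tmp/keys/ssh_private',
                        'ssh_user': 'vagrant'})
mariadb = resource.create('mariadb_service1', 'x/resources/mariadb_service', 'rs/',
                          {'image': 'mariadb', 'root_password': 'mariadb',
                           'port': '3306', 'ip': '', 'ssh_user': '', 'ssh_key': ''})

# Values flow from emitter to receiver; with no explicit mapping,
# same-named args (ip, ssh_user, ssh_key) are wired together.
signals.connect(node, mariadb)

# Execute the resource's 'run' action through its handler.
actions.resource_action(mariadb, 'run')
```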
diff --git a/example.py b/example.py new file mode 100644 index 00000000..2b499497 --- /dev/null +++ b/example.py @@ -0,0 +1,92 @@ +import shutil +import os + +from x import resource +from x import signals + + +signals.Connections.clear() + +if os.path.exists('rs'): + shutil.rmtree('rs') +os.mkdir('rs') + +node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) +node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) +node3 = resource.create('node3', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.5', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) + +mariadb_service1 = resource.create('mariadb_service1', 'x/resources/mariadb_service', 'rs/', {'image':'mariadb', 'root_password' : 'mariadb', 'port' : '3306', 'ip': '', 'ssh_user': '', 'ssh_key': ''}) +keystone_db = resource.create('keystone_db', 'x/resources/mariadb_db/', 'rs/', {'db_name':'keystone_db', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''}) +keystone_db_user = resource.create('keystone_db_user', 'x/resources/mariadb_user/', 'rs/', {'new_user_name' : 'keystone', 'new_user_password' : 'keystone', 'db_name':'', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''}) + +keystone_config1 = resource.create('keystone_config1', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''}) +keystone_service1 = resource.create('keystone_service1', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''}) + +keystone_config2 = resource.create('keystone_config2', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''}) +keystone_service2 = resource.create('keystone_service2', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''}) + + +haproxy_keystone_config = resource.create('haproxy_keystone1_config', 'x/resources/haproxy_config/', 'rs/', {'name':'keystone_config', 'listen_port':'5000', 'servers':[], 'ports':[]}) +haproxy_config = resource.create('haproxy_config', 'x/resources/haproxy', 'rs/', {'ip':'', 'ssh_key':'', 'ssh_user':'', 'configs_names':[], 'configs_ports':[], 'listen_ports':[], 'configs':[]}) +haproxy_service = resource.create('haproxy_service', 'x/resources/docker_container/', 'rs/', {'image' : 'tutum/haproxy', 'ports': [], 'host_binds': [], 'volume_binds':[], 'ip':'', 'ssh_key':'', 'ssh_user':''}) + + +#### +# connections +#### + +#mariadb +signals.connect(node1, mariadb_service1) + +#keystone db +signals.connect(node1, keystone_db) +signals.connect(mariadb_service1, keystone_db, {'root_password':'login_password', 'port':'login_port'}) + +# keystone_db_user +signals.connect(node1, keystone_db_user) +signals.connect(mariadb_service1, keystone_db_user, {'root_password':'login_password', 'port':'login_port'}) +signals.connect(keystone_db, keystone_db_user, {'db_name':'db_name'}) + +signals.connect(node1, keystone_config1) +signals.connect(mariadb_service1, keystone_config1, 
{'ip':'db_host'}) +signals.connect(keystone_db_user, keystone_config1, {'db_name':'db_name', 'new_user_name':'db_user', 'new_user_password':'db_password'}) + +signals.connect(node1, keystone_service1) +signals.connect(keystone_config1, keystone_service1, {'config_dir': 'config_dir'}) + +signals.connect(node2, keystone_config2) +signals.connect(mariadb_service1, keystone_config2, {'ip':'db_host'}) +signals.connect(keystone_db_user, keystone_config2, {'db_name':'db_name', 'new_user_name':'db_user', 'new_user_password':'db_password'}) + +signals.connect(node2, keystone_service2) +signals.connect(keystone_config2, keystone_service2, {'config_dir': 'config_dir'}) + +signals.connect(keystone_service1, haproxy_keystone_config, {'ip':'servers', 'port':'ports'}) + +signals.connect(node1, haproxy_config) +signals.connect(haproxy_keystone_config, haproxy_config, {'listen_port': 'listen_ports', 'name':'configs_names', 'ports' : 'configs_ports', 'servers':'configs'}) + +signals.connect(node1, haproxy_service) +signals.connect(haproxy_config, haproxy_service, {'listen_ports':'ports', 'config_dir':'host_binds'}) + + +#run +from x import actions + +actions.resource_action(mariadb_service1, 'run') +actions.resource_action(keystone_db, 'run') +actions.resource_action(keystone_db_user, 'run') +actions.resource_action(keystone_config1, 'run') +actions.resource_action(keystone_service1, 'run') +actions.resource_action(haproxy_config, 'run') +actions.resource_action(haproxy_service, 'run') + + +#remove +actions.resource_action(haproxy_service, 'remove') +actions.resource_action(haproxy_config, 'remove') +actions.resource_action(keystone_service1, 'remove') +actions.resource_action(keystone_config1, 'remove') +actions.resource_action(keystone_db_user, 'remove') +actions.resource_action(keystone_db, 'remove') +actions.resource_action(mariadb_service1, 'remove') diff --git a/examples/resources/simple/group_vars/all b/examples/resources/simple/group_vars/all deleted file mode 100644 index 0448d6ad..00000000 --- a/examples/resources/simple/group_vars/all +++ /dev/null @@ -1,14 +0,0 @@ -docker: - base_image: ubuntu - -rabbitmq: - image: tutum/rabbitmq - name: rabbit-test1 - -user: - name: test_name - password: test_pass - -mariadb: - name: maria-test - image: tutum/mariadb diff --git a/examples/resources/simple/host_vars/first b/examples/resources/simple/host_vars/first deleted file mode 100644 index eb566b39..00000000 --- a/examples/resources/simple/host_vars/first +++ /dev/null @@ -1,6 +0,0 @@ - -networks: - default: - ip: 10.0.0.2 - cidr: 10.0.0.0/24 - interface: eth1 diff --git a/examples/resources/simple/hosts b/examples/resources/simple/hosts deleted file mode 100644 index 79cd2cb4..00000000 --- a/examples/resources/simple/hosts +++ /dev/null @@ -1,20 +0,0 @@ - -first ansible_connection=local ansible_ssh_host=10.0.0.2 -second ansible_ssh_host=10.0.0.3 - -[docker] - -first -second - -[rabbitmq] - -first - -[user] - -first - -[mariadb] - -first diff --git a/examples/resources/simple/mariadb/run.yml b/examples/resources/simple/mariadb/run.yml deleted file mode 100644 index 4d3f443d..00000000 --- a/examples/resources/simple/mariadb/run.yml +++ /dev/null @@ -1,17 +0,0 @@ - -- hosts: [service/mariadb] - sudo: yes - tasks: - - shell: echo {{name}} >> /var/lib/solar/containers_list - - shell: docker ps | grep -q {{name}} - ignore_errors: true - register: is_running - - shell: docker run \ - -d \ - --net="host" \ - --privileged \ - --name {{name}} \ - -e "MARIADB_ROOT_PASSWORD={{root_password}}" \ - -e 
"BIND_ADDRESS={{bind_ip}}" \ - {{image}} - when: is_running|failed diff --git a/examples/resources/simple/rabbitmq/remove.yml b/examples/resources/simple/rabbitmq/remove.yml deleted file mode 100644 index 0191fcac..00000000 --- a/examples/resources/simple/rabbitmq/remove.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - shell: docker stop {{ rabbitmq.name }} - - shell: docker rm {{ rabbitmq.name }} diff --git a/examples/resources/simple/rabbitmq/run.yml b/examples/resources/simple/rabbitmq/run.yml deleted file mode 100644 index 5df459e6..00000000 --- a/examples/resources/simple/rabbitmq/run.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - shell: docker run --net="host" --privileged \ - --name {{ rabbitmq.name }} -d {{ rabbitmq.image }} diff --git a/examples/resources/simple/remove.yml b/examples/resources/simple/remove.yml deleted file mode 100644 index d178fe9c..00000000 --- a/examples/resources/simple/remove.yml +++ /dev/null @@ -1,5 +0,0 @@ - -- include: user/remove.yml -- include: rabbitmq/remove.yml -- include: mariadb/remove.yml -- include: docker/remove.yml diff --git a/examples/resources/simple/run.yml b/examples/resources/simple/run.yml deleted file mode 100644 index a5fce714..00000000 --- a/examples/resources/simple/run.yml +++ /dev/null @@ -1,5 +0,0 @@ - -- include: docker/run.yml -- include: rabbitmq/run.yml -- include: mariadb/run.yml -- include: user/run.yml diff --git a/examples/resources/simple/user/remove.yml b/examples/resources/simple/user/remove.yml deleted file mode 100644 index 954a9623..00000000 --- a/examples/resources/simple/user/remove.yml +++ /dev/null @@ -1,12 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - shell: docker exec -i {{rabbitmq.name}} /usr/sbin/rabbitmqctl delete_user {{user.name}} - run_once: true - -- hosts: [mariadb] - sudo: yes - tasks: - - command: docker exec -t {{mariadb.name}} \ - mysql -uroot -e "DROP USER '{{user.name}}'" diff --git a/examples/resources/simple/user/run.yml b/examples/resources/simple/user/run.yml deleted file mode 100644 index c1fe60e4..00000000 --- a/examples/resources/simple/user/run.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - command: docker exec -t {{rabbitmq.name}} /usr/sbin/rabbitmqctl add_user {{user.name}} {{user.password}} - run_once: true diff --git a/haproxy.cfg b/haproxy.cfg new file mode 100644 index 00000000..ea258a27 --- /dev/null +++ b/haproxy.cfg @@ -0,0 +1,46 @@ +global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 4096 + tune.ssl.default-dh-param 2048 + pidfile /var/run/haproxy.pid + user haproxy + group haproxy + daemon + stats socket /var/run/haproxy.stats level admin + ssl-default-bind-options no-sslv3 + +defaults + log global + mode http + option redispatch + option httplog + option dontlognull + option forwardfor + timeout connect 5000 + timeout client 50000 + timeout server 50000 + +#frontend default_frontend +# bind 0.0.0.0:80 +# default_backend default_service + +#backend default_service +# balance roundrobin + +{% for service in haproxy_services %} +listen {{ service['name'] }} 0.0.0.0:{{ service['listen_port'] }} + mode http + stats enable + stats uri /haproxy?stats + stats realm Strictly\ Private + stats auth A_Username:YourPassword + stats auth Another_User:passwd + balance roundrobin + option httpclose + option forwardfor + {% for server in service['servers'] %} + server {{ server['name'] }} {{ server['ip'] }}:{{ server['port'] }} check + {% endfor %} + +{% endfor %} 
diff --git a/haproxy_deployment/__init__.py b/haproxy_deployment/__init__.py new file mode 100644 index 00000000..4bf2011a --- /dev/null +++ b/haproxy_deployment/__init__.py @@ -0,0 +1 @@ +__author__ = 'przemek' diff --git a/haproxy_deployment/haproxy-deployment.sh b/haproxy_deployment/haproxy-deployment.sh new file mode 100755 index 00000000..81e47b7b --- /dev/null +++ b/haproxy_deployment/haproxy-deployment.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# HAProxy deployment with Keystone and Nova + +set -e + +cd /vagrant + +rm -f clients.json +rm -Rf rs/* + +# Create resources +python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip":"10.0.0.3", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip":"10.0.0.4", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node3 x/resources/ro_node/ rs/ '{"ip":"10.0.0.5", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node4 x/resources/ro_node/ rs/ '{"ip":"10.0.0.6", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node5 x/resources/ro_node/ rs/ '{"ip":"10.0.0.7", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' + +python cli.py resource create mariadb_keystone1_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create mariadb_keystone2_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create keystone1 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create keystone2 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy_keystone_config x/resources/haproxy_config/ rs/ '{"servers": {}, "ssh_user": "", "ssh_key": ""}' + +python cli.py resource create mariadb_nova1_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create mariadb_nova2_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create nova1 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create nova2 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"ip": "", "servers": {}, "ssh_user": "", "ssh_key": ""}' + +python cli.py resource create haproxy x/resources/haproxy/ rs/ '{"ip": "", "configs": {}, "ssh_user": "", "ssh_key": ""}' + + +# Connect resources +python cli.py connect rs/node1 rs/mariadb_keystone1_data +python cli.py connect rs/node2 rs/mariadb_keystone2_data +python cli.py connect rs/mariadb_keystone1_data rs/keystone1 +python cli.py connect rs/mariadb_keystone2_data rs/keystone2 +python cli.py connect rs/keystone1 rs/haproxy_keystone_config --mapping '{"ip": "servers"}' +python cli.py connect rs/keystone2 rs/haproxy_keystone_config --mapping '{"ip": "servers"}' + +python cli.py connect rs/node3 rs/mariadb_nova1_data +python cli.py connect rs/node4 rs/mariadb_nova2_data +python cli.py connect rs/mariadb_nova1_data rs/nova1 +python cli.py connect

rs/mariadb_nova2_data rs/nova2 +python cli.py connect rs/nova1 rs/haproxy_nova_config --mapping '{"ip": "servers"}' +python cli.py connect rs/nova2 rs/haproxy_nova_config --mapping '{"ip": "servers"}' + +python cli.py connect rs/node5 rs/haproxy +python cli.py connect rs/haproxy_keystone_config rs/haproxy --mapping '{"server": "configs"}' +python cli.py connect rs/haproxy_nova_config rs/haproxy --mapping '{"server": "configs"}' diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml new file mode 100755 index 00000000..d0350fdb --- /dev/null +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -0,0 +1,216 @@ +# HAProxy deployment with MariaDB, Keystone and Nova + +workdir: /vagrant +resource-save-path: rs/ +test-suite: haproxy_deployment.haproxy_deployment + +resources: + - name: node1 + model: x/resources/ro_node/ + args: + ip: 10.0.0.3 + ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key + ssh_user: vagrant + - name: node2 + model: x/resources/ro_node/ + args: + ip: 10.0.0.4 + ssh_key: /vagrant/.vagrant/machines/solar-dev3/virtualbox/private_key + ssh_user: vagrant + - name: node3 + model: x/resources/ro_node/ + args: + ip: 10.0.0.5 + ssh_key: /vagrant/.vagrant/machines/solar-dev4/virtualbox/private_key + ssh_user: vagrant + - name: node4 + model: x/resources/ro_node/ + args: + ip: 10.0.0.6 + ssh_key: /vagrant/.vagrant/machines/solar-dev5/virtualbox/private_key + ssh_user: vagrant + - name: node5 + model: x/resources/ro_node/ + args: + ip: 10.0.0.7 + ssh_key: /vagrant/.vagrant/machines/solar-dev6/virtualbox/private_key + ssh_user: vagrant + + - name: mariadb_keystone1_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: mariadb_keystone2_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: keystone1 + model: x/resources/keystone/ + args: + admin_port: 35357 + port: 5000 + image: TEST + config_dir: /etc/solar/keystone1 + ip: + ssh_user: + ssh_key: + - name: keystone2 + model: x/resources/keystone/ + args: + admin_port: 35357 + port: 5000 + config_dir: /etc/solar/keystone2 + image: TEST + ip: + ssh_user: + ssh_key: + - name: haproxy_keystone_config + model: x/resources/haproxy_config/ + args: + name: keystone + servers: [] + listen_port: 5000 + ports: [] + ssh_user: + ssh_key: + + - name: mariadb_nova1_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: mariadb_nova2_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: nova1 + model: x/resources/nova/ + args: + ip: + image: TEST + ssh_user: + ssh_key: + - name: nova2 + model: x/resources/nova/ + args: + ip: + image: TEST + ssh_user: + ssh_key: + - name: haproxy_nova_config + model: x/resources/haproxy_config/ + args: + name: nova + servers: [] + listen_port: 8774 + ports: [] + ssh_user: + ssh_key: + + - name: haproxy-config + model: x/resources/haproxy/ + args: + ip: + listen_ports: [] + configs: [] + configs_names: [] + configs_ports: [] + ssh_user: + ssh_key: + - name: haproxy + model: x/resources/docker_container + args: + ip: + image: tutum/haproxy + ports: [] + ssh_user: + ssh_key: + host_binds: [] + volume_binds: [] + + +connections: + - emitter: node1 + receiver: mariadb_keystone1_data + - emitter: node2 + receiver: 
mariadb_keystone2_data + - emitter: mariadb_keystone1_data + receiver: keystone1 + - emitter: mariadb_keystone2_data + receiver: keystone2 + - emitter: keystone1 + receiver: haproxy_keystone_config + mapping: + ip: servers + port: ports + - emitter: keystone2 + receiver: haproxy_keystone_config + mapping: + ip: servers + port: ports + + - emitter: node3 + receiver: mariadb_nova1_data + - emitter: node4 + receiver: mariadb_nova2_data + - emitter: mariadb_nova1_data + receiver: nova1 + - emitter: mariadb_nova2_data + receiver: nova2 + - emitter: nova1 + receiver: haproxy_nova_config + mapping: + ip: servers + port: ports + - emitter: nova2 + receiver: haproxy_nova_config + mapping: + ip: servers + port: ports + + # HAProxy config container + - emitter: node5 + receiver: haproxy-config + + - emitter: haproxy_keystone_config + receiver: haproxy-config + mapping: + listen_port: listen_ports + name: configs_names + ports: configs_ports + servers: configs + - emitter: haproxy_nova_config + receiver: haproxy-config + mapping: + listen_port: listen_ports + name: configs_names + ports: configs_ports + servers: configs + + - emitter: haproxy-config + receiver: haproxy + mapping: + ip: ip + listen_ports: ports + ssh_user: ssh_user + ssh_key: ssh_key + config_dir: host_binds diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py new file mode 100644 index 00000000..120d19d5 --- /dev/null +++ b/haproxy_deployment/haproxy_deployment.py @@ -0,0 +1,111 @@ +import unittest + +from x import db + + +class TestHAProxyDeployment(unittest.TestCase): + def test_keystone_config(self): + node1 = db.get_resource('node1') + node2 = db.get_resource('node2') + keystone1 = db.get_resource('keystone1') + keystone2 = db.get_resource('keystone2') + + self.assertEqual(keystone1.args['ip'], node1.args['ip']) + self.assertEqual(keystone2.args['ip'], node2.args['ip']) + + def test_haproxy_keystone_config(self): + keystone1 = db.get_resource('keystone1') + keystone2 = db.get_resource('keystone2') + haproxy_keystone_config = db.get_resource('haproxy_keystone_config') + + self.assertEqual( + [ip['value'] for ip in haproxy_keystone_config.args['servers'].value], + [ + keystone1.args['ip'], + keystone2.args['ip'], + ] + ) + self.assertEqual( + [p['value'] for p in haproxy_keystone_config.args['ports'].value], + [ + keystone1.args['port'], + keystone2.args['port'], + ] + ) + + def test_nova_config(self): + node3 = db.get_resource('node3') + node4 = db.get_resource('node4') + nova1 = db.get_resource('nova1') + nova2 = db.get_resource('nova2') + + self.assertEqual(nova1.args['ip'], node3.args['ip']) + self.assertEqual(nova2.args['ip'], node4.args['ip']) + + def test_haproxy_nova_config(self): + nova1 = db.get_resource('nova1') + nova2 = db.get_resource('nova2') + haproxy_nova_config = db.get_resource('haproxy_nova_config') + + self.assertEqual( + [ip['value'] for ip in haproxy_nova_config.args['servers'].value], + [ + nova1.args['ip'], + nova2.args['ip'], + ] + ) + self.assertEqual( + [p['value'] for p in haproxy_nova_config.args['ports'].value], + [ + nova1.args['port'], + nova2.args['port'], + ] + ) + + def test_haproxy(self): + node5 = db.get_resource('node5') + haproxy_keystone_config = db.get_resource('haproxy_keystone_config') + haproxy_nova_config = db.get_resource('haproxy_nova_config') + haproxy = db.get_resource('haproxy') + haproxy_config = db.get_resource('haproxy-config') + + self.assertEqual(node5.args['ip'], haproxy.args['ip']) + self.assertEqual(node5.args['ssh_key'], 
haproxy.args['ssh_key']) + self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user']) + self.assertEqual( + [c['value'] for c in haproxy_config.args['configs'].value], + [ + haproxy_keystone_config.args['servers'], + haproxy_nova_config.args['servers'], + ] + ) + self.assertEqual( + [cp['value'] for cp in haproxy_config.args['configs_ports'].value], + [ + haproxy_keystone_config.args['ports'], + haproxy_nova_config.args['ports'], + ] + ) + self.assertEqual( + [lp['value'] for lp in haproxy_config.args['listen_ports'].value], + [ + haproxy_keystone_config.args['listen_port'], + haproxy_nova_config.args['listen_port'], + ] + ) + self.assertEqual( + [ + haproxy_config.args['config_dir'], + ], + [hb['value'] for hb in haproxy.args['host_binds'].value] + ) + self.assertEqual( + haproxy.args['ports'], + haproxy_config.args['listen_ports'], + ) + + +def main(): + loader = unittest.TestLoader() + suite = loader.loadTestsFromTestCase(TestHAProxyDeployment) + unittest.TextTestRunner().run(suite) \ No newline at end of file diff --git a/main.yml b/main.yml index bc40a5a1..7dc87a07 100644 --- a/main.yml +++ b/main.yml @@ -11,5 +11,16 @@ - apt: name=virtualenvwrapper state=present - apt: name=ipython state=present - apt: name=python-pudb state=present + - apt: name=python-pip state=present + - apt: name=python-mysqldb state=present + - shell: pip install docker-py==1.1.0 + + # requirements + - shell: pip install -r /vagrant/requirements.txt + + # Graph drawing + #- apt: name=python-matplotlib state=present + - apt: name=python-pygraphviz state=present + # Setup development env for solar - - shell: python setup.py develop chdir=/vagrant/solar + #- shell: python setup.py develop chdir=/vagrant/solar diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..ca7d2def --- /dev/null +++ b/requirements.txt @@ -0,0 +1,4 @@ +click==4.0 +jinja2==2.7.3 +networkx==1.9.1 +PyYAML==3.11 diff --git a/simple-deployment.yaml b/simple-deployment.yaml new file mode 100755 index 00000000..7360183d --- /dev/null +++ b/simple-deployment.yaml @@ -0,0 +1,43 @@ +# HAProxy deployment with MariaDB, Keystone and Nova + +workdir: /vagrant +resource-save-path: rs/ +#test-suite: haproxy_deployment.haproxy_deployment + +resources: + - name: node1 + model: x/resources/ro_node/ + args: + ip: 10.0.0.3 + ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key + ssh_user: vagrant + + - name: keystone1 + model: x/resources/keystone/ + args: + ip: + image: TEST + ssh_user: + ssh_key: + + - name: haproxy_keystone_config + model: x/resources/haproxy_config/ + args: + listen_port: 5000 + ports: {} + servers: {} + + +connections: + - emitter: node1 + receiver: keystone1 + + # Multiple subscription test + - emitter: node1 + receiver: keystone1 + + - emitter: keystone1 + receiver: haproxy_keystone_config + mapping: + ip: servers + port: ports diff --git a/solar/MANIFEST.in b/solar/MANIFEST.in deleted file mode 100644 index 0a079c06..00000000 --- a/solar/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include *.txt -recursive-include solar/ * diff --git a/solar/setup.py b/solar/setup.py deleted file mode 100644 index 9907857c..00000000 --- a/solar/setup.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from setuptools import find_packages -from setuptools import setup - -def find_requires(): - prj_root = os.path.dirname(os.path.realpath(__file__)) - requirements = [] - with open(u'{0}/requirements.txt'.format(prj_root), 'r') as reqs: - requirements = reqs.readlines() - return requirements - - -setup( - name='solar', - version='0.0.1', - description='Deployment tool', - long_description="""Deployment tool""", - classifiers=[ - "Development Status :: 1 - Beta", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 2.6", - "Programming Language :: Python :: 2.7", - "Topic :: System :: Software Distribution"], - author='Mirantis Inc.', - author_email='product@mirantis.com', - url='http://mirantis.com', - keywords='deployment', - packages=find_packages(), - zip_safe=False, - install_requires=find_requires(), - include_package_data=True, - entry_points={ - 'console_scripts': [ - 'solar = solar.cli:main']}) diff --git a/solar/solar/core/__init__.py b/solar/solar/core/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/core/extensions_manager.py b/solar/solar/core/extensions_manager.py deleted file mode 100644 index 7d1f710a..00000000 --- a/solar/solar/core/extensions_manager.py +++ /dev/null @@ -1,17 +0,0 @@ -from solar import extensions -from solar import errors - - -class ExtensionsManager(object): - - def __init__(self, profile): - self.profile = profile - - def get_data(self, key): - """Finds data by extensions provider""" - providers = filter(lambda e: key in e.PROVIDES, extensions.get_all_extensions()) - - if not providers: - raise errors.CannotFindExtension('Cannot find extension which provides "{0}"'.format(key)) - - return getattr(providers[0](self.profile), key)() diff --git a/solar/solar/core/profile.py b/solar/solar/core/profile.py deleted file mode 100644 index f477aa28..00000000 --- a/solar/solar/core/profile.py +++ /dev/null @@ -1,10 +0,0 @@ - -class Profile(object): - - def __init__(self, profile): - self._profile = profile - self.tags = set(profile['tags']) - self.extensions = profile.get('extensions', []) - - def get(self, key): - return self._profile.get(key, None) diff --git a/solar/solar/errors.py b/solar/solar/errors.py deleted file mode 100644 index 022d8121..00000000 --- a/solar/solar/errors.py +++ /dev/null @@ -1,10 +0,0 @@ -class SolarError(Exception): - pass - - -class CannotFindID(SolarError): - pass - - -class CannotFindExtension(SolarError): - pass diff --git a/solar/solar/extensions/base.py b/solar/solar/extensions/base.py deleted file mode 100644 index 2437dc95..00000000 --- a/solar/solar/extensions/base.py +++ /dev/null @@ -1,28 +0,0 @@ -from solar.interfaces.db import get_db - - -class BaseExtension(object): - - ID = None - NAME = None - PROVIDES = [] - - def __init__(self, profile, core_manager=None, config=None): - self.config = config or {} - self.uid = self.ID - self.db = get_db() - self.profile = profile - - from solar.core.extensions_manager import ExtensionsManager - self.core = core_manager or 
ExtensionsManager(self.profile) - - def prepare(self): - """Make some changes in database state.""" - - @property - def input(self): - return self.config.get('input', {}) - - @property - def output(self): - return self.config.get('output', {}) diff --git a/solar/solar/extensions/modules/__init__.py b/solar/solar/extensions/modules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/interfaces/__init__.py b/solar/solar/interfaces/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/interfaces/db/__init__.py b/solar/solar/interfaces/db/__init__.py deleted file mode 100644 index e633458d..00000000 --- a/solar/solar/interfaces/db/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from solar.interfaces.db.file_system_db import FileSystemDB - -mapping = { - 'file_system': FileSystemDB -} - -def get_db(): - # Should be retrieved from config - return mapping['file_system']() diff --git a/solar/solar/third_party/__init__.py b/solar/solar/third_party/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/third_party/dir_dbm.py b/solar/solar/third_party/dir_dbm.py deleted file mode 100644 index ba64c989..00000000 --- a/solar/solar/third_party/dir_dbm.py +++ /dev/null @@ -1,303 +0,0 @@ -# -*- test-case-name: twisted.test.test_dirdbm -*- -# -# Copyright (c) Twisted Matrix Laboratories. -# See LICENSE for details. - - - -""" -DBM-style interface to a directory. -Each key is stored as a single file. This is not expected to be very fast or -efficient, but it's good for easy debugging. -DirDBMs are *not* thread-safe, they should only be accessed by one thread at -a time. -No files should be placed in the working directory of a DirDBM save those -created by the DirDBM itself! -Maintainer: Itamar Shtull-Trauring -""" - - -import os -import types -import base64 -import glob - -try: - import cPickle as pickle -except ImportError: - import pickle - -try: - _open -except NameError: - _open = open - - -class DirDBM(object): - """A directory with a DBM interface. - - This class presents a hash-like interface to a directory of small, - flat files. It can only use strings as keys or values. - """ - - def __init__(self, name): - """ - @type name: str - @param name: Base path to use for the directory storage. - """ - self.dname = os.path.abspath(name) - if not os.path.isdir(self.dname): - os.mkdir(self.dname) - else: - # Run recovery, in case we crashed. we delete all files ending - # with ".new". Then we find all files who end with ".rpl". If a - # corresponding file exists without ".rpl", we assume the write - # failed and delete the ".rpl" file. If only a ".rpl" exist we - # assume the program crashed right after deleting the old entry - # but before renaming the replacement entry. - # - # NOTE: '.' is NOT in the base64 alphabet! - for f in glob.glob(os.path.join(self.dname, "*.new")): - os.remove(f) - replacements = glob.glob(os.path.join(self.dname, "*.rpl")) - for f in replacements: - old = f[:-4] - if os.path.exists(old): - os.remove(f) - else: - os.rename(f, old) - - def _encode(self, k): - """Encode a key so it can be used as a filename. - """ - # NOTE: '_' is NOT in the base64 alphabet! - return base64.encodestring(k).replace('\n', '_').replace("/", "-") - - def _decode(self, k): - """Decode a filename to get the key. - """ - return base64.decodestring(k.replace('_', '\n').replace("-", "/")) - - def _readFile(self, path): - """Read in the contents of a file. - - Override in subclasses to e.g. 
provide transparently encrypted dirdbm. - """ - f = _open(path, "rb") - s = f.read() - f.close() - return s - - def _writeFile(self, path, data): - """Write data to a file. - - Override in subclasses to e.g. provide transparently encrypted dirdbm. - """ - f = _open(path, "wb") - f.write(data) - f.flush() - f.close() - - def __len__(self): - """ - @return: The number of key/value pairs in this Shelf - """ - return len(os.listdir(self.dname)) - - def __setitem__(self, k, v): - """ - C{dirdbm[k] = v} - Create or modify a textfile in this directory - @type k: str - @param k: key to set - - @type v: str - @param v: value to associate with C{k} - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - # NOTE: Can be not a string if _writeFile in the child is redefined - # assert type(v) == types.StringType, "DirDBM value must be a string" - k = self._encode(k) - - # we create a new file with extension .new, write the data to it, and - # if the write succeeds delete the old file and rename the new one. - old = os.path.join(self.dname, k) - if os.path.exists(old): - new = old + ".rpl" # replacement entry - else: - new = old + ".new" # new entry - try: - self._writeFile(new, v) - except: - os.remove(new) - raise - else: - if os.path.exists(old): os.remove(old) - os.rename(new, old) - - def __getitem__(self, k): - """ - C{dirdbm[k]} - Get the contents of a file in this directory as a string. - - @type k: str - @param k: key to lookup - - @return: The value associated with C{k} - @raise KeyError: Raised when there is no such key - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - path = os.path.join(self.dname, self._encode(k)) - try: - return self._readFile(path) - except: - raise KeyError, k - - def __delitem__(self, k): - """ - C{del dirdbm[foo]} - Delete a file in this directory. - - @type k: str - @param k: key to delete - - @raise KeyError: Raised when there is no such key - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - k = self._encode(k) - try: os.remove(os.path.join(self.dname, k)) - except (OSError, IOError): raise KeyError(self._decode(k)) - - def keys(self): - """ - @return: a C{list} of filenames (keys). - """ - return map(self._decode, os.listdir(self.dname)) - - def values(self): - """ - @return: a C{list} of file-contents (values). - """ - vals = [] - keys = self.keys() - for key in keys: - vals.append(self[key]) - return vals - - def items(self): - """ - @return: a C{list} of 2-tuples containing key/value pairs. - """ - items = [] - keys = self.keys() - for key in keys: - items.append((key, self[key])) - return items - - def has_key(self, key): - """ - @type key: str - @param key: The key to test - - @return: A true value if this dirdbm has the specified key, a faluse - value otherwise. - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - key = self._encode(key) - return os.path.isfile(os.path.join(self.dname, key)) - - def setdefault(self, key, value): - """ - @type key: str - @param key: The key to lookup - - @param value: The value to associate with key if key is not already - associated with a value. 
- """ - if not self.has_key(key): - self[key] = value - return value - return self[key] - - def get(self, key, default = None): - """ - @type key: str - @param key: The key to lookup - - @param default: The value to return if the given key does not exist - - @return: The value associated with C{key} or C{default} if not - C{self.has_key(key)} - """ - if self.has_key(key): - return self[key] - else: - return default - - def __contains__(self, key): - """ - C{key in dirdbm} - @type key: str - @param key: The key to test - - @return: A true value if C{self.has_key(key)}, a false value otherwise. - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - key = self._encode(key) - return os.path.isfile(os.path.join(self.dname, key)) - - def update(self, dict): - """ - Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting - keys will be overwritten with the values from C{dict}. - @type dict: mapping - @param dict: A mapping of key/value pairs to add to this dirdbm. - """ - for key, val in dict.items(): - self[key]=val - - def copyTo(self, path): - """ - Copy the contents of this dirdbm to the dirdbm at C{path}. - - @type path: C{str} - @param path: The path of the dirdbm to copy to. If a dirdbm - exists at the destination path, it is cleared first. - - @rtype: C{DirDBM} - @return: The dirdbm this dirdbm was copied to. - """ - path = os.path.abspath(path) - assert path != self.dname - - d = self.__class__(path) - d.clear() - for k in self.keys(): - d[k] = self[k] - return d - - def clear(self): - """ - Delete all key/value pairs in this dirdbm. - """ - for k in self.keys(): - del self[k] - - def close(self): - """ - Close this dbm: no-op, for dbm-style interface compliance. - """ - - def getModificationTime(self, key): - """ - Returns modification time of an entry. 
- - @return: Last modification date (seconds since epoch) of entry C{key} - @raise KeyError: Raised when there is no such key - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - path = os.path.join(self.dname, self._encode(key)) - if os.path.isfile(path): - return os.path.getmtime(path) - else: - raise KeyError, key diff --git a/x/README.md b/x/README.md new file mode 100644 index 00000000..5f756ec7 --- /dev/null +++ b/x/README.md @@ -0,0 +1,118 @@ +# x + +## HAProxy deployment + +``` +cd /vagrant +python cli.py deploy haproxy_deployment/haproxy-deployment.yaml +``` + +or from Python shell: + +``` +from x import deployment + +deployment.deploy('/vagrant/haproxy_deployment/haproxy-deployment.yaml') +``` + +## Usage: + +Creating resources: + +``` +from x import resource + +node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) + +node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) + +keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'ip': '', 'ssh_user': '', 'ssh_key': ''}, connections={'ip' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'ssh_user':'node2.ssh_user'}) + +nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'ip': '', 'ssh_user': '', 'ssh_key': ''}, connections={'ip' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'ssh_user':'node1.ssh_user'}) +``` + +To make a connection after a resource is created, use `signals.connect`. + +To test notifications: + +``` +keystone_db_data.args # displays node2 IP + +node2.update({'ip': '10.0.0.5'}) + +keystone_db_data.args # updated IP +``` + +If you close the Python shell you can load the resources like this: + +``` +from x import resource + +node1 = resource.load('rs/node1') + +node2 = resource.load('rs/node2') + +keystone_db_data = resource.load('rs/mariadb_keystone_data') + +nova_db_data = resource.load('rs/mariadb_nova_data') +``` + +Connections are loaded automatically.
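+
+Since connections are restored as well, updates should keep propagating after a reload (a quick check, reusing the resources loaded above):
+
+```
+node2.update({'ip': '10.0.0.6'})
+
+keystone_db_data.args  # displays the updated IP again
+```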
+
+
+You can also load all resources at once:
+
+```
+from x import resource
+
+all_resources = resource.load_all('rs')
+```
+
+## CLI
+
+You can do all of the above from the command-line client:
+
+```
+cd /vagrant
+
+python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip": "10.0.0.3", "ssh_key": "/vagrant/tmp/keys/ssh_private", "ssh_user": "vagrant"}'
+
+python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip": "10.0.0.4", "ssh_key": "/vagrant/tmp/keys/ssh_private", "ssh_user": "vagrant"}'
+
+python cli.py resource create mariadb_keystone_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes": ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'
+
+python cli.py resource create mariadb_nova_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes": ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'
+
+# View a resource
+python cli.py resource show rs/mariadb_keystone_data
+
+# Show all resources at location rs/
+python cli.py resource show rs/ --all
+
+# Show resources with a specific tag
+python cli.py resource show rs/ --tag test
+
+# Connect resources
+python cli.py connect rs/node2 rs/mariadb_keystone_data
+
+python cli.py connect rs/node1 rs/mariadb_nova_data
+
+# Test an update
+python cli.py update rs/node2 '{"ip": "1.1.1.1"}'
+python cli.py resource show rs/mariadb_keystone_data  # --> IP is 1.1.1.1
+
+# View connections
+python cli.py connections show
+
+# Outputs the graph to the 'graph.png' file. Note that edges are not drawn
+# with regular arrowheads; the destination end of an edge is simply thicker.
+# See http://networkx.lanl.gov/_modules/networkx/drawing/nx_pylab.html
+python cli.py connections graph
+
+# Disconnect
+python cli.py disconnect rs/mariadb_nova_data rs/node1
+
+# Tag a resource
+python cli.py resource tag rs/node1 test-tag
+# Remove a tag
+python cli.py resource tag rs/node1 test-tag --delete
+```
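+
+`deployment.deploy` reads a YAML file with `workdir`, `resource-save-path`, `resources` and `connections` keys, plus an optional `test-suite` module. A minimal sketch of generating such a file from Python -- the file name and all argument values below are hypothetical, and the `mapping` format (emitter input to receiver input) is an assumption:
+
+```
+import yaml
+
+config = {
+    'workdir': '/vagrant',
+    'resource-save-path': 'rs/',
+    'resources': [
+        {'name': 'node1',
+         'model': 'x/resources/ro_node/',
+         'args': {'ip': '10.0.0.3',
+                  'ssh_key': '/vagrant/tmp/keys/ssh_private',
+                  'ssh_user': 'vagrant'}},
+        {'name': 'mariadb_nova_data',
+         'model': 'x/resources/data_container/',
+         'args': {'image': 'mariadb', 'export_volumes': ['/var/lib/mysql'],
+                  'ip': '', 'ssh_user': '', 'ssh_key': ''}},
+    ],
+    'connections': [
+        {'emitter': 'node1',
+         'receiver': 'mariadb_nova_data',
+         'mapping': {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'}},
+    ],
+}
+
+with open('my-deployment.yaml', 'w') as f:
+    yaml.dump(config, f, default_flow_style=False)
+```
+
+Then deploy it with `python cli.py deploy my-deployment.yaml`.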
diff --git a/x/TODO.md b/x/TODO.md
new file mode 100644
index 00000000..f52e48a1
--- /dev/null
+++ b/x/TODO.md
@@ -0,0 +1,18 @@
+# TODO
+
+- store all resource configurations somewhere globally (this is required to
+  correctly perform an update on one resource and bubble it down to all others)
+- config templates
+- a handler can also require some data, for example ansible: ip, ssh_key, ssh_user
+- tag-filtered graph generation
+- separate resource for a docker image -- e.g. to allow automatic removal of an
+  image when it is no longer used, to conserve space
+
+# DONE
+- Deploy HAProxy, Keystone and MariaDB
+- ansible handler (loles)
+- tags are kept in the resource meta file (pkaminski)
+- add 'list' connection type (pkaminski)
+- connections are made automatically (pkaminski)
+- graph is built from the CLIENT dict, clients are stored in a JSON file (pkaminski)
+- cli (pkaminski)
diff --git a/solar/solar/__init__.py b/x/__init__.py
similarity index 100%
rename from solar/solar/__init__.py
rename to x/__init__.py
diff --git a/x/actions.py b/x/actions.py
new file mode 100644
index 00000000..5c694a38
--- /dev/null
+++ b/x/actions.py
@@ -0,0 +1,13 @@
+# -*- coding: UTF-8 -*-
+import handlers
+
+
+def resource_action(resource, action):
+    handler = resource.metadata['handler']
+    with handlers.get(handler)([resource]) as h:
+        h.action(resource, action)
+
+
+def tag_action(tag, action):
+    #TODO
+    pass
diff --git a/x/db.py b/x/db.py
new file mode 100644
index 00000000..4ffcc490
--- /dev/null
+++ b/x/db.py
@@ -0,0 +1,19 @@
+# -*- coding: UTF-8 -*-
+
+RESOURCE_DB = {}
+
+
+def resource_add(key, value):
+    if key in RESOURCE_DB:
+        raise Exception('Key `{0}` already exists'.format(key))
+    RESOURCE_DB[key] = value
+
+
+def get_resource(key):
+    return RESOURCE_DB.get(key, None)
+
+
+def clear():
+    global RESOURCE_DB
+
+    RESOURCE_DB = {}
diff --git a/x/deployment.py b/x/deployment.py
new file mode 100644
index 00000000..b3034ec8
--- /dev/null
+++ b/x/deployment.py
@@ -0,0 +1,45 @@
+# Deploy resources from a YAML definition
+
+import os
+import shutil
+import yaml
+
+from x import db
+from x import resource as xr
+from x import signals as xs
+
+
+def deploy(filename):
+    with open(filename) as f:
+        config = yaml.load(f)
+
+    workdir = config['workdir']
+    resource_save_path = os.path.join(workdir, config['resource-save-path'])
+
+    # Clean stuff first
+    db.clear()
+    xs.Connections.clear()
+    shutil.rmtree(resource_save_path, ignore_errors=True)
+    os.makedirs(resource_save_path)
+
+    # Create resources first
+    for resource_definition in config['resources']:
+        name = resource_definition['name']
+        model = os.path.join(workdir, resource_definition['model'])
+        args = resource_definition.get('args', {})
+        print 'Creating', name, model, resource_save_path, args
+        xr.create(name, model, resource_save_path, args=args)
+
+    # Create resource connections
+    for connection in config['connections']:
+        emitter = db.get_resource(connection['emitter'])
+        receiver = db.get_resource(connection['receiver'])
+        mapping = connection.get('mapping')
+        print 'Connecting', emitter.name, receiver.name, mapping
+        xs.connect(emitter, receiver, mapping=mapping)
+
+    # Run all tests
+    if 'test-suite' in config:
+        print 'Running tests from {}'.format(config['test-suite'])
+        test_suite = __import__(config['test-suite'], {}, {}, ['main'])
+        test_suite.main()
diff --git a/x/handlers/__init__.py b/x/handlers/__init__.py
new file mode 100644
index 00000000..1fc4e43d
--- /dev/null
+++ b/x/handlers/__init__.py
@@ -0,0 +1,15 @@
+# -*- coding: UTF-8 -*-
+from x.handlers.ansible import Ansible
+from x.handlers.base import Empty
+from x.handlers.shell import Shell
+
+
+HANDLERS = {'ansible': Ansible,
+            'shell': Shell,
+            'none': Empty}
+
+
+def get(handler_name):
+    handler = HANDLERS.get(handler_name, None)
+    if handler:
+        return handler
+    raise Exception('Handler {0} does not exist'.format(handler_name))
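
Handlers are looked up by the `handler` name in a resource's `meta.yaml` and are used as context managers (see `x/actions.py`). A sketch of plugging a custom handler into the registry above -- the `puppet` name and `Puppet` class are hypothetical:

```
from x import handlers
from x.handlers.base import BaseHandler


class Puppet(BaseHandler):
    def action(self, resource, action_name):
        # Render this resource's action template into a temp file,
        # then hand it off to whatever applies it.
        manifest = self._compile_action_file(resource, action_name)
        print 'Would apply', manifest


handlers.HANDLERS['puppet'] = Puppet

# Usage mirrors x/actions.py: __enter__ creates the per-resource temp dirs.
# with handlers.get('puppet')([some_resource]) as h:
#     h.action(some_resource, 'run')
```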
diff --git a/x/handlers/ansible.py b/x/handlers/ansible.py
new file mode 100644
index 00000000..2a5a1b8e
--- /dev/null
+++ b/x/handlers/ansible.py
@@ -0,0 +1,39 @@
+# -*- coding: UTF-8 -*-
+import os
+import subprocess
+import yaml
+
+from x.handlers.base import BaseHandler
+
+
+class Ansible(BaseHandler):
+    def action(self, resource, action_name):
+        inventory_file = self._create_inventory(resource)
+        playbook_file = self._create_playbook(resource, action_name)
+        print 'inventory_file', inventory_file
+        print 'playbook_file', playbook_file
+        call_args = ['ansible-playbook', '-i', inventory_file, playbook_file]
+        print 'EXECUTING:', ' '.join(call_args)
+        subprocess.call(call_args)
+
+    #def _get_connection(self, resource):
+    #    return {'ssh_user': '',
+    #            'ssh_key': '',
+    #            'host': ''}
+
+    def _create_inventory(self, r):
+        inventory = '{0} ansible_ssh_host={1} ansible_connection=ssh ansible_ssh_user={2} ansible_ssh_private_key_file={3}'
+        host, user, ssh_key = r.args['ip'].value, r.args['ssh_user'].value, r.args['ssh_key'].value
+        inventory = inventory.format(host, host, user, ssh_key)
+        print inventory
+        directory = self.dirs[r.name]
+        inventory_path = os.path.join(directory, 'inventory')
+        with open(inventory_path, 'w') as inv:
+            inv.write(inventory)
+        return inventory_path
+
+    def _create_playbook(self, resource, action):
+        return self._compile_action_file(resource, action)
diff --git a/x/handlers/base.py b/x/handlers/base.py
new file mode 100644
index 00000000..0572080f
--- /dev/null
+++ b/x/handlers/base.py
@@ -0,0 +1,53 @@
+# -*- coding: UTF-8 -*-
+import os
+import shutil
+import tempfile
+
+from jinja2 import Template
+
+
+class BaseHandler(object):
+    def __init__(self, resources):
+        self.dst = tempfile.mkdtemp()
+        self.resources = resources
+
+    def __enter__(self):
+        self.dirs = {}
+        for resource in self.resources:
+            resource_dir = tempfile.mkdtemp(suffix=resource.name, dir=self.dst)
+            self.dirs[resource.name] = resource_dir
+        return self
+
+    def __exit__(self, type, value, traceback):
+        # Clean up the temporary directories created in __enter__
+        shutil.rmtree(self.dst)
+
+    def _compile_action_file(self, resource, action):
+        action_file = resource.metadata['actions'][action]
+        action_file = os.path.join(resource.base_dir, 'actions', action_file)
+        dir_path = self.dirs[resource.name]
+        dest_file = tempfile.mkstemp(text=True, prefix=action, dir=dir_path)[1]
+        args = self._make_args(resource)
+        self._compile_file(action_file, dest_file, args)
+        return dest_file
+
+    def _compile_file(self, template, dest_file, args):
+        print 'Rendering', template, args
+        with open(template) as f:
+            tpl = Template(f.read())
+            tpl = tpl.render(args, zip=zip)
+
+        with open(dest_file, 'w') as g:
+            g.write(tpl)
+
+    def _make_args(self, resource):
+        args = {'name': resource.name}
+        args['resource_dir'] = resource.base_dir
+        args.update(resource.args)
+        return args
+
+
+class Empty(BaseHandler):
+    def action(self, resource, action):
+        pass
diff --git a/x/handlers/shell.py b/x/handlers/shell.py
new file mode 100644
index 00000000..66690d35
--- /dev/null
+++ b/x/handlers/shell.py
@@ -0,0 +1,10 @@
+# -*- coding: UTF-8 -*-
+import subprocess
+
+from x.handlers.base import BaseHandler
+
+
+class Shell(BaseHandler):
+    def action(self, resource, action_name):
+        action_file = self._compile_action_file(resource, action_name)
+        subprocess.call(['bash', action_file])
diff --git a/x/observer.py b/x/observer.py
new file mode 100644
index 00000000..7f0d7f0f
--- /dev/null
+++ b/x/observer.py
@@ -0,0 +1,190 @@
+from x import signals
+
+
+class BaseObserver(object):
+    type_ = None
+
+    def __init__(self, attached_to, name, value):
+        """
+        :param attached_to: resource.Resource
+        :param name:
+        :param value:
+        :return:
+        """
+        self.attached_to = attached_to
+        self.name = name
+        self.value = value
+        self.receivers = []
+
+    def log(self, msg):
+        print '{} {}'.format(self, msg)
+
+    def __repr__(self):
+        return '[{}:{}] {}'.format(self.attached_to.name, self.name, self.value)
+
+    def __unicode__(self):
+        return self.value
+
+    def __eq__(self, other):
+        if isinstance(other, BaseObserver):
+            return self.value == other.value
+
+        return self.value == other
+
+    def notify(self, emitter):
+        """
+        :param emitter: Observer
+        :return:
+        """
+        raise NotImplementedError
+
+    def update(self, value):
+        """
+        :param value:
+        :return:
+        """
+        raise NotImplementedError
+
+    def find_receiver(self, receiver):
+        fltr = [r for r in self.receivers
+                if r.attached_to == receiver.attached_to
+                and r.name == receiver.name]
+        if fltr:
+            return fltr[0]
+
+    def subscribe(self, receiver):
+        """
+        :param receiver: Observer
+        :return:
+        """
+        self.log('Subscribe {}'.format(receiver))
+        # No multiple subscriptions
+        if self.find_receiver(receiver):
+            self.log('No multiple subscriptions from {}'.format(receiver))
+            return
+        self.receivers.append(receiver)
+        receiver.subscribed(self)
+
+        signals.Connections.add(
+            self.attached_to,
+            self.name,
+            receiver.attached_to,
+            receiver.name
+        )
+
+        receiver.notify(self)
+
+    def subscribed(self, emitter):
+        self.log('Subscribed {}'.format(emitter))
+
+    def unsubscribe(self, receiver):
+        """
+        :param receiver: Observer
+        :return:
+        """
+        self.log('Unsubscribe {}'.format(receiver))
+        if self.find_receiver(receiver):
+            self.receivers.remove(receiver)
+            receiver.unsubscribed(self)
+
+        signals.Connections.remove(
+            self.attached_to,
+            self.name,
+            receiver.attached_to,
+            receiver.name
+        )
+
+        # TODO: ?
+        #receiver.notify(self)
+
+    def unsubscribed(self, emitter):
+        self.log('Unsubscribed {}'.format(emitter))
+
+
+class Observer(BaseObserver):
+    type_ = 'simple'
+
+    def __init__(self, *args, **kwargs):
+        super(Observer, self).__init__(*args, **kwargs)
+        self.emitter = None
+
+    def notify(self, emitter):
+        self.log('Notify from {} value {}'.format(emitter, emitter.value))
+        # Copy emitter's values to receiver
+        self.value = emitter.value
+        for receiver in self.receivers:
+            receiver.notify(self)
+        self.attached_to.save()
+
+    def update(self, value):
+        self.log('Updating to value {}'.format(value))
+        self.value = value
+        for receiver in self.receivers:
+            receiver.notify(self)
+        self.attached_to.save()
+
+    def subscribed(self, emitter):
+        super(Observer, self).subscribed(emitter)
+        # Simple observer can be attached to at most one emitter
+        if self.emitter is not None:
+            self.emitter.unsubscribe(self)
+        self.emitter = emitter
+
+    def unsubscribed(self, emitter):
+        super(Observer, self).unsubscribed(emitter)
+        self.emitter = None
+
+
+class ListObserver(BaseObserver):
+    type_ = 'list'
+
+    def __unicode__(self):
+        return unicode(self.value)
+
+    @staticmethod
+    def _format_value(emitter):
+        return {
+            'emitter': emitter.name,
+            'emitter_attached_to': emitter.attached_to.name,
+            'value': emitter.value,
+        }
+
+    def notify(self, emitter):
+        self.log('Notify from {} value {}'.format(emitter, emitter.value))
+        # Copy emitter's values to receiver
+        #self.value[emitter.attached_to.name] = emitter.value
+        idx = self._emitter_idx(emitter)
+        self.value[idx] = self._format_value(emitter)
+        for receiver in self.receivers:
+            receiver.notify(self)
+        self.attached_to.save()
+
+    def subscribed(self, emitter):
+        super(ListObserver, self).subscribed(emitter)
+        idx = self._emitter_idx(emitter)
+        if idx is None:
+            self.value.append(self._format_value(emitter))
+
+    def unsubscribed(self, emitter):
+        """
+        :param emitter: Observer
+        :return:
+        """
+        self.log('Unsubscribed emitter {}'.format(emitter))
+        idx = self._emitter_idx(emitter)
+        self.value.pop(idx)
+
+    def _emitter_idx(self, emitter):
+        try:
+            return [i for i, e in enumerate(self.value)
+                    if e['emitter_attached_to'] == emitter.attached_to.name
+                    ][0]
+        except IndexError:
+            return
+
+
+def create(type_, *args, **kwargs):
+    for klass in BaseObserver.__subclasses__():
+        if klass.type_ == type_:
+            return klass(*args, **kwargs)
+    raise NotImplementedError('No handling class for type {}'.format(type_))
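
These observers are what make `node2.update({'ip': ...})` propagate to connected resources. A rough standalone sketch of that mechanism -- `FakeResource` is a hypothetical stand-in (real observers are attached to `x.resource.Resource` objects), and this assumes `signals.Connections` only records the resource names when a subscription is registered:

```
from x import observer


class FakeResource(object):
    def __init__(self, name):
        self.name = name

    def save(self):
        pass  # a real Resource persists its args to meta.yaml here


emitter = observer.create('simple', FakeResource('node2'), 'ip', '10.0.0.4')
receiver = observer.create('simple', FakeResource('mariadb_keystone_data'), 'ip', '')

# subscribe() records the connection and pushes the current value
emitter.subscribe(receiver)
assert receiver.value == '10.0.0.4'

# update() re-notifies every subscribed receiver
emitter.update('10.0.0.5')
assert receiver.value == '10.0.0.5'
```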
diff --git a/x/resource.py b/x/resource.py
new file mode 100644
index 00000000..437b5f32
--- /dev/null
+++ b/x/resource.py
@@ -0,0 +1,176 @@
+# -*- coding: UTF-8 -*-
+import copy
+import json
+import os
+import shutil
+
+import yaml
+
+from x import actions
+from x import db
+from x import observer
+from x import signals
+from x import utils
+
+
+class Resource(object):
+    def __init__(self, name, metadata, args, base_dir, tags=None):
+        self.name = name
+        self.base_dir = base_dir
+        self.metadata = metadata
+        self.actions = metadata['actions'].keys() if metadata['actions'] else None
+        self.requires = metadata['input'].keys()
+        self._validate_args(args, metadata['input'])
+        self.args = {}
+        for arg_name, arg_value in args.items():
+            type_ = metadata.get('input-types', {}).get(arg_name) or 'simple'
+            self.args[arg_name] = observer.create(type_, self, arg_name, arg_value)
+        self.metadata['input'] = args
+        self.input_types = metadata.get('input-types', {})
+        self.changed = []
+        self.tags = tags or []
+
+    def __repr__(self):
+        return ("Resource(name='{0}', metadata={1}, args={2}, "
+                "base_dir='{3}', tags={4})").format(self.name,
+                                                    json.dumps(self.metadata),
+                                                    json.dumps(self.args_show()),
+                                                    self.base_dir,
+                                                    self.tags)
+
+    def args_show(self):
+        def formatter(v):
+            if isinstance(v, observer.ListObserver):
+                return v.value
+            elif isinstance(v, observer.Observer):
+                return {
+                    'emitter': v.emitter.attached_to.name if v.emitter else None,
+                    'value': v.value,
+                }
+
+            return v
+
+        return {k: formatter(v) for k, v in self.args.items()}
+
+    def args_dict(self):
+        return {k: v.value for k, v in self.args.items()}
+
+    def add_tag(self, tag):
+        if tag not in self.tags:
+            self.tags.append(tag)
+
+    def remove_tag(self, tag):
+        try:
+            self.tags.remove(tag)
+        except ValueError:
+            pass
+
+    def notify(self, emitter):
+        """Update resource's args from emitter's args.
+
+        :param emitter: Resource
+        :return:
+        """
+        for key, value in emitter.args.iteritems():
+            self.args[key].notify(value)
+
+    def update(self, args):
+        """This method updates resource's args with a simple dict.
+
+        :param args:
+        :return:
+        """
+        # Update will be blocked if this resource is listening
+        # on some input that is to be updated -- we should only listen
+        # to the emitter and not be able to change the input's value
+
+        for key, value in args.iteritems():
+            self.args[key].update(value)
+
+    def action(self, action):
+        if action in self.actions:
+            actions.resource_action(self, action)
+        else:
+            raise Exception('Action {0} is not available'.format(action))
+
+    def _validate_args(self, args, inputs):
+        for req in self.requires:
+            if req not in args:
+                # If metadata input is filled with a value, use it as default
+                # and don't report an error
+                if inputs.get(req):
+                    args[req] = inputs[req]
+                else:
+                    raise Exception('Requirement `{0}` is missing in args'.format(req))
+
+    # TODO: versioning
+    def save(self):
+        metadata = copy.deepcopy(self.metadata)
+
+        metadata['tags'] = self.tags
+        metadata['input'] = self.args_dict()
+
+        meta_file = os.path.join(self.base_dir, 'meta.yaml')
+        with open(meta_file, 'w') as f:
+            f.write(yaml.dump(metadata, default_flow_style=False))
+
+
+def create(name, base_path, dest_path, args, connections=None):
+    if not os.path.exists(base_path):
+        raise Exception('Base resource does not exist: {0}'.format(base_path))
+    if not os.path.exists(dest_path):
+        raise Exception('Dest dir does not exist: {0}'.format(dest_path))
+    if not os.path.isdir(dest_path):
+        raise Exception('Dest path is not a directory: {0}'.format(dest_path))
+
+    dest_path = os.path.abspath(os.path.join(dest_path, name))
+    base_meta_file = os.path.join(base_path, 'meta.yaml')
+    actions_path = os.path.join(base_path, 'actions')
+
+    meta = yaml.load(open(base_meta_file).read())
+    meta['id'] = name
+    meta['version'] = '1.0.0'
+    meta['actions'] = {}
+    meta['tags'] = []
+
+    if os.path.exists(actions_path):
+        for f in os.listdir(actions_path):
+            meta['actions'][os.path.splitext(f)[0]] = f
+
+    resource = Resource(name, meta, args, dest_path)
+    signals.assign_connections(resource, connections or {})
+
+    # save
+    shutil.copytree(base_path, dest_path)
+    resource.save()
+    db.resource_add(name, resource)
+
+    return resource
+
+
+def load(dest_path):
+    meta_file = os.path.join(dest_path, 'meta.yaml')
+    meta = utils.load_file(meta_file)
+    name = meta['id']
+    args = meta['input']
+    tags = meta.get('tags', [])
+
+    resource = Resource(name, meta, args, dest_path, tags=tags)
+
+    db.resource_add(name, resource)
+
+    return resource
+
+
+def load_all(dest_path):
+    ret = {}
+
+    for name in os.listdir(dest_path):
+        resource_path = os.path.join(dest_path, name)
+        resource = load(resource_path)
+        ret[resource.name] = resource
+
+    signals.Connections.reconnect_all()
+
+    return ret
diff --git a/x/resources/data_container/actions/echo.yml b/x/resources/data_container/actions/echo.yml
new file mode 100644
index 00000000..59b540ae
--- /dev/null
+++ b/x/resources/data_container/actions/echo.yml
@@ -0,0 +1,5 @@
+
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - shell: echo `/sbin/ifconfig`
diff --git a/x/resources/data_container/actions/remove.yml b/x/resources/data_container/actions/remove.yml
new file mode 100644
index 00000000..d3c3149f
--- /dev/null
+++ b/x/resources/data_container/actions/remove.yml
@@ -0,0 +1,6 @@
+
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - shell: docker stop {{ name }}
+    - shell: docker rm {{ name }}
diff --git a/x/resources/data_container/actions/run.yml b/x/resources/data_container/actions/run.yml
new file mode 100644
index 00000000..3a63f6fc
--- /dev/null
+++ 
b/x/resources/data_container/actions/run.yml @@ -0,0 +1,20 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - docker: + name: {{ name }} + image: {{ image }} + state: running + net: host + ports: + {% for port in ports.value %} + - {{ port['value'] }}:{{ port['value'] }} + {% endfor %} + volumes: + # TODO: host_binds might need more work + # Currently it's not that trivial to pass custom src: dst here + # (when a config variable is passed here from other resource) + # so we mount it to the same directory as on host + {% for bind in host_binds.value %} + - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} + {% endfor %} \ No newline at end of file diff --git a/x/resources/data_container/meta.yaml b/x/resources/data_container/meta.yaml new file mode 100644 index 00000000..d4185a1a --- /dev/null +++ b/x/resources/data_container/meta.yaml @@ -0,0 +1,7 @@ +id: data_container +handler: ansible +version: 1.0.0 +input: + ip: + image: + export_volumes: diff --git a/x/resources/docker_container/actions/remove.yml b/x/resources/docker_container/actions/remove.yml new file mode 100644 index 00000000..d3c3149f --- /dev/null +++ b/x/resources/docker_container/actions/remove.yml @@ -0,0 +1,6 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml new file mode 100644 index 00000000..9156b900 --- /dev/null +++ b/x/resources/docker_container/actions/run.yml @@ -0,0 +1,21 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - docker: + name: {{ name }} + image: {{ image }} + state: running + net: host + ports: + {% for port in ports.value %} + - {{ port['value'] }}:{{ port['value'] }} + {% endfor %} + volumes: + # TODO: host_binds might need more work + # Currently it's not that trivial to pass custom src: dst here + # (when a config variable is passed here from other resource) + # so we mount it to the same directory as on host + {% for bind in host_binds.value %} + - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} + {% endfor %} diff --git a/x/resources/docker_container/meta.yaml b/x/resources/docker_container/meta.yaml new file mode 100644 index 00000000..182c872c --- /dev/null +++ b/x/resources/docker_container/meta.yaml @@ -0,0 +1,15 @@ +id: container +handler: ansible +version: 1.0.0 +input: + ip: + image: + ports: + host_binds: + volume_binds: + ssh_user: + ssh_key: +input-types: + ports: + host_binds: list + volume_binds: list diff --git a/x/resources/file/actions/remove.sh b/x/resources/file/actions/remove.sh new file mode 100644 index 00000000..dc21c836 --- /dev/null +++ b/x/resources/file/actions/remove.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +rm {{ path }} diff --git a/x/resources/file/actions/run.sh b/x/resources/file/actions/run.sh new file mode 100644 index 00000000..461a550e --- /dev/null +++ b/x/resources/file/actions/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +touch {{ path }} diff --git a/x/resources/file/meta.yaml b/x/resources/file/meta.yaml new file mode 100644 index 00000000..14eb2e2c --- /dev/null +++ b/x/resources/file/meta.yaml @@ -0,0 +1,5 @@ +id: file +handler: shell +version: 1.0.0 +input: + path: /tmp/test_file diff --git a/x/resources/haproxy/actions/remove.yml b/x/resources/haproxy/actions/remove.yml new file mode 100644 index 00000000..f6c1f6e5 --- /dev/null +++ b/x/resources/haproxy/actions/remove.yml @@ -0,0 +1,5 @@ +# TODO +- hosts: [{{ ip }}] + 
sudo: yes + tasks: + - file: path={{ config_dir.value['src'] }} state=absent diff --git a/x/resources/haproxy/actions/run.yml b/x/resources/haproxy/actions/run.yml new file mode 100644 index 00000000..8b112a09 --- /dev/null +++ b/x/resources/haproxy/actions/run.yml @@ -0,0 +1,21 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + vars: + config_dir: {src: {{ config_dir.value['src'] }}, dst: {{ config_dir.value['dst'] }}} + haproxy_ip: {{ ip }} + haproxy_services: + {% for service, ports, listen_port in zip(configs.value, configs_ports.value, listen_ports.value) %} + - name: {{ service['emitter_attached_to'] }} + listen_port: {{ listen_port['value'] }} + servers: + {% for server_ip, server_port in zip(service['value'], ports['value']) %} + - name: {{ server_ip['emitter_attached_to'] }} + ip: {{ server_ip['value'] }} + port: {{ server_port['value'] }} + {% endfor %} + {% endfor %} + tasks: + - file: path={{ config_dir.value['src'] }}/ state=directory + - file: path={{ config_dir.value['src'] }}/haproxy.cfg state=touch + - template: src=/vagrant/haproxy.cfg dest={{ config_dir.value['src'] }}/haproxy.cfg diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml new file mode 100644 index 00000000..57f53a69 --- /dev/null +++ b/x/resources/haproxy/meta.yaml @@ -0,0 +1,17 @@ +id: haproxy +handler: ansible +version: 1.0.0 +input: + ip: + config_dir: {src: /etc/solar/haproxy, dst: /etc/haproxy} + listen_ports: + configs: + configs_names: + configs_ports: + ssh_user: + ssh_key: +input-types: + listen_ports: list + configs: list + configs_names: list + configs_ports: list diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml new file mode 100644 index 00000000..a7584600 --- /dev/null +++ b/x/resources/haproxy_config/meta.yaml @@ -0,0 +1,11 @@ +id: haproxy_config +handler: none +version: 1.0.0 +input: + name: + listen_port: + ports: + servers: +input-types: + ports: list + servers: list diff --git a/x/resources/keystone_config/actions/remove.yml b/x/resources/keystone_config/actions/remove.yml new file mode 100644 index 00000000..7e452a44 --- /dev/null +++ b/x/resources/keystone_config/actions/remove.yml @@ -0,0 +1,4 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - file: path={{config_dir}} state=absent diff --git a/x/resources/keystone_config/actions/run.yml b/x/resources/keystone_config/actions/run.yml new file mode 100644 index 00000000..e24d0fae --- /dev/null +++ b/x/resources/keystone_config/actions/run.yml @@ -0,0 +1,14 @@ +- hosts: [{{ ip }}] + sudo: yes + vars: + admin_token: {{admin_token}} + db_user: {{db_user}} + db_password: {{db_password}} + db_host: {{db_host}} + db_name: {{db_name}} + tasks: + - file: path={{config_dir}} state=directory + - template: src={{resource_dir}}/templates/keystone.conf dest={{config_dir}}/keystone.conf + - template: src={{resource_dir}}/templates/default_catalog.templates dest={{config_dir}}/default_catalog.templates + - template: src={{resource_dir}}/templates/logging.conf dest={{config_dir}}/logging.conf + - template: src={{resource_dir}}/templates/policy.json dest={{config_dir}}/policy.json diff --git a/x/resources/keystone_config/meta.yaml b/x/resources/keystone_config/meta.yaml new file mode 100644 index 00000000..b4ea7ce6 --- /dev/null +++ b/x/resources/keystone_config/meta.yaml @@ -0,0 +1,13 @@ +id: keystone_config +handler: ansible +version: 1.0.0 +input: + config_dir: + admin_token: + db_user: + db_password: + db_host: + db_name: + ip: + ssh_key: + ssh_user: diff --git 
a/x/resources/keystone_config/templates/default_catalog.templates b/x/resources/keystone_config/templates/default_catalog.templates new file mode 100644 index 00000000..a69b7f06 --- /dev/null +++ b/x/resources/keystone_config/templates/default_catalog.templates @@ -0,0 +1,27 @@ +# config for templated.Catalog, using camelCase because I don't want to do +# translations for keystone compat +catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0 +catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0 +catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0 +catalog.RegionOne.identity.name = Identity Service + +# fake compute service for now to help novaclient tests work +catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.name = Compute Service + +catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.name = Volume Service + +catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud +catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin +catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud +catalog.RegionOne.ec2.name = EC2 Service + +catalog.RegionOne.image.publicURL = http://localhost:9292/v1 +catalog.RegionOne.image.adminURL = http://localhost:9292/v1 +catalog.RegionOne.image.internalURL = http://localhost:9292/v1 +catalog.RegionOne.image.name = Image Service diff --git a/x/resources/keystone_config/templates/keystone.conf b/x/resources/keystone_config/templates/keystone.conf new file mode 100644 index 00000000..e8bfb466 --- /dev/null +++ b/x/resources/keystone_config/templates/keystone.conf @@ -0,0 +1,1589 @@ +[DEFAULT] + +# +# Options defined in keystone +# + +# A "shared secret" that can be used to bootstrap Keystone. +# This "token" does not represent a user, and carries no +# explicit authorization. To disable in production (highly +# recommended), remove AdminTokenAuthMiddleware from your +# paste application pipelines (for example, in keystone- +# paste.ini). (string value) +admin_token={{admin_token}} + +# The IP address of the network interface for the public +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#public_bind_host=0.0.0.0 + +# The IP address of the network interface for the admin +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#admin_bind_host=0.0.0.0 + +# (Deprecated) The port which the OpenStack Compute service +# listens on. This option was only used for string replacement +# in the templated catalog backend. Templated catalogs should +# replace the "$(compute_port)s" substitution with the static +# port of the compute service. As of Juno, this option is +# deprecated and will be removed in the L release. (integer +# value) +#compute_port=8774 + +# The port number which the admin service listens on. (integer +# value) +admin_port=35357 + +# The port number which the public service listens on. 
+# (integer value) +public_port=5000 + +# The base public endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:5000/v2.0/users +# will default to http://server:5000. You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#public_endpoint= + +# The base admin endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:35357/v2.0/users +# will default to http://server:35357. You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#admin_endpoint= + +# The number of worker processes to serve the public WSGI +# application. Defaults to number of CPUs (minimum of 2). +# (integer value) +#public_workers= + +# The number of worker processes to serve the admin WSGI +# application. Defaults to number of CPUs (minimum of 2). +# (integer value) +#admin_workers= + +# Enforced by optional sizelimit middleware +# (keystone.middleware:RequestBodySizeLimiter). (integer +# value) +#max_request_body_size=114688 + +# Limit the sizes of user & project ID/names. (integer value) +#max_param_size=64 + +# Similar to max_param_size, but provides an exception for +# token values. (integer value) +#max_token_size=8192 + +# During a SQL upgrade member_role_id will be used to create a +# new role that will replace records in the assignment table +# with explicit role grants. After migration, the +# member_role_id will be used in the API add_user_to_project. +# (string value) +#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab + +# During a SQL upgrade member_role_name will be used to create +# a new role that will replace records in the assignment table +# with explicit role grants. After migration, member_role_name +# will be ignored. (string value) +#member_role_name=_member_ + +# The value passed as the keyword "rounds" to passlib's +# encrypt method. (integer value) +#crypt_strength=40000 + +# Set this to true if you want to enable TCP_KEEPALIVE on +# server sockets, i.e. sockets used by the Keystone wsgi +# server for client connections. (boolean value) +#tcp_keepalive=false + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Only applies if tcp_keepalive is true. Not supported +# on OS X. (integer value) +#tcp_keepidle=600 + +# The maximum number of entities that will be returned in a +# collection, with no limit set by default. This global limit +# may be then overridden for a specific driver, by specifying +# a list_limit in the appropriate section (e.g. [assignment]). +# (integer value) +#list_limit= + +# Set this to false if you want to enable the ability for +# user, group and project entities to be moved between domains +# by updating their domain_id. Allowing such movement is not +# recommended if the scope of a domain admin is being +# restricted by use of an appropriate policy file (see +# policy.v3cloudsample as an example). (boolean value) +#domain_id_immutable=true + +# If set to true, strict password length checking is performed +# for password manipulation. 
If a password exceeds the maximum +# length, the operation will fail with an HTTP 403 Forbidden +# error. If set to false, passwords are automatically +# truncated to the maximum length. (boolean value) +#strict_password_check=false + + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest + +# The RabbitMQ password. (string value) +#rabbit_password=guest + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. 
(boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=keystone + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=keystone + + +# +# Options defined in keystone.notifications +# + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in keystone.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, <port>, +# and <start>:<end>, where 0 results in listening on a random +# tcp port number; <port> results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and <start>:<end> results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in keystone.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). (boolean value) +#verbose=false + +# Log output to standard error. (boolean value) +#use_stderr=false + +# Format string to use for log messages with context. (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. 
+# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables publication of error events. (boolean +# value) +#publish_errors=false + +# Enables or disables fatal status of deprecations. (boolean +# value) +#fatal_deprecations=false + +# The format for an instance that is passed with the log +# message. (string value) +#instance_format="[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log +# message. (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of a logging configuration file. This file is +# appended to any existing logging configuration files. For +# details about logging configuration files, see the Python +# logging module documentation. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s . (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. (string value) +# If not set here, logging will go to /var/log/keystone/keystone.log, +# default from keystone-dist.conf. +# Deprecated group/name - [DEFAULT]/logfile +#log_file=/var/log/keystone/keystone.log + +# (Optional) The base directory used for relative --log-file +# paths. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and will change in J to honor RFC5424. (boolean +# value) +#use_syslog=false + +# (Optional) Enables or disables syslog rfc5424 format for +# logging. If enabled, prefixes the MSG part of the syslog +# message with APP-NAME (RFC5424). The format without the APP- +# NAME is deprecated in I, and will be removed in J. (boolean +# value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in keystone.openstack.common.policy +# + +# The JSON file that defines policies. (string value) +#policy_file=policy.json + +# Default rule. Enforced when a requested rule is not found. +# (string value) +#policy_default_rule=default + + +[assignment] + +# +# Options defined in keystone +# + +# Assignment backend driver. (string value) +#driver= + +# Toggle for assignment caching. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# TTL (in seconds) to cache assignment data. This has no +# effect unless global caching is enabled. 
(integer value) +#cache_time= + +# Maximum number of entities that will be returned in an +# assignment collection. (integer value) +#list_limit= + + +[auth] + +# +# Options defined in keystone +# + +# Default auth methods. (list value) +#methods=external,password,token + +# The password auth plugin module. (string value) +#password=keystone.auth.plugins.password.Password + +# The token auth plugin module. (string value) +#token=keystone.auth.plugins.token.Token + +# The external (REMOTE_USER) auth plugin module. (string +# value) +#external=keystone.auth.plugins.external.DefaultDomain + + +[cache] + +# +# Options defined in keystone +# + +# Prefix for building the configuration dictionary for the +# cache region. This should not need to be changed unless +# there is another dogpile.cache region with the same +# configuration name. (string value) +#config_prefix=cache.keystone + +# Default TTL, in seconds, for any cached item in the +# dogpile.cache region. This applies to any cached method that +# doesn't have an explicit cache expiration time defined for +# it. (integer value) +#expiration_time=600 + +# Dogpile.cache backend module. It is recommended that +# Memcache with pooling (keystone.cache.memcache_pool) or +# Redis (dogpile.cache.redis) be used in production +# deployments. Small workloads (single process) like devstack +# can use the dogpile.cache.memory backend. (string value) +#backend=keystone.common.cache.noop + +# Arguments supplied to the backend module. Specify this +# option once per argument to be passed to the dogpile.cache +# backend. Example format: "<argname>:<value>". (multi valued) +#backend_argument= + +# Proxy classes to import that will affect the way the +# dogpile.cache backend functions. See the dogpile.cache +# documentation on changing-backend-behavior. (list value) +#proxies= + +# Global toggle for all caching using the should_cache_fn +# mechanism. (boolean value) +#enabled=false + +# Extra debugging from the cache backend (cache keys, +# get/set/delete/etc calls). This is only really useful if you +# need to see the specific cache-backend get/set/delete calls +# with the keys/values. Typically this should be left set to +# false. (boolean value) +#debug_cache_backend=false + +# Memcache servers in the format of "host:port". +# (dogpile.cache.memcache and keystone.cache.memcache_pool +# backends only) (list value) +#memcache_servers=localhost:11211 + +# Number of seconds memcached server is considered dead before +# it is tried again. (dogpile.cache.memcache and +# keystone.cache.memcache_pool backends only) (integer value) +#memcache_dead_retry=300 + +# Timeout in seconds for every call to a server. +# (dogpile.cache.memcache and keystone.cache.memcache_pool +# backends only) (integer value) +#memcache_socket_timeout=3 + +# Max total number of open connections to every memcached +# server. (keystone.cache.memcache_pool backend only) (integer +# value) +#memcache_pool_maxsize=10 + +# Number of seconds a connection to memcached is held unused +# in the pool before it is closed. +# (keystone.cache.memcache_pool backend only) (integer value) +#memcache_pool_unused_timeout=60 + +# Number of seconds that an operation will wait to get a +# memcache client connection. (integer value) +#memcache_pool_connection_get_timeout=10 + + +[catalog] + +# +# Options defined in keystone +# + +# Catalog template file name for use with the template catalog +# backend. (string value) +#template_file=/etc/keystone/default_catalog.templates + +# Catalog backend driver. 
(string value) +#driver=keystone.catalog.backends.sql.Catalog + +# Toggle for catalog caching. This has no effect unless global +# caching is enabled. (boolean value) +#caching=true + +# Time to cache catalog data (in seconds). This has no effect +# unless global and catalog caching are enabled. (integer +# value) +#cache_time= + +# Maximum number of entities that will be returned in a +# catalog collection. (integer value) +#list_limit= + +# (Deprecated) List of possible substitutions for use in +# formatting endpoints. Use caution when modifying this list. +# It will give users with permission to create endpoints the +# ability to see those values in your configuration file. This +# option will be removed in Juno. (list value) +#endpoint_substitution_whitelist=tenant_id,user_id,public_bind_host,admin_bind_host,compute_host,compute_port,admin_port,public_port,public_endpoint,admin_endpoint + + +[credential] + +# +# Options defined in keystone +# + +# Credential backend driver. (string value) +#driver=keystone.credential.backends.sql.Credential + + +[database] + +# +# Options defined in oslo.db +# + +# The file name to use with SQLite. (string value) +#sqlite_db=oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous=true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the +# database. (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection=mysql://keystone:keystone@localhost/keystone +connection=mysql://{{db_user}}:{{db_password}}@{{db_host}}/{{db_name}} + +# The SQLAlchemy connection string to use to connect to the +# slave database. (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. 
+# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, +# 100=Everything. (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost. (boolean value) +#use_db_reconnect=false + +# Seconds between database connection retries. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between database connection +# retries up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between +# database connection retries. (integer value) +#db_max_retry_interval=10 + +# Maximum database connection retries before error is raised. +# Set to -1 to specify an infinite retry count. (integer +# value) +#db_max_retries=20 + + +[ec2] + +# +# Options defined in keystone +# + +# EC2Credential backend driver. (string value) +#driver=keystone.contrib.ec2.backends.sql.Ec2 + + +[endpoint_filter] + +# +# Options defined in keystone +# + +# Endpoint Filter backend driver (string value) +#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter + +# Toggle to return all active endpoints if no filter exists. +# (boolean value) +#return_all_endpoints_if_no_filter=true + + +[endpoint_policy] + +# +# Options defined in keystone +# + +# Endpoint policy backend driver (string value) +#driver=keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy + + +[federation] + +# +# Options defined in keystone +# + +# Federation backend driver. (string value) +#driver=keystone.contrib.federation.backends.sql.Federation + +# Value to be used when filtering assertion parameters from +# the environment. (string value) +#assertion_prefix= + + +[identity] + +# +# Options defined in keystone +# + +# This references the domain to use for all Identity API v2 +# requests (which are not aware of domains). A domain with +# this ID will be created for you by keystone-manage db_sync +# in migration 008. The domain referenced by this ID cannot be +# deleted on the v3 API, to prevent accidentally breaking the +# v2 API. There is nothing special about this domain, other +# than the fact that it must exist to order to maintain +# support for your v2 clients. (string value) +#default_domain_id=default + +# A subset (or all) of domains can have their own identity +# driver, each with their own partial configuration file in a +# domain configuration directory. Only values specific to the +# domain need to be placed in the domain specific +# configuration file. This feature is disabled by default; set +# to true to enable. (boolean value) +#domain_specific_drivers_enabled=false + +# Path for Keystone to locate the domain specific identity +# configuration files if domain_specific_drivers_enabled is +# set to true. (string value) +#domain_config_dir=/etc/keystone/domains + +# Identity backend driver. (string value) +#driver=keystone.identity.backends.sql.Identity + +# Maximum supported length for user passwords; decrease to +# improve performance. 
(integer value) +#max_password_length=4096 + +# Maximum number of entities that will be returned in an +# identity collection. (integer value) +#list_limit= + + +[identity_mapping] + +# +# Options defined in keystone +# + +# Keystone Identity Mapping backend driver. (string value) +#driver=keystone.identity.mapping_backends.sql.Mapping + +# Public ID generator for user and group entities. The +# Keystone identity mapper only supports generators that +# produce no more than 64 characters. (string value) +#generator=keystone.identity.id_generators.sha256.Generator + +# The format of user and group IDs changed in Juno for +# backends that do not generate UUIDs (e.g. LDAP), with +# keystone providing a hash mapping to the underlying +# attribute in LDAP. By default this mapping is disabled, +# which ensures that existing IDs will not change. Even when +# the mapping is enabled by using domain specific drivers, any +# users and groups from the default domain being handled by +# LDAP will still not be mapped to ensure their IDs remain +# backward compatible. Setting this value to False will enable +# the mapping for even the default LDAP driver. It is only +# safe to do this if you do not already have assignments for +# users and groups from the default LDAP domain, and it is +# acceptable for Keystone to provide the different IDs to +# clients than it did previously. Typically this means that +# the only time you can set this value to False is when +# configuring a fresh installation. (boolean value) +#backward_compatible_ids=true + + +[kvs] + +# +# Options defined in keystone +# + +# Extra dogpile.cache backend modules to register with the +# dogpile.cache library. (list value) +#backends= + +# Prefix for building the configuration dictionary for the KVS +# region. This should not need to be changed unless there is +# another dogpile.cache region with the same configuration +# name. (string value) +#config_prefix=keystone.kvs + +# Toggle to disable using a key-mangling function to ensure +# fixed length keys. This is toggle-able for debugging +# purposes, it is highly recommended to always leave this set +# to true. (boolean value) +#enable_key_mangler=true + +# Default lock timeout for distributed locking. (integer +# value) +#default_lock_timeout=5 + + +[ldap] + +# +# Options defined in keystone +# + +# URL for connecting to the LDAP server. (string value) +#url=ldap://localhost + +# User BindDN to query the LDAP server. (string value) +#user= + +# Password for the BindDN to query the LDAP server. (string +# value) +#password= + +# LDAP server suffix (string value) +#suffix=cn=example,cn=com + +# If true, will add a dummy member to groups. This is required +# if the objectclass for groups requires the "member" +# attribute. (boolean value) +#use_dumb_member=false + +# DN of the "dummy member" to use when "use_dumb_member" is +# enabled. (string value) +#dumb_member=cn=dumb,dc=nonexistent + +# Delete subtrees using the subtree delete control. Only +# enable this option if your LDAP server supports subtree +# deletion. (boolean value) +#allow_subtree_delete=false + +# The LDAP scope for queries, this can be either "one" +# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). +# (string value) +#query_scope=one + +# Maximum results per page; a value of zero ("0") disables +# paging. (integer value) +#page_size=0 + +# The LDAP dereferencing option for queries. This can be +# either "never", "searching", "always", "finding" or +# "default". 
The "default" option falls back to using default +# dereferencing configured by your ldap.conf. (string value) +#alias_dereferencing=default + +# Sets the LDAP debugging level for LDAP calls. A value of 0 +# means that debugging is not enabled. This value is a +# bitmask, consult your LDAP documentation for possible +# values. (integer value) +#debug_level= + +# Override the system's default referral chasing behavior for +# queries. (boolean value) +#chase_referrals= + +# Search base for users. (string value) +#user_tree_dn= + +# LDAP search filter for users. (string value) +#user_filter= + +# LDAP objectclass for users. (string value) +#user_objectclass=inetOrgPerson + +# LDAP attribute mapped to user id. WARNING: must not be a +# multivalued attribute. (string value) +#user_id_attribute=cn + +# LDAP attribute mapped to user name. (string value) +#user_name_attribute=sn + +# LDAP attribute mapped to user email. (string value) +#user_mail_attribute=mail + +# LDAP attribute mapped to password. (string value) +#user_pass_attribute=userPassword + +# LDAP attribute mapped to user enabled flag. (string value) +#user_enabled_attribute=enabled + +# Invert the meaning of the boolean enabled values. Some LDAP +# servers use a boolean lock attribute where "true" means an +# account is disabled. Setting "user_enabled_invert = true" +# will allow these lock attributes to be used. This setting +# will have no effect if "user_enabled_mask" or +# "user_enabled_emulation" settings are in use. (boolean +# value) +#user_enabled_invert=false + +# Bitmask integer to indicate the bit that the enabled value +# is stored in if the LDAP server represents "enabled" as a +# bit on an integer rather than a boolean. A value of "0" +# indicates the mask is not used. If this is not set to "0" +# the typical value is "2". This is typically used when +# "user_enabled_attribute = userAccountControl". (integer +# value) +#user_enabled_mask=0 + +# Default value to enable users. This should match an +# appropriate int value if the LDAP server uses non-boolean +# (bitmask) values to indicate if a user is enabled or +# disabled. If this is not set to "True" the typical value is +# "512". This is typically used when "user_enabled_attribute = +# userAccountControl". (string value) +#user_enabled_default=True + +# List of attributes stripped off the user on update. (list +# value) +#user_attribute_ignore=default_project_id,tenants + +# LDAP attribute mapped to default_project_id for users. +# (string value) +#user_default_project_id_attribute= + +# Allow user creation in LDAP backend. (boolean value) +#user_allow_create=true + +# Allow user updates in LDAP backend. (boolean value) +#user_allow_update=true + +# Allow user deletion in LDAP backend. (boolean value) +#user_allow_delete=true + +# If true, Keystone uses an alternative method to determine if +# a user is enabled or not by checking if they are a member of +# the "user_enabled_emulation_dn" group. (boolean value) +#user_enabled_emulation=false + +# DN of the group entry to hold enabled users when using +# enabled emulation. (string value) +#user_enabled_emulation_dn= + +# List of additional LDAP attributes used for mapping +# additional attribute mappings for users. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. 
(list value) +#user_additional_attribute_mapping= + +# Search base for projects (string value) +# Deprecated group/name - [ldap]/tenant_tree_dn +#project_tree_dn= + +# LDAP search filter for projects. (string value) +# Deprecated group/name - [ldap]/tenant_filter +#project_filter= + +# LDAP objectclass for projects. (string value) +# Deprecated group/name - [ldap]/tenant_objectclass +#project_objectclass=groupOfNames + +# LDAP attribute mapped to project id. (string value) +# Deprecated group/name - [ldap]/tenant_id_attribute +#project_id_attribute=cn + +# LDAP attribute mapped to project membership for user. +# (string value) +# Deprecated group/name - [ldap]/tenant_member_attribute +#project_member_attribute=member + +# LDAP attribute mapped to project name. (string value) +# Deprecated group/name - [ldap]/tenant_name_attribute +#project_name_attribute=ou + +# LDAP attribute mapped to project description. (string value) +# Deprecated group/name - [ldap]/tenant_desc_attribute +#project_desc_attribute=description + +# LDAP attribute mapped to project enabled. (string value) +# Deprecated group/name - [ldap]/tenant_enabled_attribute +#project_enabled_attribute=enabled + +# LDAP attribute mapped to project domain_id. (string value) +# Deprecated group/name - [ldap]/tenant_domain_id_attribute +#project_domain_id_attribute=businessCategory + +# List of attributes stripped off the project on update. (list +# value) +# Deprecated group/name - [ldap]/tenant_attribute_ignore +#project_attribute_ignore= + +# Allow project creation in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_create +#project_allow_create=true + +# Allow project update in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_update +#project_allow_update=true + +# Allow project deletion in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_delete +#project_allow_delete=true + +# If true, Keystone uses an alternative method to determine if +# a project is enabled or not by checking if they are a member +# of the "project_enabled_emulation_dn" group. (boolean value) +# Deprecated group/name - [ldap]/tenant_enabled_emulation +#project_enabled_emulation=false + +# DN of the group entry to hold enabled projects when using +# enabled emulation. (string value) +# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn +#project_enabled_emulation_dn= + +# Additional attribute mappings for projects. Attribute +# mapping format is :, where ldap_attr +# is the attribute in the LDAP entry and user_attr is the +# Identity API attribute. (list value) +# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping +#project_additional_attribute_mapping= + +# Search base for roles. (string value) +#role_tree_dn= + +# LDAP search filter for roles. (string value) +#role_filter= + +# LDAP objectclass for roles. (string value) +#role_objectclass=organizationalRole + +# LDAP attribute mapped to role id. (string value) +#role_id_attribute=cn + +# LDAP attribute mapped to role name. (string value) +#role_name_attribute=ou + +# LDAP attribute mapped to role membership. (string value) +#role_member_attribute=roleOccupant + +# List of attributes stripped off the role on update. (list +# value) +#role_attribute_ignore= + +# Allow role creation in LDAP backend. (boolean value) +#role_allow_create=true + +# Allow role update in LDAP backend. (boolean value) +#role_allow_update=true + +# Allow role deletion in LDAP backend. 
(boolean value) +#role_allow_delete=true + +# Additional attribute mappings for roles. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#role_additional_attribute_mapping= + +# Search base for groups. (string value) +#group_tree_dn= + +# LDAP search filter for groups. (string value) +#group_filter= + +# LDAP objectclass for groups. (string value) +#group_objectclass=groupOfNames + +# LDAP attribute mapped to group id. (string value) +#group_id_attribute=cn + +# LDAP attribute mapped to group name. (string value) +#group_name_attribute=ou + +# LDAP attribute mapped to show group membership. (string +# value) +#group_member_attribute=member + +# LDAP attribute mapped to group description. (string value) +#group_desc_attribute=description + +# List of attributes stripped off the group on update. (list +# value) +#group_attribute_ignore= + +# Allow group creation in LDAP backend. (boolean value) +#group_allow_create=true + +# Allow group update in LDAP backend. (boolean value) +#group_allow_update=true + +# Allow group deletion in LDAP backend. (boolean value) +#group_allow_delete=true + +# Additional attribute mappings for groups. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#group_additional_attribute_mapping= + +# CA certificate file path for communicating with LDAP +# servers. (string value) +#tls_cacertfile= + +# CA certificate directory path for communicating with LDAP +# servers. (string value) +#tls_cacertdir= + +# Enable TLS for communicating with LDAP servers. (boolean +# value) +#use_tls=false + +# Valid options for tls_req_cert are demand, never, and allow. +# (string value) +#tls_req_cert=demand + +# Enable LDAP connection pooling. (boolean value) +#use_pool=false + +# Connection pool size. (integer value) +#pool_size=10 + +# Maximum count of reconnect trials. (integer value) +#pool_retry_max=3 + +# Time span in seconds to wait between two reconnect trials. +# (floating point value) +#pool_retry_delay=0.1 + +# Connector timeout in seconds. Value -1 indicates indefinite +# wait for response. (integer value) +#pool_connection_timeout=-1 + +# Connection lifetime in seconds. (integer value) +#pool_connection_lifetime=600 + +# Enable LDAP connection pooling for end user authentication. +# If use_pool is disabled, then this setting is meaningless +# and is not used at all. (boolean value) +#use_auth_pool=false + +# End user auth connection pool size. (integer value) +#auth_pool_size=100 + +# End user auth connection lifetime in seconds. (integer +# value) +#auth_pool_connection_lifetime=60 + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[memcache] + +# +# Options defined in keystone +# + +# Memcache servers in the format of "host:port". (list value) +#servers=localhost:11211 + +# Number of seconds memcached server is considered dead before +# it is tried again. This is used by the key value store +# system (e.g. 
token pooled memcached persistence backend). +# (integer value) +#dead_retry=300 + +# Timeout in seconds for every call to a server. This is used +# by the key value store system (e.g. token pooled memcached +# persistence backend). (integer value) +#socket_timeout=3 + +# Max total number of open connections to every memcached +# server. This is used by the key value store system (e.g. +# token pooled memcached persistence backend). (integer value) +#pool_maxsize=10 + +# Number of seconds a connection to memcached is held unused +# in the pool before it is closed. This is used by the key +# value store system (e.g. token pooled memcached persistence +# backend). (integer value) +#pool_unused_timeout=60 + +# Number of seconds that an operation will wait to get a +# memcache client connection. This is used by the key value +# store system (e.g. token pooled memcached persistence +# backend). (integer value) +#pool_connection_get_timeout=10 + + +[oauth1] + +# +# Options defined in keystone +# + +# Credential backend driver. (string value) +#driver=keystone.contrib.oauth1.backends.sql.OAuth1 + +# Duration (in seconds) for the OAuth Request Token. (integer +# value) +#request_token_duration=28800 + +# Duration (in seconds) for the OAuth Access Token. (integer +# value) +#access_token_duration=86400 + + +[os_inherit] + +# +# Options defined in keystone +# + +# role-assignment inheritance to projects from owning domain +# can be optionally enabled. (boolean value) +#enabled=false + + +[paste_deploy] + +# +# Options defined in keystone +# + +# Name of the paste configuration file that defines the +# available pipelines. (string value) +#config_file=/usr/share/keystone/keystone-dist-paste.ini + + +[policy] + +# +# Options defined in keystone +# + +# Policy backend driver. (string value) +#driver=keystone.policy.backends.sql.Policy + +# Maximum number of entities that will be returned in a policy +# collection. (integer value) +#list_limit= + + +[revoke] + +# +# Options defined in keystone +# + +# An implementation of the backend for persisting revocation +# events. (string value) +#driver=keystone.contrib.revoke.backends.kvs.Revoke + +# This value (calculated in seconds) is added to token +# expiration before a revocation event may be removed from the +# backend. (integer value) +#expiration_buffer=1800 + +# Toggle for revocation event caching. This has no effect +# unless global caching is enabled. (boolean value) +#caching=true + + +[saml] + +# +# Options defined in keystone +# + +# Default TTL, in seconds, for any generated SAML assertion +# created by Keystone. (integer value) +#assertion_expiration_time=3600 + +# Binary to be called for XML signing. Install the appropriate +# package, specify absolute path or adjust your PATH +# environment variable if the binary cannot be found. (string +# value) +#xmlsec1_binary=xmlsec1 + +# Path of the certfile for SAML signing. For non-production +# environments, you may be interested in using `keystone- +# manage pki_setup` to generate self-signed certificates. +# Note, the path cannot contain a comma. (string value) +#certfile=/etc/keystone/ssl/certs/signing_cert.pem + +# Path of the keyfile for SAML signing. Note, the path cannot +# contain a comma. (string value) +#keyfile=/etc/keystone/ssl/private/signing_key.pem + +# Entity ID value for unique Identity Provider identification. +# Usually FQDN is set with a suffix. A value is required to +# generate IDP Metadata. 
For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
+# (string value)
+#idp_entity_id=
+
+# Identity Provider Single-Sign-On service value, required in
+# the Identity Provider's metadata. A value is required to
+# generate IDP Metadata. For example:
+# https://keystone.example.com/v3/OS-FEDERATION/saml2/sso
+# (string value)
+#idp_sso_endpoint=
+
+# Language used by the organization. (string value)
+#idp_lang=en
+
+# Organization name the installation belongs to. (string
+# value)
+#idp_organization_name=
+
+# Organization name to be displayed. (string value)
+#idp_organization_display_name=
+
+# URL of the organization. (string value)
+#idp_organization_url=
+
+# Company of contact person. (string value)
+#idp_contact_company=
+
+# Given name of contact person. (string value)
+#idp_contact_name=
+
+# Surname of contact person. (string value)
+#idp_contact_surname=
+
+# Email address of contact person. (string value)
+#idp_contact_email=
+
+# Telephone number of contact person. (string value)
+#idp_contact_telephone=
+
+# Contact type. Allowed values are: technical, support,
+# administrative, billing, and other. (string value)
+#idp_contact_type=other
+
+# Path to the Identity Provider Metadata file. This file
+# should be generated with the keystone-manage
+# saml_idp_metadata command. (string value)
+#idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=
+
+# Path of the certfile for token signing. For non-production
+# environments, you may be interested in using `keystone-
+# manage pki_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key for token signing. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key size (in bits) for token signing cert (auto generated
+# certificate). (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate). (integer value)
+#valid_days=3650
+
+# Certificate subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the Keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. For non-production
+# environments, you may be interested in using `keystone-
+# manage ssl_setup` to generate self-signed certificates.
+# (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Require client certificate. (boolean value)
+#cert_required=false
+
+# SSL key length (in bits) (auto generated certificate).
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate). (integer value)
+#valid_days=3650
+
+# SSL certificate subject (auto generated certificate).
+# (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost + + +[stats] + +# +# Options defined in keystone +# + +# Stats backend driver. (string value) +#driver=keystone.contrib.stats.backends.kvs.Stats + + +[token] + +# +# Options defined in keystone +# + +# External auth mechanisms that should add bind information to +# token, e.g., kerberos,x509. (list value) +#bind= + +# Enforcement policy on tokens presented to Keystone with bind +# information. One of disabled, permissive, strict, required +# or a specifically required bind mode, e.g., kerberos or x509 +# to require binding to that authentication. (string value) +#enforce_token_bind=permissive + +# Amount of time a token should remain valid (in seconds). +# (integer value) +#expiration=3600 + +# Controls the token construction, validation, and revocation +# operations. Core providers are +# "keystone.token.providers.[pkiz|pki|uuid].Provider". The +# default provider is uuid. (string value) +#provider= + +# Token persistence backend driver. (string value) +#driver=keystone.token.persistence.backends.sql.Token + +# Toggle for token system caching. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# Time to cache the revocation list and the revocation events +# if revoke extension is enabled (in seconds). This has no +# effect unless global and token caching are enabled. (integer +# value) +#revocation_cache_time=3600 + +# Time to cache tokens (in seconds). This has no effect unless +# global and token caching are enabled. (integer value) +#cache_time= + +# Revoke token by token identifier. Setting revoke_by_id to +# true enables various forms of enumerating tokens, e.g. `list +# tokens for user`. These enumerations are processed to +# determine the list of tokens to revoke. Only disable if you +# are switching to using the Revoke extension with a backend +# other than KVS, which stores events in memory. (boolean +# value) +#revoke_by_id=true + +# The hash algorithm to use for PKI tokens. This can be set to +# any algorithm that hashlib supports. WARNING: Before +# changing this value, the auth_token middleware must be +# configured with the hash_algorithms, otherwise token +# revocation will not be processed correctly. (string value) +#hash_algorithm=md5 + + +[trust] + +# +# Options defined in keystone +# + +# Delegation and impersonation features can be optionally +# disabled. (boolean value) +#enabled=true + +# Trust backend driver. 
(string value) +#driver=keystone.trust.backends.sql.Trust + + diff --git a/x/resources/keystone_config/templates/logging.conf b/x/resources/keystone_config/templates/logging.conf new file mode 100644 index 00000000..6cb8c425 --- /dev/null +++ b/x/resources/keystone_config/templates/logging.conf @@ -0,0 +1,65 @@ +[loggers] +keys=root,access + +[handlers] +keys=production,file,access_file,devel + +[formatters] +keys=minimal,normal,debug + + +########### +# Loggers # +########### + +[logger_root] +level=WARNING +handlers=file + +[logger_access] +level=INFO +qualname=access +handlers=access_file + + +################ +# Log Handlers # +################ + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal +args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=handlers.WatchedFileHandler +level=WARNING +formatter=normal +args=('error.log',) + +[handler_access_file] +class=handlers.WatchedFileHandler +level=INFO +formatter=minimal +args=('access.log',) + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + + +################## +# Log Formatters # +################## + +[formatter_minimal] +format=%(message)s + +[formatter_normal] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/x/resources/keystone_config/templates/policy.json b/x/resources/keystone_config/templates/policy.json new file mode 100644 index 00000000..af65205e --- /dev/null +++ b/x/resources/keystone_config/templates/policy.json @@ -0,0 +1,171 @@ +{ + "admin_required": "role:admin or is_admin:1", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner" : "user_id:%(user_id)s", + "admin_or_owner": "rule:admin_required or rule:owner", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:admin_required", + "identity:update_region": "rule:admin_required", + "identity:delete_region": "rule:admin_required", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:admin_required", + "identity:update_service": "rule:admin_required", + "identity:delete_service": "rule:admin_required", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": "rule:admin_required", + "identity:create_endpoint": "rule:admin_required", + "identity:update_endpoint": "rule:admin_required", + "identity:delete_endpoint": "rule:admin_required", + + "identity:get_domain": "rule:admin_required", + "identity:list_domains": "rule:admin_required", + "identity:create_domain": "rule:admin_required", + "identity:update_domain": "rule:admin_required", + "identity:delete_domain": "rule:admin_required", + + "identity:get_project": "rule:admin_required", + "identity:list_projects": "rule:admin_required", + "identity:list_user_projects": "rule:admin_or_owner", + "identity:create_project": "rule:admin_required", + "identity:update_project": "rule:admin_required", + "identity:delete_project": "rule:admin_required", + + "identity:get_user": "rule:admin_required", + "identity:list_users": "rule:admin_required", + "identity:create_user": "rule:admin_required", + "identity:update_user": "rule:admin_required", + "identity:delete_user": "rule:admin_required", + "identity:change_password": "rule:admin_or_owner", + 
+ "identity:get_group": "rule:admin_required", + "identity:list_groups": "rule:admin_required", + "identity:list_groups_for_user": "rule:admin_or_owner", + "identity:create_group": "rule:admin_required", + "identity:update_group": "rule:admin_required", + "identity:delete_group": "rule:admin_required", + "identity:list_users_in_group": "rule:admin_required", + "identity:remove_user_from_group": "rule:admin_required", + "identity:check_user_in_group": "rule:admin_required", + "identity:add_user_to_group": "rule:admin_required", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_or_owner", + "identity:ec2_list_credentials": "rule:admin_or_owner", + "identity:ec2_create_credential": "rule:admin_or_owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:admin_required", + "identity:update_role": "rule:admin_required", + "identity:delete_role": "rule:admin_required", + + "identity:check_grant": "rule:admin_required", + "identity:list_grants": "rule:admin_required", + "identity:create_grant": "rule:admin_required", + "identity:revoke_grant": "rule:admin_required", + + "identity:list_role_assignments": "rule:admin_required", + + "identity:get_policy": "rule:admin_required", + "identity:list_policies": "rule:admin_required", + "identity:create_policy": "rule:admin_required", + "identity:update_policy": "rule:admin_required", + "identity:delete_policy": "rule:admin_required", + + "identity:check_token": "rule:admin_required", + "identity:validate_token": "rule:service_or_admin", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_owner", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:get_trust": "rule:admin_or_owner", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:check_role_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + "identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + 
"identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:admin_required", + "identity:list_identity_providers": "rule:admin_required", + "identity:get_identity_providers": "rule:admin_required", + "identity:update_identity_provider": "rule:admin_required", + "identity:delete_identity_provider": "rule:admin_required", + + "identity:create_protocol": "rule:admin_required", + "identity:update_protocol": "rule:admin_required", + "identity:get_protocol": "rule:admin_required", + "identity:list_protocols": "rule:admin_required", + "identity:delete_protocol": "rule:admin_required", + + "identity:create_mapping": "rule:admin_required", + "identity:get_mapping": "rule:admin_required", + "identity:list_mappings": "rule:admin_required", + "identity:delete_mapping": "rule:admin_required", + "identity:update_mapping": "rule:admin_required", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "", + + "identity:create_policy_association_for_endpoint": "rule:admin_required", + "identity:check_policy_association_for_endpoint": "rule:admin_required", + "identity:delete_policy_association_for_endpoint": "rule:admin_required", + "identity:create_policy_association_for_service": "rule:admin_required", + "identity:check_policy_association_for_service": "rule:admin_required", + "identity:delete_policy_association_for_service": "rule:admin_required", + "identity:create_policy_association_for_region_and_service": "rule:admin_required", + "identity:check_policy_association_for_region_and_service": "rule:admin_required", + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", + "identity:get_policy_for_endpoint": "rule:admin_required", + "identity:list_endpoints_for_policy": "rule:admin_required" +} diff --git a/x/resources/keystone_service/actions/remove.yml b/x/resources/keystone_service/actions/remove.yml new file mode 100644 index 00000000..76142acf --- /dev/null +++ b/x/resources/keystone_service/actions/remove.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/x/resources/keystone_service/actions/run.yml b/x/resources/keystone_service/actions/run.yml new file mode 100644 index 00000000..305ff7e1 --- /dev/null +++ b/x/resources/keystone_service/actions/run.yml @@ -0,0 +1,17 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - name: keystone container + docker: + command: /bin/bash -c "keystone-manage db_sync && /usr/bin/keystone-all" + name: {{ name }} + image: {{ image }} + state: running + expose: + - 5000 + - 35357 + ports: + - {{ port }}:5000 + - {{ admin_port }}:35357 + volumes: + - {{ config_dir }}:/etc/keystone diff --git a/x/resources/keystone_service/meta.yaml b/x/resources/keystone_service/meta.yaml new file mode 100644 index 00000000..1e3add8d --- /dev/null +++ b/x/resources/keystone_service/meta.yaml @@ -0,0 +1,11 @@ +id: keystone 
+handler: ansible
+version: 1.0.0
+input:
+  image: kollaglue/centos-rdo-keystone
+  config_dir:
+  port:
+  admin_port:
+  ip:
+  ssh_key:
+  ssh_user:
diff --git a/x/resources/keystone_user/actions/remove.yml b/x/resources/keystone_user/actions/remove.yml
new file mode 100644
index 00000000..492749ef
--- /dev/null
+++ b/x/resources/keystone_user/actions/remove.yml
@@ -0,0 +1,6 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: keystone user
+      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ user={{user_name}} tenant={{tenant_name}} state=absent
+    - keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ tenant={{tenant_name}} state=absent
diff --git a/x/resources/keystone_user/actions/run.yml b/x/resources/keystone_user/actions/run.yml
new file mode 100644
index 00000000..1a7a5469
--- /dev/null
+++ b/x/resources/keystone_user/actions/run.yml
@@ -0,0 +1,6 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: keystone tenant
+      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ tenant={{tenant_name}} state=present
+    - keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ user={{user_name}} password={{user_password}} tenant={{tenant_name}} state=present
diff --git a/x/resources/keystone_user/meta.yaml b/x/resources/keystone_user/meta.yaml
new file mode 100644
index 00000000..6293937c
--- /dev/null
+++ b/x/resources/keystone_user/meta.yaml
@@ -0,0 +1,14 @@
+id: keystone_user
+handler: ansible
+version: 1.0.0
+input:
+  keystone_host:
+  keystone_port:
+  login_user:
+  login_token:
+  user_name:
+  user_password:
+  tenant_name:
+  ip:
+  ssh_key:
+  ssh_user:
diff --git a/x/resources/mariadb_db/actions/remove.yml b/x/resources/mariadb_db/actions/remove.yml
new file mode 100644
index 00000000..594061a4
--- /dev/null
+++ b/x/resources/mariadb_db/actions/remove.yml
@@ -0,0 +1,11 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: mariadb db
+      mysql_db:
+        name: {{db_name}}
+        state: absent
+        login_user: root
+        login_password: {{login_password}}
+        login_port: {{login_port}}
+        login_host: 127.0.0.1
diff --git a/x/resources/mariadb_db/actions/run.yml b/x/resources/mariadb_db/actions/run.yml
new file mode 100644
index 00000000..0efb73ed
--- /dev/null
+++ b/x/resources/mariadb_db/actions/run.yml
@@ -0,0 +1,11 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: mariadb db
+      mysql_db:
+        name: {{db_name}}
+        state: present
+        login_user: root
+        login_password: {{login_password}}
+        login_port: {{login_port}}
+        login_host: 127.0.0.1
diff --git a/x/resources/mariadb_db/meta.yaml b/x/resources/mariadb_db/meta.yaml
new file mode 100644
index 00000000..609814ca
--- /dev/null
+++ b/x/resources/mariadb_db/meta.yaml
@@ -0,0 +1,14 @@
+id: mariadb_db
+handler: ansible
+version: 1.0.0
+actions:
+  run: run.yml
+  remove: remove.yml
+input:
+  db_name:
+  login_password:
+  login_port:
+  login_user:
+  ip:
+  ssh_key:
+  ssh_user:
diff --git a/x/resources/mariadb_service/actions/remove.yml b/x/resources/mariadb_service/actions/remove.yml
new file mode 100644
index 00000000..cb1bc73b
--- /dev/null
+++ b/x/resources/mariadb_service/actions/remove.yml
@@ -0,0 +1,8 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: mariadb container
+      docker:
+        name: {{ name }}
+        image: {{ image }}
+        state: absent
diff --git a/x/resources/mariadb_service/actions/run.yml b/x/resources/mariadb_service/actions/run.yml
new file mode 100644
index 00000000..b1e9d87f
--- /dev/null
+++ b/x/resources/mariadb_service/actions/run.yml
@@ -0,0 +1,12 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: mariadb container
+      docker:
+        name: {{ name }}
+        image: {{ image }}
+        state: running
+        ports:
+          - {{ port }}:3306
+        env:
+          MYSQL_ROOT_PASSWORD: {{ root_password }}
diff --git a/x/resources/mariadb_service/meta.yaml b/x/resources/mariadb_service/meta.yaml
new file mode 100644
index 00000000..6fa200d0
--- /dev/null
+++ b/x/resources/mariadb_service/meta.yaml
@@ -0,0 +1,10 @@
+id: mariadb
+handler: ansible
+version: 1.0.0
+input:
+  image:
+  root_password:
+  port:
+  ip:
+  ssh_key:
+  ssh_user:
diff --git a/x/resources/mariadb_user/actions/remove.yml b/x/resources/mariadb_user/actions/remove.yml
new file mode 100644
index 00000000..7f6939d4
--- /dev/null
+++ b/x/resources/mariadb_user/actions/remove.yml
@@ -0,0 +1,11 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: mariadb user
+      mysql_user:
+        name: {{new_user_name}}
+        state: absent
+        login_user: root
+        login_password: {{login_password}}
+        login_port: {{login_port}}
+        login_host: 127.0.0.1
diff --git a/x/resources/mariadb_user/actions/run.yml b/x/resources/mariadb_user/actions/run.yml
new file mode 100644
index 00000000..b0981803
--- /dev/null
+++ b/x/resources/mariadb_user/actions/run.yml
@@ -0,0 +1,14 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - name: mariadb user
+      mysql_user:
+        name: {{new_user_name}}
+        password: {{new_user_password}}
+        priv: {{db_name}}.*:ALL
+        host: '%'
+        state: present
+        login_user: root
+        login_password: {{login_password}}
+        login_port: {{login_port}}
+        login_host: 127.0.0.1
diff --git a/x/resources/mariadb_user/meta.yaml b/x/resources/mariadb_user/meta.yaml
new file mode 100644
index 00000000..b45f8e47
--- /dev/null
+++ b/x/resources/mariadb_user/meta.yaml
@@ -0,0 +1,16 @@
+id: mariadb_user
+handler: ansible
+version: 1.0.0
+actions:
+  run: run.yml
+  remove: remove.yml
+input:
+  new_user_password:
+  new_user_name:
+  db_name:
+  login_password:
+  login_port:
+  login_user:
+  ip:
+  ssh_key:
+  ssh_user:
diff --git a/x/resources/nova/actions/remove.yml b/x/resources/nova/actions/remove.yml
new file mode 100644
index 00000000..76142acf
--- /dev/null
+++ b/x/resources/nova/actions/remove.yml
@@ -0,0 +1,6 @@
+# TODO
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - shell: docker stop {{ name }}
+    - shell: docker rm {{ name }}
diff --git a/x/resources/nova/actions/run.yml b/x/resources/nova/actions/run.yml
new file mode 100644
index 00000000..e223fe8f
--- /dev/null
+++ b/x/resources/nova/actions/run.yml
@@ -0,0 +1,6 @@
+# TODO
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - shell: docker run -d --net="host" --privileged
+        --name {{ name }} {{ image }}
diff --git a/x/resources/nova/meta.yaml b/x/resources/nova/meta.yaml
new file mode 100644
index 00000000..0591a410
--- /dev/null
+++ b/x/resources/nova/meta.yaml
@@ -0,0 +1,7 @@
+id: nova
+handler: ansible
+version: 1.0.0
+input:
+  ip:
+  port: 8774
+  image: # TODO
diff --git a/x/resources/ro_node/meta.yaml b/x/resources/ro_node/meta.yaml
new file mode 100644
index 00000000..1ceaa0fc
--- /dev/null
+++ b/x/resources/ro_node/meta.yaml
@@ -0,0 +1,8 @@
+id: ro_node
+handler: none
+version: 1.0.0
+actions:
+input:
+  ip:
+  ssh_key:
+  ssh_user:
diff --git a/x/signals.py b/x/signals.py
new file mode 100644
index 00000000..111afd68
--- /dev/null
+++ b/x/signals.py
@@ -0,0 +1,201 @@
+# -*- coding: UTF-8 -*-
+from collections import defaultdict
+import itertools
+import networkx as nx
+import os
+
+import db
+
+from x import utils
+
+
+CLIENTS_CONFIG_KEY = 'clients-data-file'
+CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY)
+
+
+class Connections(object):
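+    """Registry of emitter -> receiver input connections.
+
+    CLIENTS maps emitter name to {emitter input: [[receiver name,
+    receiver input], ...]} and is persisted with
+    utils.save_to_config_file, so reconnect_all() can rebuild the
+    subscriptions later.
+    """
+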
+    @staticmethod
+    def add(emitter, src, receiver, dst):
+        if src not in emitter.args:
+            return
+
+        # TODO: implement general circular detection, this one is simple
+        if [emitter.name, src] in CLIENTS.get(receiver.name, {}).get(dst, []):
+            raise Exception('Attempted to create cycle in dependencies. Not nice.')
+
+        CLIENTS.setdefault(emitter.name, {})
+        CLIENTS[emitter.name].setdefault(src, [])
+        if [receiver.name, dst] not in CLIENTS[emitter.name][src]:
+            CLIENTS[emitter.name][src].append([receiver.name, dst])
+
+        utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
+
+    @staticmethod
+    def remove(emitter, src, receiver, dst):
+        CLIENTS[emitter.name][src] = [
+            destination for destination in CLIENTS[emitter.name][src]
+            if destination != [receiver.name, dst]
+        ]
+
+        utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
+
+    @staticmethod
+    def reconnect_all():
+        """Reconstruct connections for resource inputs from CLIENTS.
+
+        :return:
+        """
+        for emitter_name, dest_dict in CLIENTS.items():
+            emitter = db.get_resource(emitter_name)
+            for emitter_input, destinations in dest_dict.items():
+                for receiver_name, receiver_input in destinations:
+                    receiver = db.get_resource(receiver_name)
+                    emitter.args[emitter_input].subscribe(
+                        receiver.args[receiver_input])
+
+    @staticmethod
+    def clear():
+        global CLIENTS
+
+        CLIENTS = {}
+
+        path = utils.read_config()[CLIENTS_CONFIG_KEY]
+        if os.path.exists(path):
+            os.remove(path)
+
+
+def guess_mapping(emitter, receiver):
+    """Guess connection mapping between emitter and receiver.
+
+    Suppose emitter and receiver have common inputs:
+    ip, ssh_key, ssh_user
+
+    Then we return a connection mapping like this:
+
+    {
+        'ip': '<receiver>.ip',
+        'ssh_key': '<receiver>.ssh_key',
+        'ssh_user': '<receiver>.ssh_user'
+    }
+
+    :param emitter:
+    :param receiver:
+    :return:
+    """
+    guessed = {}
+    for key in emitter.requires:
+        if key in receiver.requires:
+            guessed[key] = key
+
+    return guessed
+
+
+def connect(emitter, receiver, mapping=None):
+    guessed = guess_mapping(emitter, receiver)
+    mapping = mapping or guessed
+
+    for src, dst in mapping.items():
+        # Disconnect all receiver inputs
+        # Check if receiver input is of list type first
+        if receiver.args[dst].type_ != 'list':
+            disconnect_receiver_by_input(receiver, dst)
+
+        emitter.args[src].subscribe(receiver.args[dst])
+
+    receiver.save()
+
+
+def disconnect(emitter, receiver):
+    for src, destinations in CLIENTS[emitter.name].items():
+        disconnect_by_src(emitter, src, receiver)
+
+        for destination in destinations:
+            receiver_input = destination[1]
+            if receiver.args[receiver_input].type_ != 'list':
+                print 'Removing input {} from {}'.format(receiver_input, receiver.name)
+            emitter.args[src].unsubscribe(receiver.args[receiver_input])
+
+
+def disconnect_receiver_by_input(receiver, input):
+    """Find receiver connection by input and disconnect it.
+ + :param receiver: + :param input: + :return: + """ + for emitter_name, inputs in CLIENTS.items(): + emitter = db.get_resource(emitter_name) + disconnect_by_src(emitter, input, receiver) + + +def disconnect_by_src(emitter, src, receiver): + if src in CLIENTS[emitter.name]: + CLIENTS[emitter.name][src] = [ + destination for destination in CLIENTS[emitter.name][src] + if destination[0] != receiver.name + ] + + utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + + +def notify(source, key, value): + CLIENTS.setdefault(source.name, {}) + print 'Notify', source.name, key, value, CLIENTS[source.name] + if key in CLIENTS[source.name]: + for client, r_key in CLIENTS[source.name][key]: + resource = db.get_resource(client) + print 'Resource found', client + if resource: + resource.update({r_key: value}, emitter=source) + else: + print 'Resource {} deleted?'.format(client) + pass + + +def assign_connections(receiver, connections): + mappings = defaultdict(list) + for key, dest in connections.iteritems(): + resource, r_key = dest.split('.') + mappings[resource].append([r_key, key]) + for resource, r_mappings in mappings.iteritems(): + connect(resource, receiver, r_mappings) + + +def connection_graph(): + resource_dependencies = {} + + for source, destination_values in CLIENTS.items(): + resource_dependencies.setdefault(source, set()) + for src, destinations in destination_values.items(): + resource_dependencies[source].update([ + destination[0] for destination in destinations + ]) + + g = nx.DiGraph() + + # TODO: tags as graph node attributes + for source, destinations in resource_dependencies.items(): + g.add_node(source) + g.add_nodes_from(destinations) + g.add_edges_from( + itertools.izip( + itertools.repeat(source), + destinations + ) + ) + + return g + + +def detailed_connection_graph(): + g = nx.MultiDiGraph() + + for emitter_name, destination_values in CLIENTS.items(): + for emitter_input, receivers in CLIENTS[emitter_name].items(): + for receiver_name, receiver_input in receivers: + label = emitter_input + if emitter_input != receiver_input: + label = '{}:{}'.format(emitter_input, receiver_input) + g.add_edge(emitter_name, receiver_name, label=label) + + return g diff --git a/x/test/__init__.py b/x/test/__init__.py new file mode 100644 index 00000000..4bf2011a --- /dev/null +++ b/x/test/__init__.py @@ -0,0 +1 @@ +__author__ = 'przemek' diff --git a/x/test/base.py b/x/test/base.py new file mode 100644 index 00000000..f58e727b --- /dev/null +++ b/x/test/base.py @@ -0,0 +1,36 @@ +import os +import shutil +import tempfile +import unittest +import yaml + +from x import db +from x import resource as xr +from x import signals as xs + + +class BaseResourceTest(unittest.TestCase): + def setUp(self): + self.storage_dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.storage_dir) + db.clear() + xs.Connections.clear() + + def make_resource_meta(self, meta_yaml): + meta = yaml.load(meta_yaml) + + path = os.path.join(self.storage_dir, meta['id']) + os.makedirs(path) + with open(os.path.join(path, 'meta.yaml'), 'w') as f: + f.write(meta_yaml) + + return path + + def create_resource(self, name, src, args): + dst = os.path.join(self.storage_dir, 'rs', name) + os.makedirs(dst) + + return xr.create(name, src, dst, args) + diff --git a/x/test/test_signals.py b/x/test/test_signals.py new file mode 100644 index 00000000..6647fb5d --- /dev/null +++ b/x/test/test_signals.py @@ -0,0 +1,344 @@ +import unittest + +import base + +from x import signals as xs + + +class 
TestBaseInput(base.BaseResourceTest): + def test_input_dict_type(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + values: {} + """) + + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'values': {'a': 1, 'b': 2}} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'values': None} + ) + xs.connect(sample1, sample2) + self.assertEqual( + sample1.args['values'], + sample2.args['values'], + ) + self.assertEqual( + sample2.args['values'].emitter, + sample1.args['values'] + ) + + # Check update + sample1.update({'values': {'a': 2}}) + self.assertEqual( + sample1.args['values'], + {'a': 2} + ) + self.assertEqual( + sample1.args['values'], + sample2.args['values'], + ) + + # Check disconnect + # TODO: should sample2.value be reverted to original value? + xs.disconnect(sample1, sample2) + sample1.update({'values': {'a': 3}}) + self.assertEqual( + sample1.args['values'], + {'a': 3} + ) + self.assertEqual( + sample2.args['values'], + {'a': 2} + ) + self.assertEqual(sample2.args['values'].emitter, None) + + def test_multiple_resource_disjoint_connect(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + port: + """) + sample_ip_meta_dir = self.make_resource_meta(""" +id: sample-ip +handler: ansible +version: 1.0.0 +input: + ip: + """) + sample_port_meta_dir = self.make_resource_meta(""" +id: sample-port +handler: ansible +version: 1.0.0 +input: + port: + """) + + sample = self.create_resource( + 'sample', sample_meta_dir, {'ip': None, 'port': None} + ) + sample_ip = self.create_resource( + 'sample-ip', sample_ip_meta_dir, {'ip': '10.0.0.1'} + ) + sample_port = self.create_resource( + 'sample-port', sample_port_meta_dir, {'port': '8000'} + ) + xs.connect(sample_ip, sample) + xs.connect(sample_port, sample) + self.assertEqual(sample.args['ip'], sample_ip.args['ip']) + self.assertEqual(sample.args['port'], sample_port.args['port']) + self.assertEqual( + sample.args['ip'].emitter, + sample_ip.args['ip'] + ) + self.assertEqual( + sample.args['port'].emitter, + sample_port.args['port'] + ) + + def test_simple_observer_unsubscription(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + """) + + sample = self.create_resource( + 'sample', sample_meta_dir, {'ip': None} + ) + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': '10.0.0.2'} + ) + + xs.connect(sample1, sample) + self.assertEqual(sample1.args['ip'], sample.args['ip']) + self.assertEqual(len(sample1.args['ip'].receivers), 1) + self.assertEqual( + sample.args['ip'].emitter, + sample1.args['ip'] + ) + + xs.connect(sample2, sample) + self.assertEqual(sample2.args['ip'], sample.args['ip']) + # sample should be unsubscribed from sample1 and subscribed to sample2 + self.assertEqual(len(sample1.args['ip'].receivers), 0) + self.assertEqual( + sample.args['ip'].emitter, + sample2.args['ip'] + ) + + sample1.update({'ip': '10.0.0.3'}) + self.assertEqual(sample2.args['ip'], sample.args['ip']) + + def test_circular_connection_prevention(self): + # TODO: more complex cases + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + """) + + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': 
'10.0.0.2'} + ) + xs.connect(sample1, sample2) + + with self.assertRaises(Exception): + xs.connect(sample2, sample1) + + +class TestListInput(base.BaseResourceTest): + def test_list_input_single(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + """) + list_input_single_meta_dir = self.make_resource_meta(""" +id: list-input-single +handler: ansible +version: 1.0.0 +input: + ips: +input-types: + ips: list + """) + + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': '10.0.0.2'} + ) + list_input_single = self.create_resource( + 'list-input-single', list_input_single_meta_dir, {'ips': []} + ) + + xs.connect(sample1, list_input_single, mapping={'ip': 'ips'}) + self.assertEqual( + [ip['value'] for ip in list_input_single.args['ips'].value], + [ + sample1.args['ip'], + ] + ) + self.assertListEqual( + [(e['emitter_attached_to'], e['emitter']) for e in list_input_single.args['ips'].value], + [(sample1.args['ip'].attached_to.name, 'ip')] + ) + + xs.connect(sample2, list_input_single, mapping={'ip': 'ips'}) + self.assertEqual( + [ip['value'] for ip in list_input_single.args['ips'].value], + [ + sample1.args['ip'], + sample2.args['ip'], + ] + ) + self.assertListEqual( + [(e['emitter_attached_to'], e['emitter']) for e in list_input_single.args['ips'].value], + [(sample1.args['ip'].attached_to.name, 'ip'), + (sample2.args['ip'].attached_to.name, 'ip')] + ) + + # Test update + sample2.update({'ip': '10.0.0.3'}) + self.assertEqual( + [ip['value'] for ip in list_input_single.args['ips'].value], + [ + sample1.args['ip'], + sample2.args['ip'], + ] + ) + + # Test disconnect + xs.disconnect(sample2, list_input_single) + self.assertEqual( + [ip['value'] for ip in list_input_single.args['ips'].value], + [ + sample1.args['ip'], + ] + ) + self.assertListEqual( + [(e['emitter_attached_to'], e['emitter']) for e in list_input_single.args['ips'].value], + [(sample1.args['ip'].attached_to.name, 'ip')] + ) + + def test_list_input_multi(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + port: + """) + list_input_multi_meta_dir = self.make_resource_meta(""" +id: list-input-multi +handler: ansible +version: 1.0.0 +input: + ips: + ports: +input-types: + ips: list + ports: list + """) + + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1', 'port': '1000'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': '10.0.0.2', 'port': '1001'} + ) + list_input_multi = self.create_resource( + 'list-input-multi', list_input_multi_meta_dir, {'ips': [], 'ports': []} + ) + + xs.connect(sample1, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) + self.assertEqual( + [ip['value'] for ip in list_input_multi.args['ips'].value], + [sample1.args['ip']] + ) + self.assertEqual( + [p['value'] for p in list_input_multi.args['ports'].value], + [sample1.args['port']] + ) + + xs.connect(sample2, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) + self.assertEqual( + [ip['value'] for ip in list_input_multi.args['ips'].value], + [ + sample1.args['ip'], + sample2.args['ip'], + ] + ) + self.assertListEqual( + [(e['emitter_attached_to'], e['emitter']) for e in list_input_multi.args['ips'].value], + [(sample1.args['ip'].attached_to.name, 'ip'), + (sample2.args['ip'].attached_to.name, 'ip')] + ) + self.assertEqual( + [p['value'] for p in 
list_input_multi.args['ports'].value], + [ + sample1.args['port'], + sample2.args['port'], + ] + ) + self.assertListEqual( + [(e['emitter_attached_to'], e['emitter']) for e in list_input_multi.args['ports'].value], + [(sample1.args['port'].attached_to.name, 'port'), + (sample2.args['port'].attached_to.name, 'port')] + ) + + +class TestMultiInput(base.BaseResourceTest): + def test_multi_input(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + port: + """) + receiver_meta_dir = self.make_resource_meta(""" +id: receiver +handler: ansible +version: 1.0.0 +input: + server: + """) + + sample = self.create_resource( + 'sample', sample_meta_dir, {'ip': '10.0.0.1', 'port': '5000'} + ) + receiver = self.create_resource( + 'receiver', receiver_meta_dir, {'server': None} + ) + xs.connect(sample, receiver, mapping={'ip, port': 'server'}) + self.assertItemsEqual( + (sample.args['ip'], sample.args['port']), + receiver.args['server'], + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/x/utils.py b/x/utils.py new file mode 100644 index 00000000..04b9873f --- /dev/null +++ b/x/utils.py @@ -0,0 +1,42 @@ +import json +import os +import yaml + + +def ext_encoder(fpath): + ext = os.path.splitext(os.path.basename(fpath))[1].strip('.') + if ext in ['json']: + return json + elif ext in ['yaml', 'yml']: + return yaml + + raise Exception('Unknown extension {}'.format(ext)) + + +def load_file(fpath): + encoder = ext_encoder(fpath) + + try: + with open(fpath) as f: + return encoder.load(f) + except IOError: + return {} + + +def read_config(): + return load_file('/vagrant/config.yaml') + + +def read_config_file(key): + fpath = read_config()[key] + + return load_file(fpath) + + +def save_to_config_file(key, data): + fpath = read_config()[key] + + with open(fpath, 'w') as f: + encoder = ext_encoder(fpath) + encoder.dump(data, f) +
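+
+# Illustrative usage (editorial sketch, not part of the original module):
+# assumes /vagrant/config.yaml maps 'clients-data-file' to a JSON or YAML
+# path; 'node1' is a hypothetical emitter name.
+#
+#     clients = read_config_file('clients-data-file')   # {} if file missing
+#     clients.setdefault('node1', {})
+#     save_to_config_file('clients-data-file', clients)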