From 4060b36feddabe1b743497665f0022e3080970f1 Mon Sep 17 00:00:00 2001
From: Jedrzej Nowak
Date: Wed, 13 Jan 2016 13:08:28 +0100
Subject: [PATCH] Moved examples, resources and templates

The new location is https://github.com/Mirantis/solar-resources;
it will later be changed to the openstack one.

- vagrant setup assumes that solar-resources is cloned into /vagrant/solar-resources
- adjusted the docker compose file
- added solar-resources to .gitignore

Change-Id: If2fea99145395606e6c15c9adbc127ecff4823f9
---
 .gitignore | 3 +
 README.md | 6 +-
 bootstrap/playbooks/solar.yaml | 8 +-
 doc/source/resource.rst | 8 +-
 docker-compose.yml | 4 +-
 examples/bootstrap/README.md | 13 -
 examples/bootstrap/example-bootstrap.py | 89 -
 examples/bootstrap/vagrant-settings.yaml | 5 -
 examples/cli/README | 10 -
 examples/cli/example.sh | 18 -
 examples/hosts_file/README.md | 95 -
 examples/hosts_file/hosts.py | 40 -
 examples/librarian/README | 15 -
 examples/librarian/librarian.yaml | 22 -
 examples/library_ceph/README.md | 30 -
 examples/library_ceph/ceph.py | 73 -
 examples/lxc/README.md | 25 -
 examples/lxc/example-lxc.py | 144 --
 examples/openstack/README.md | 15 -
 examples/openstack/openstack.py | 873 ---------
 examples/openstack/rabbitmq_user.yaml | 22 -
 examples/provisioning/provision.py | 79 -
 examples/provisioning/provision.sh | 17 -
 examples/riak/README.md | 47 -
 examples/riak/haproxy_riak_config.yaml | 50 -
 examples/riak/riak_cluster.yaml | 54 -
 examples/riak/riak_service.yaml | 61 -
 examples/riak/riaks-template.py | 174 --
 examples/riak/riaks.py | 266 ---
 examples/riak/riaks_big.py | 103 --
 examples/solar_agent/example.py | 61 -
 examples/torrent/README.md | 25 -
 examples/torrent/example.py | 70 -
 .../ansible_local/0.0.1/actions/run.yaml | 9 -
 .../actions/test_role/defaults/main.yaml | 4 -
 .../0.0.1/actions/test_role/tasks/main.yaml | 1 -
 resources/ansible_local/0.0.1/meta.yaml | 10 -
 .../ansible_remote/0.0.1/actions/run.yaml | 6 -
 resources/ansible_remote/0.0.1/meta.yaml | 15 -
 resources/apache_puppet/1.0.0/README.md | 4 -
 .../apache_puppet/1.0.0/actions/remove.pp | 5 -
 resources/apache_puppet/1.0.0/actions/run.pp | 120 --
 resources/apache_puppet/1.0.0/meta.yaml | 184 --
 resources/apache_puppet/1.0.0/test.py | 11 -
 resources/apt_repo/1.0.0/actions/remove.yaml | 9 -
 resources/apt_repo/1.0.0/actions/run.yaml | 11 -
 resources/apt_repo/1.0.0/meta.yaml | 24 -
 .../apt_repo/1.0.0/templates/preferences | 3 -
 resources/apt_repo/1.0.0/templates/source | 1 -
 resources/ceph_keys/1.0.0/actions/run.sh | 17 -
 resources/ceph_keys/1.0.0/meta.yaml | 16 -
 resources/ceph_mon/1.0.0/actions/run.pp | 95 -
 resources/ceph_mon/1.0.0/actions/test.pp | 4 -
 resources/ceph_mon/1.0.0/meta.yaml | 37 -
 resources/cinder_api_puppet/1.0.0/README.md | 98 -
 .../cinder_api_puppet/1.0.0/actions/remove.pp | 12 -
 .../cinder_api_puppet/1.0.0/actions/run.pp | 52 -
 .../cinder_api_puppet/1.0.0/actions/update.pp | 56 -
 resources/cinder_api_puppet/1.0.0/meta.yaml | 76 -
 resources/cinder_api_puppet/1.0.0/test.py | 10 -
 .../cinder_glance_puppet/1.0.0/README.md | 39 -
 .../1.0.0/actions/remove.pp | 1 -
 .../cinder_glance_puppet/1.0.0/actions/run.pp | 18 -
 .../cinder_glance_puppet/1.0.0/meta.yaml | 41 -
 resources/cinder_puppet/1.0.0/README.md | 112 --
 .../cinder_puppet/1.0.0/actions/remove.pp | 4 -
 resources/cinder_puppet/1.0.0/actions/run.pp | 116 --
 resources/cinder_puppet/1.0.0/meta.yaml | 215 ---
 resources/cinder_puppet/1.0.0/test.py | 10 -
 .../cinder_scheduler_puppet/1.0.0/README.md | 3 -
 .../1.0.0/actions/remove.pp | 4 -
 .../1.0.0/actions/run.pp | 18 -
.../1.0.0/actions/update.pp | 22 - .../cinder_scheduler_puppet/1.0.0/meta.yaml | 25 - .../cinder_scheduler_puppet/1.0.0/test.py | 12 - .../cinder_volume_puppet/1.0.0/README.md | 8 - .../1.0.0/actions/remove.pp | 4 - .../cinder_volume_puppet/1.0.0/actions/run.pp | 31 - .../1.0.0/actions/update.pp | 26 - .../cinder_volume_puppet/1.0.0/meta.yaml | 38 - resources/cinder_volume_puppet/1.0.0/test.py | 12 - .../container_networks/1.0.0/actions/run.yaml | 22 - resources/container_networks/1.0.0/meta.yaml | 16 - .../data_container/1.0.0/actions/echo.yaml | 5 - .../data_container/1.0.0/actions/remove.yaml | 6 - .../data_container/1.0.0/actions/run.yaml | 24 - resources/data_container/1.0.0/meta.yaml | 12 - .../1.0.0/actions/exclude_mac_pxe.yaml | 6 - resources/dnsmasq/1.0.0/actions/run.yaml | 2 - resources/dnsmasq/1.0.0/meta.yaml | 17 - resources/docker/1.0.0/actions/run.yaml | 9 - resources/docker/1.0.0/meta.yaml | 15 - .../1.0.0/actions/remove.yaml | 6 - .../docker_container/1.0.0/actions/run.yaml | 37 - .../1.0.0/actions/update.yaml | 37 - resources/docker_container/1.0.0/meta.yaml | 29 - .../ex_managed/1.0.0/managers/manager.py | 10 - resources/ex_managed/1.0.0/meta.yaml | 11 - resources/file/1.0.0/actions/remove.sh | 3 - resources/file/1.0.0/actions/run.sh | 3 - resources/file/1.0.0/meta.yaml | 6 - resources/fuel_library/1.0.0/actions/run.sh | 20 - resources/fuel_library/1.0.0/meta.yaml | 17 - .../1.0.0/actions/remove.yaml | 8 - .../glance_api_service/1.0.0/actions/run.yaml | 27 - resources/glance_api_service/1.0.0/meta.yaml | 42 - resources/glance_api_service/1.0.0/test.py | 24 - .../glance_config/1.0.0/actions/remove.yaml | 5 - .../glance_config/1.0.0/actions/run.yaml | 37 - resources/glance_config/1.0.0/meta.yaml | 52 - .../glance_config/1.0.0/templates/exports | 4 - .../1.0.0/templates/glance-api-paste.ini | 82 - .../1.0.0/templates/glance-api.conf | 35 - .../1.0.0/templates/glance-cache.conf | 200 --- .../1.0.0/templates/glance-registry-paste.ini | 35 - .../1.0.0/templates/glance-registry.conf | 22 - .../1.0.0/templates/glance-scrubber.conf | 108 -- .../glance_config/1.0.0/templates/policy.json | 52 - .../1.0.0/templates/schema-image.json | 28 - resources/glance_puppet/1.0.0/README.md | 194 -- .../glance_puppet/1.0.0/actions/remove.pp | 11 - resources/glance_puppet/1.0.0/actions/run.pp | 101 -- .../glance_puppet/1.0.0/actions/update.pp | 101 -- resources/glance_puppet/1.0.0/meta.yaml | 165 -- resources/glance_puppet/1.0.0/test.py | 22 - .../glance_registry_puppet/1.0.0/README.md | 116 -- .../1.0.0/actions/remove.pp | 7 - .../1.0.0/actions/run.pp | 74 - .../1.0.0/actions/update.pp | 78 - .../glance_registry_puppet/1.0.0/meta.yaml | 129 -- .../glance_registry_puppet/1.0.0/test.py | 10 - .../1.0.0/actions/remove.yaml | 8 - .../1.0.0/actions/run.yaml | 32 - .../glance_registry_service/1.0.0/meta.yaml | 26 - resources/haproxy_config/1.0.0/README.md | 11 - .../haproxy_config/1.0.0/actions/remove.yaml | 5 - .../haproxy_config/1.0.0/actions/run.yaml | 22 - .../haproxy_config/1.0.0/actions/update.yaml | 22 - resources/haproxy_config/1.0.0/meta.yaml | 20 - .../1.0.0/templates/haproxy.cfg | 59 - resources/haproxy_service/1.0.0/README.md | 8 - .../1.0.0/actions/apply_config.yaml | 7 - .../1.0.0/actions/install.yaml | 10 - .../haproxy_service/1.0.0/actions/remove.yaml | 8 - .../haproxy_service/1.0.0/actions/run.yaml | 10 - .../haproxy_service/1.0.0/actions/update.yaml | 7 - resources/haproxy_service/1.0.0/meta.yaml | 14 - resources/haproxy_service/1.0.0/test.py | 10 - 
.../haproxy_service_config/1.0.0/README.md | 5 - .../haproxy_service_config/1.0.0/meta.yaml | 22 - .../hosts_file/1.0.0/actions/remove.yaml | 5 - resources/hosts_file/1.0.0/actions/run.yaml | 11 - .../hosts_file/1.0.0/actions/update.yaml | 11 - resources/hosts_file/1.0.0/meta.yaml | 9 - .../keystone_config/1.0.0/actions/remove.yaml | 4 - .../keystone_config/1.0.0/actions/run.yaml | 17 - resources/keystone_config/1.0.0/meta.yaml | 36 - .../1.0.0/templates/default_catalog.templates | 27 - .../keystone_config/1.0.0/templates/exports | 2 - .../1.0.0/templates/keystone.conf | 1589 ----------------- .../1.0.0/templates/logging.conf | 65 - .../1.0.0/templates/policy.json | 171 -- resources/keystone_puppet/1.0.0/README.md | 6 - .../keystone_puppet/1.0.0/actions/remove.pp | 4 - .../keystone_puppet/1.0.0/actions/run.pp | 28 - .../keystone_puppet/1.0.0/actions/update.pp | 21 - resources/keystone_puppet/1.0.0/meta.yaml | 44 - resources/keystone_puppet/1.0.0/test.py | 10 - .../keystone_role/1.0.0/actions/remove.yaml | 6 - .../keystone_role/1.0.0/actions/run.yaml | 7 - resources/keystone_role/1.0.0/meta.yaml | 32 - resources/keystone_service/1.0.0/README.md | 11 - .../1.0.0/actions/remove.yaml | 9 - .../keystone_service/1.0.0/actions/run.yaml | 19 - resources/keystone_service/1.0.0/meta.yaml | 26 - resources/keystone_service/1.0.0/test.py | 10 - .../1.0.0/actions/remove.yaml | 20 - .../1.0.0/actions/run.yaml | 15 - .../keystone_service_endpoint/1.0.0/meta.yaml | 61 - .../keystone_service_endpoint/1.0.0/test.py | 48 - .../keystone_tenant/1.0.0/actions/remove.yaml | 5 - .../keystone_tenant/1.0.0/actions/run.yaml | 7 - resources/keystone_tenant/1.0.0/meta.yaml | 26 - .../keystone_user/1.0.0/actions/remove.yaml | 6 - .../keystone_user/1.0.0/actions/run.yaml | 7 - resources/keystone_user/1.0.0/meta.yaml | 32 - resources/keystone_user/1.0.0/test.py | 18 - resources/librarian/0.0.1/actions/remove.yaml | 6 - resources/librarian/0.0.1/actions/run.yaml | 10 - resources/librarian/0.0.1/meta.yaml | 16 - .../librarian/0.0.1/templates/Puppetfile | 24 - .../lxc_container/1.0.0/actions/run.yaml | 25 - resources/lxc_container/1.0.0/meta.yaml | 54 - resources/lxc_host/1.0.0/actions/run.yaml | 6 - resources/lxc_host/1.0.0/meta.yaml | 22 - resources/managed_apt/1.0.0/actions/run.yaml | 7 - resources/managed_apt/1.0.0/meta.yaml | 16 - .../mariadb_db/1.0.0/actions/remove.yaml | 13 - resources/mariadb_db/1.0.0/actions/run.yaml | 13 - resources/mariadb_db/1.0.0/meta.yaml | 39 - .../mariadb_service/1.0.0/actions/remove.yaml | 9 - .../mariadb_service/1.0.0/actions/run.yaml | 22 - resources/mariadb_service/1.0.0/meta.yaml | 29 - .../mariadb_user/1.0.0/actions/remove.yaml | 11 - resources/mariadb_user/1.0.0/actions/run.yaml | 14 - .../mariadb_user/1.0.0/actions/update.yaml | 15 - resources/mariadb_user/1.0.0/meta.yaml | 40 - .../1.0.0/README.md | 57 - .../1.0.0/actions/remove.pp | 16 - .../1.0.0/actions/run.pp | 45 - .../1.0.0/meta.yaml | 55 - .../neutron_agents_l3_puppet/1.0.0/README.md | 100 -- .../1.0.0/actions/remove.pp | 16 - .../1.0.0/actions/run.pp | 63 - .../neutron_agents_l3_puppet/1.0.0/meta.yaml | 82 - .../1.0.0/README.md | 65 - .../1.0.0/actions/remove.pp | 16 - .../1.0.0/actions/run.pp | 53 - .../1.0.0/meta.yaml | 68 - .../1.0.0/README.md | 74 - .../1.0.0/actions/remove.pp | 4 - .../1.0.0/actions/run.pp | 45 - .../1.0.0/meta.yaml | 64 - .../1.0.0/README.md | 83 - .../1.0.0/actions/remove.pp | 3 - .../1.0.0/actions/run.pp | 54 - .../1.0.0/meta.yaml | 55 - resources/neutron_puppet/1.0.0/README.md | 194 -- 
.../neutron_puppet/1.0.0/actions/remove.pp | 5 - resources/neutron_puppet/1.0.0/actions/run.pp | 120 -- resources/neutron_puppet/1.0.0/meta.yaml | 187 -- .../neutron_server_puppet/1.0.0/README.md | 167 -- .../1.0.0/actions/remove.pp | 10 - .../1.0.0/actions/run.pp | 93 - .../neutron_server_puppet/1.0.0/meta.yaml | 146 -- resources/node_network_puppet/1.0.0/README.md | 9 - .../1.0.0/actions/remove.pp | 3 - .../node_network_puppet/1.0.0/actions/run.pp | 41 - resources/node_network_puppet/1.0.0/meta.yaml | 58 - resources/node_network_puppet/1.0.0/test.py | 11 - .../1.0.0/actions/provision.sh | 10 - .../1.0.0/actions/reboot.sh | 6 - .../not_provisioned_node/1.0.0/actions/run.sh | 8 - .../not_provisioned_node/1.0.0/meta.yaml | 33 - .../1.0.0/templates/agent.config | 4 - .../boothook_centos.jinja2 | 55 - .../boothook_ubuntu.jinja2 | 55 - .../cloud_config_centos.jinja2 | 26 - .../cloud_config_ubuntu.jinja2 | 25 - .../meta-data_centos.jinja2 | 11 - .../meta-data_ubuntu.jinja2 | 11 - .../1.0.0/templates/provisioning.json.jinja | 156 -- resources/nova_api_puppet/1.0.0/README.md | 141 -- .../nova_api_puppet/1.0.0/actions/remove.pp | 17 - .../nova_api_puppet/1.0.0/actions/run.pp | 78 - .../nova_api_puppet/1.0.0/actions/update.pp | 82 - resources/nova_api_puppet/1.0.0/meta.yaml | 109 -- resources/nova_api_puppet/1.0.0/test.py | 91 - .../1.0.0/README.md | 84 - .../1.0.0/actions/remove.pp | 1 - .../1.0.0/actions/run.pp | 56 - .../1.0.0/actions/update.pp | 60 - .../1.0.0/meta.yaml | 61 - resources/nova_compute_puppet/1.0.0/README.md | 105 -- .../1.0.0/actions/remove.pp | 20 - .../nova_compute_puppet/1.0.0/actions/run.pp | 64 - .../1.0.0/actions/update.pp | 68 - resources/nova_compute_puppet/1.0.0/meta.yaml | 82 - .../nova_conductor_puppet/1.0.0/README.md | 16 - .../1.0.0/actions/remove.pp | 11 - .../1.0.0/actions/run.pp | 22 - .../1.0.0/actions/update.pp | 26 - .../nova_conductor_puppet/1.0.0/meta.yaml | 25 - .../1.0.0/README.md | 5 - .../1.0.0/actions/remove.pp | 21 - .../1.0.0/actions/run.pp | 25 - .../1.0.0/actions/update.pp | 29 - .../1.0.0/meta.yaml | 31 - resources/nova_neutron_puppet/1.0.0/README.md | 115 -- .../1.0.0/actions/remove.pp | 1 - .../nova_neutron_puppet/1.0.0/actions/run.pp | 54 - resources/nova_neutron_puppet/1.0.0/meta.yaml | 92 - resources/nova_puppet/1.0.0/README.md | 275 --- resources/nova_puppet/1.0.0/actions/remove.pp | 4 - resources/nova_puppet/1.0.0/actions/run.pp | 138 -- resources/nova_puppet/1.0.0/meta.yaml | 254 --- .../openrc_file/1.0.0/actions/remove.yaml | 5 - resources/openrc_file/1.0.0/actions/run.yaml | 10 - resources/openrc_file/1.0.0/meta.yaml | 28 - .../1.0.0/templates/openrc.template | 10 - .../rabbitmq_config/1.0.0/actions/remove.yaml | 4 - .../rabbitmq_config/1.0.0/actions/run.yaml | 8 - resources/rabbitmq_config/1.0.0/meta.yaml | 21 - .../1.0.0/templates/rabbitmq.conf | 30 - .../rabbitmq_service/1.0.0/actions/remove.pp | 9 - .../rabbitmq_service/1.0.0/actions/run.pp | 11 - resources/rabbitmq_service/1.0.0/meta.yaml | 24 - .../rabbitmq_user/1.0.0/actions/remove.yaml | 6 - .../rabbitmq_user/1.0.0/actions/run.yaml | 11 - resources/rabbitmq_user/1.0.0/meta.yaml | 26 - .../rabbitmq_vhost/1.0.0/actions/remove.yaml | 5 - .../rabbitmq_vhost/1.0.0/actions/run.yaml | 5 - resources/rabbitmq_vhost/1.0.0/meta.yaml | 17 - resources/remote_file/1.0.0/actions/run.sh | 10 - resources/remote_file/1.0.0/meta.yaml | 19 - .../riak_join_single/1.0.0/actions/join.yaml | 4 - resources/riak_join_single/1.0.0/meta.yaml | 17 - resources/riak_node/1.0.0/actions/commit.yaml | 6 - 
resources/riak_node/1.0.0/actions/join.yaml | 15 - resources/riak_node/1.0.0/actions/remove.yaml | 6 - resources/riak_node/1.0.0/actions/run.yaml | 28 - resources/riak_node/1.0.0/actions/update.yaml | 12 - resources/riak_node/1.0.0/meta.yaml | 38 - .../riak_node/1.0.0/templates/riak.conf.jinja | 494 ----- resources/ro_node/1.0.0/meta.yaml | 22 - .../solar_bootstrap/1.0.0/actions/run.yaml | 11 - resources/solar_bootstrap/1.0.0/meta.yaml | 17 - resources/sources/1.0.0/meta.yaml | 6 - resources/ssh_key/1.0.0/actions/run.yaml | 14 - resources/ssh_key/1.0.0/meta.yaml | 25 - resources/transport_rsync/1.0.0/meta.yaml | 25 - .../1.0.0/actions/run.yaml | 7 - .../1.0.0/actions/update.yaml | 7 - .../transport_solar_agent/1.0.0/meta.yaml | 25 - resources/transport_ssh/1.0.0/meta.yaml | 25 - .../transport_torrent/1.0.0/actions/run.yaml | 9 - resources/transport_torrent/1.0.0/meta.yaml | 17 - .../1.0.0/scripts/solar_torrent.py | 194 -- resources/transports/1.0.0/meta.yaml | 12 - .../volume_group/1.0.0/actions/remove.yaml | 16 - resources/volume_group/1.0.0/actions/run.yaml | 20 - resources/volume_group/1.0.0/meta.yaml | 26 - resources/vxlan_mesh/1.0.0/actions/run.yaml | 16 - resources/vxlan_mesh/1.0.0/meta.yaml | 22 - templates/controller/1.0.0/controller.yaml | 76 - templates/glance/1.0.0/glance.yaml | 23 - templates/glance_base/1.0.0/glance_base.yaml | 26 - templates/glance_db/1.0.0/glance_db.yaml | 30 - .../1.0.0/glance_registry.yaml | 26 - templates/haproxy/1.0.0/haproxy.yaml | 46 - .../keystone_api/1.0.0/keystone_api.yaml | 20 - .../keystone_base/1.0.0/keystone_base.yaml | 28 - templates/mos_repos/1.0.0/mos_repos.yaml | 43 - templates/nodes/1.0.0/nodes.yaml | 37 - .../nodes_network/1.0.0/nodes_network.yaml | 71 - .../1.0.0/nodes_with_transports.yaml | 32 - .../1.0.0/not_provisioned_nodes.yaml | 43 - .../openstack_base/1.0.0/openstack_base.yaml | 74 - templates/profile/1.0.0/profile.yaml | 112 -- templates/seed_node/1.0.0/seed_node.yaml | 8 - templates/sources/1.0.0/sources.yaml | 8 - 351 files changed, 19 insertions(+), 16182 deletions(-) delete mode 100644 examples/bootstrap/README.md delete mode 100755 examples/bootstrap/example-bootstrap.py delete mode 100644 examples/bootstrap/vagrant-settings.yaml delete mode 100644 examples/cli/README delete mode 100755 examples/cli/example.sh delete mode 100644 examples/hosts_file/README.md delete mode 100755 examples/hosts_file/hosts.py delete mode 100644 examples/librarian/README delete mode 100644 examples/librarian/librarian.yaml delete mode 100644 examples/library_ceph/README.md delete mode 100644 examples/library_ceph/ceph.py delete mode 100644 examples/lxc/README.md delete mode 100755 examples/lxc/example-lxc.py delete mode 100644 examples/openstack/README.md delete mode 100755 examples/openstack/openstack.py delete mode 100644 examples/openstack/rabbitmq_user.yaml delete mode 100755 examples/provisioning/provision.py delete mode 100755 examples/provisioning/provision.sh delete mode 100644 examples/riak/README.md delete mode 100644 examples/riak/haproxy_riak_config.yaml delete mode 100644 examples/riak/riak_cluster.yaml delete mode 100644 examples/riak/riak_service.yaml delete mode 100755 examples/riak/riaks-template.py delete mode 100755 examples/riak/riaks.py delete mode 100755 examples/riak/riaks_big.py delete mode 100644 examples/solar_agent/example.py delete mode 100644 examples/torrent/README.md delete mode 100644 examples/torrent/example.py delete mode 100644 resources/ansible_local/0.0.1/actions/run.yaml delete mode 100644 
resources/ansible_local/0.0.1/actions/test_role/defaults/main.yaml delete mode 100644 resources/ansible_local/0.0.1/actions/test_role/tasks/main.yaml delete mode 100644 resources/ansible_local/0.0.1/meta.yaml delete mode 100644 resources/ansible_remote/0.0.1/actions/run.yaml delete mode 100644 resources/ansible_remote/0.0.1/meta.yaml delete mode 100644 resources/apache_puppet/1.0.0/README.md delete mode 100644 resources/apache_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/apache_puppet/1.0.0/actions/run.pp delete mode 100644 resources/apache_puppet/1.0.0/meta.yaml delete mode 100644 resources/apache_puppet/1.0.0/test.py delete mode 100644 resources/apt_repo/1.0.0/actions/remove.yaml delete mode 100644 resources/apt_repo/1.0.0/actions/run.yaml delete mode 100644 resources/apt_repo/1.0.0/meta.yaml delete mode 100644 resources/apt_repo/1.0.0/templates/preferences delete mode 100644 resources/apt_repo/1.0.0/templates/source delete mode 100644 resources/ceph_keys/1.0.0/actions/run.sh delete mode 100644 resources/ceph_keys/1.0.0/meta.yaml delete mode 100644 resources/ceph_mon/1.0.0/actions/run.pp delete mode 100644 resources/ceph_mon/1.0.0/actions/test.pp delete mode 100644 resources/ceph_mon/1.0.0/meta.yaml delete mode 100644 resources/cinder_api_puppet/1.0.0/README.md delete mode 100644 resources/cinder_api_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/cinder_api_puppet/1.0.0/actions/run.pp delete mode 100644 resources/cinder_api_puppet/1.0.0/actions/update.pp delete mode 100644 resources/cinder_api_puppet/1.0.0/meta.yaml delete mode 100644 resources/cinder_api_puppet/1.0.0/test.py delete mode 100644 resources/cinder_glance_puppet/1.0.0/README.md delete mode 100644 resources/cinder_glance_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/cinder_glance_puppet/1.0.0/actions/run.pp delete mode 100644 resources/cinder_glance_puppet/1.0.0/meta.yaml delete mode 100644 resources/cinder_puppet/1.0.0/README.md delete mode 100644 resources/cinder_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/cinder_puppet/1.0.0/actions/run.pp delete mode 100644 resources/cinder_puppet/1.0.0/meta.yaml delete mode 100644 resources/cinder_puppet/1.0.0/test.py delete mode 100644 resources/cinder_scheduler_puppet/1.0.0/README.md delete mode 100644 resources/cinder_scheduler_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/cinder_scheduler_puppet/1.0.0/actions/run.pp delete mode 100644 resources/cinder_scheduler_puppet/1.0.0/actions/update.pp delete mode 100644 resources/cinder_scheduler_puppet/1.0.0/meta.yaml delete mode 100644 resources/cinder_scheduler_puppet/1.0.0/test.py delete mode 100644 resources/cinder_volume_puppet/1.0.0/README.md delete mode 100644 resources/cinder_volume_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/cinder_volume_puppet/1.0.0/actions/run.pp delete mode 100644 resources/cinder_volume_puppet/1.0.0/actions/update.pp delete mode 100644 resources/cinder_volume_puppet/1.0.0/meta.yaml delete mode 100644 resources/cinder_volume_puppet/1.0.0/test.py delete mode 100644 resources/container_networks/1.0.0/actions/run.yaml delete mode 100644 resources/container_networks/1.0.0/meta.yaml delete mode 100644 resources/data_container/1.0.0/actions/echo.yaml delete mode 100644 resources/data_container/1.0.0/actions/remove.yaml delete mode 100644 resources/data_container/1.0.0/actions/run.yaml delete mode 100644 resources/data_container/1.0.0/meta.yaml delete mode 100644 resources/dnsmasq/1.0.0/actions/exclude_mac_pxe.yaml delete mode 100644 
resources/dnsmasq/1.0.0/actions/run.yaml delete mode 100644 resources/dnsmasq/1.0.0/meta.yaml delete mode 100644 resources/docker/1.0.0/actions/run.yaml delete mode 100644 resources/docker/1.0.0/meta.yaml delete mode 100644 resources/docker_container/1.0.0/actions/remove.yaml delete mode 100644 resources/docker_container/1.0.0/actions/run.yaml delete mode 100644 resources/docker_container/1.0.0/actions/update.yaml delete mode 100644 resources/docker_container/1.0.0/meta.yaml delete mode 100755 resources/ex_managed/1.0.0/managers/manager.py delete mode 100644 resources/ex_managed/1.0.0/meta.yaml delete mode 100644 resources/file/1.0.0/actions/remove.sh delete mode 100644 resources/file/1.0.0/actions/run.sh delete mode 100644 resources/file/1.0.0/meta.yaml delete mode 100644 resources/fuel_library/1.0.0/actions/run.sh delete mode 100644 resources/fuel_library/1.0.0/meta.yaml delete mode 100644 resources/glance_api_service/1.0.0/actions/remove.yaml delete mode 100644 resources/glance_api_service/1.0.0/actions/run.yaml delete mode 100644 resources/glance_api_service/1.0.0/meta.yaml delete mode 100644 resources/glance_api_service/1.0.0/test.py delete mode 100644 resources/glance_config/1.0.0/actions/remove.yaml delete mode 100644 resources/glance_config/1.0.0/actions/run.yaml delete mode 100644 resources/glance_config/1.0.0/meta.yaml delete mode 100644 resources/glance_config/1.0.0/templates/exports delete mode 100644 resources/glance_config/1.0.0/templates/glance-api-paste.ini delete mode 100644 resources/glance_config/1.0.0/templates/glance-api.conf delete mode 100644 resources/glance_config/1.0.0/templates/glance-cache.conf delete mode 100644 resources/glance_config/1.0.0/templates/glance-registry-paste.ini delete mode 100644 resources/glance_config/1.0.0/templates/glance-registry.conf delete mode 100644 resources/glance_config/1.0.0/templates/glance-scrubber.conf delete mode 100644 resources/glance_config/1.0.0/templates/policy.json delete mode 100644 resources/glance_config/1.0.0/templates/schema-image.json delete mode 100644 resources/glance_puppet/1.0.0/README.md delete mode 100644 resources/glance_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/glance_puppet/1.0.0/actions/run.pp delete mode 100644 resources/glance_puppet/1.0.0/actions/update.pp delete mode 100644 resources/glance_puppet/1.0.0/meta.yaml delete mode 100644 resources/glance_puppet/1.0.0/test.py delete mode 100644 resources/glance_registry_puppet/1.0.0/README.md delete mode 100644 resources/glance_registry_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/glance_registry_puppet/1.0.0/actions/run.pp delete mode 100644 resources/glance_registry_puppet/1.0.0/actions/update.pp delete mode 100644 resources/glance_registry_puppet/1.0.0/meta.yaml delete mode 100644 resources/glance_registry_puppet/1.0.0/test.py delete mode 100644 resources/glance_registry_service/1.0.0/actions/remove.yaml delete mode 100644 resources/glance_registry_service/1.0.0/actions/run.yaml delete mode 100644 resources/glance_registry_service/1.0.0/meta.yaml delete mode 100644 resources/haproxy_config/1.0.0/README.md delete mode 100644 resources/haproxy_config/1.0.0/actions/remove.yaml delete mode 100644 resources/haproxy_config/1.0.0/actions/run.yaml delete mode 100644 resources/haproxy_config/1.0.0/actions/update.yaml delete mode 100644 resources/haproxy_config/1.0.0/meta.yaml delete mode 100644 resources/haproxy_config/1.0.0/templates/haproxy.cfg delete mode 100644 resources/haproxy_service/1.0.0/README.md delete mode 100644 
resources/haproxy_service/1.0.0/actions/apply_config.yaml delete mode 100644 resources/haproxy_service/1.0.0/actions/install.yaml delete mode 100644 resources/haproxy_service/1.0.0/actions/remove.yaml delete mode 100644 resources/haproxy_service/1.0.0/actions/run.yaml delete mode 100644 resources/haproxy_service/1.0.0/actions/update.yaml delete mode 100644 resources/haproxy_service/1.0.0/meta.yaml delete mode 100644 resources/haproxy_service/1.0.0/test.py delete mode 100644 resources/haproxy_service_config/1.0.0/README.md delete mode 100644 resources/haproxy_service_config/1.0.0/meta.yaml delete mode 100644 resources/hosts_file/1.0.0/actions/remove.yaml delete mode 100644 resources/hosts_file/1.0.0/actions/run.yaml delete mode 100644 resources/hosts_file/1.0.0/actions/update.yaml delete mode 100644 resources/hosts_file/1.0.0/meta.yaml delete mode 100644 resources/keystone_config/1.0.0/actions/remove.yaml delete mode 100644 resources/keystone_config/1.0.0/actions/run.yaml delete mode 100644 resources/keystone_config/1.0.0/meta.yaml delete mode 100644 resources/keystone_config/1.0.0/templates/default_catalog.templates delete mode 100644 resources/keystone_config/1.0.0/templates/exports delete mode 100644 resources/keystone_config/1.0.0/templates/keystone.conf delete mode 100644 resources/keystone_config/1.0.0/templates/logging.conf delete mode 100644 resources/keystone_config/1.0.0/templates/policy.json delete mode 100644 resources/keystone_puppet/1.0.0/README.md delete mode 100644 resources/keystone_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/keystone_puppet/1.0.0/actions/run.pp delete mode 100644 resources/keystone_puppet/1.0.0/actions/update.pp delete mode 100644 resources/keystone_puppet/1.0.0/meta.yaml delete mode 100644 resources/keystone_puppet/1.0.0/test.py delete mode 100644 resources/keystone_role/1.0.0/actions/remove.yaml delete mode 100644 resources/keystone_role/1.0.0/actions/run.yaml delete mode 100644 resources/keystone_role/1.0.0/meta.yaml delete mode 100644 resources/keystone_service/1.0.0/README.md delete mode 100644 resources/keystone_service/1.0.0/actions/remove.yaml delete mode 100644 resources/keystone_service/1.0.0/actions/run.yaml delete mode 100644 resources/keystone_service/1.0.0/meta.yaml delete mode 100644 resources/keystone_service/1.0.0/test.py delete mode 100644 resources/keystone_service_endpoint/1.0.0/actions/remove.yaml delete mode 100644 resources/keystone_service_endpoint/1.0.0/actions/run.yaml delete mode 100644 resources/keystone_service_endpoint/1.0.0/meta.yaml delete mode 100644 resources/keystone_service_endpoint/1.0.0/test.py delete mode 100644 resources/keystone_tenant/1.0.0/actions/remove.yaml delete mode 100644 resources/keystone_tenant/1.0.0/actions/run.yaml delete mode 100644 resources/keystone_tenant/1.0.0/meta.yaml delete mode 100644 resources/keystone_user/1.0.0/actions/remove.yaml delete mode 100644 resources/keystone_user/1.0.0/actions/run.yaml delete mode 100644 resources/keystone_user/1.0.0/meta.yaml delete mode 100644 resources/keystone_user/1.0.0/test.py delete mode 100644 resources/librarian/0.0.1/actions/remove.yaml delete mode 100644 resources/librarian/0.0.1/actions/run.yaml delete mode 100644 resources/librarian/0.0.1/meta.yaml delete mode 100644 resources/librarian/0.0.1/templates/Puppetfile delete mode 100644 resources/lxc_container/1.0.0/actions/run.yaml delete mode 100644 resources/lxc_container/1.0.0/meta.yaml delete mode 100644 resources/lxc_host/1.0.0/actions/run.yaml delete mode 100644 
resources/lxc_host/1.0.0/meta.yaml delete mode 100644 resources/managed_apt/1.0.0/actions/run.yaml delete mode 100644 resources/managed_apt/1.0.0/meta.yaml delete mode 100644 resources/mariadb_db/1.0.0/actions/remove.yaml delete mode 100644 resources/mariadb_db/1.0.0/actions/run.yaml delete mode 100644 resources/mariadb_db/1.0.0/meta.yaml delete mode 100644 resources/mariadb_service/1.0.0/actions/remove.yaml delete mode 100644 resources/mariadb_service/1.0.0/actions/run.yaml delete mode 100644 resources/mariadb_service/1.0.0/meta.yaml delete mode 100644 resources/mariadb_user/1.0.0/actions/remove.yaml delete mode 100644 resources/mariadb_user/1.0.0/actions/run.yaml delete mode 100644 resources/mariadb_user/1.0.0/actions/update.yaml delete mode 100644 resources/mariadb_user/1.0.0/meta.yaml delete mode 100644 resources/neutron_agents_dhcp_puppet/1.0.0/README.md delete mode 100644 resources/neutron_agents_dhcp_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/neutron_agents_dhcp_puppet/1.0.0/actions/run.pp delete mode 100644 resources/neutron_agents_dhcp_puppet/1.0.0/meta.yaml delete mode 100644 resources/neutron_agents_l3_puppet/1.0.0/README.md delete mode 100644 resources/neutron_agents_l3_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/neutron_agents_l3_puppet/1.0.0/actions/run.pp delete mode 100644 resources/neutron_agents_l3_puppet/1.0.0/meta.yaml delete mode 100644 resources/neutron_agents_metadata_puppet/1.0.0/README.md delete mode 100644 resources/neutron_agents_metadata_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/neutron_agents_metadata_puppet/1.0.0/actions/run.pp delete mode 100644 resources/neutron_agents_metadata_puppet/1.0.0/meta.yaml delete mode 100644 resources/neutron_agents_ml2_ovs_puppet/1.0.0/README.md delete mode 100644 resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/run.pp delete mode 100644 resources/neutron_agents_ml2_ovs_puppet/1.0.0/meta.yaml delete mode 100644 resources/neutron_plugins_ml2_puppet/1.0.0/README.md delete mode 100644 resources/neutron_plugins_ml2_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/neutron_plugins_ml2_puppet/1.0.0/actions/run.pp delete mode 100644 resources/neutron_plugins_ml2_puppet/1.0.0/meta.yaml delete mode 100644 resources/neutron_puppet/1.0.0/README.md delete mode 100644 resources/neutron_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/neutron_puppet/1.0.0/actions/run.pp delete mode 100644 resources/neutron_puppet/1.0.0/meta.yaml delete mode 100644 resources/neutron_server_puppet/1.0.0/README.md delete mode 100644 resources/neutron_server_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/neutron_server_puppet/1.0.0/actions/run.pp delete mode 100644 resources/neutron_server_puppet/1.0.0/meta.yaml delete mode 100644 resources/node_network_puppet/1.0.0/README.md delete mode 100644 resources/node_network_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/node_network_puppet/1.0.0/actions/run.pp delete mode 100644 resources/node_network_puppet/1.0.0/meta.yaml delete mode 100644 resources/node_network_puppet/1.0.0/test.py delete mode 100644 resources/not_provisioned_node/1.0.0/actions/provision.sh delete mode 100644 resources/not_provisioned_node/1.0.0/actions/reboot.sh delete mode 100644 resources/not_provisioned_node/1.0.0/actions/run.sh delete mode 100644 resources/not_provisioned_node/1.0.0/meta.yaml delete mode 100644 
resources/not_provisioned_node/1.0.0/templates/agent.config delete mode 100644 resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_centos.jinja2 delete mode 100644 resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_ubuntu.jinja2 delete mode 100644 resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_centos.jinja2 delete mode 100644 resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 delete mode 100644 resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_centos.jinja2 delete mode 100644 resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_ubuntu.jinja2 delete mode 100644 resources/not_provisioned_node/1.0.0/templates/provisioning.json.jinja delete mode 100644 resources/nova_api_puppet/1.0.0/README.md delete mode 100644 resources/nova_api_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/nova_api_puppet/1.0.0/actions/run.pp delete mode 100644 resources/nova_api_puppet/1.0.0/actions/update.pp delete mode 100644 resources/nova_api_puppet/1.0.0/meta.yaml delete mode 100644 resources/nova_api_puppet/1.0.0/test.py delete mode 100644 resources/nova_compute_libvirt_puppet/1.0.0/README.md delete mode 100644 resources/nova_compute_libvirt_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/nova_compute_libvirt_puppet/1.0.0/actions/run.pp delete mode 100644 resources/nova_compute_libvirt_puppet/1.0.0/actions/update.pp delete mode 100644 resources/nova_compute_libvirt_puppet/1.0.0/meta.yaml delete mode 100644 resources/nova_compute_puppet/1.0.0/README.md delete mode 100644 resources/nova_compute_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/nova_compute_puppet/1.0.0/actions/run.pp delete mode 100644 resources/nova_compute_puppet/1.0.0/actions/update.pp delete mode 100644 resources/nova_compute_puppet/1.0.0/meta.yaml delete mode 100644 resources/nova_conductor_puppet/1.0.0/README.md delete mode 100644 resources/nova_conductor_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/nova_conductor_puppet/1.0.0/actions/run.pp delete mode 100644 resources/nova_conductor_puppet/1.0.0/actions/update.pp delete mode 100644 resources/nova_conductor_puppet/1.0.0/meta.yaml delete mode 100644 resources/nova_generic_service_puppet/1.0.0/README.md delete mode 100644 resources/nova_generic_service_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/nova_generic_service_puppet/1.0.0/actions/run.pp delete mode 100644 resources/nova_generic_service_puppet/1.0.0/actions/update.pp delete mode 100644 resources/nova_generic_service_puppet/1.0.0/meta.yaml delete mode 100644 resources/nova_neutron_puppet/1.0.0/README.md delete mode 100644 resources/nova_neutron_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/nova_neutron_puppet/1.0.0/actions/run.pp delete mode 100644 resources/nova_neutron_puppet/1.0.0/meta.yaml delete mode 100644 resources/nova_puppet/1.0.0/README.md delete mode 100644 resources/nova_puppet/1.0.0/actions/remove.pp delete mode 100644 resources/nova_puppet/1.0.0/actions/run.pp delete mode 100644 resources/nova_puppet/1.0.0/meta.yaml delete mode 100644 resources/openrc_file/1.0.0/actions/remove.yaml delete mode 100644 resources/openrc_file/1.0.0/actions/run.yaml delete mode 100644 resources/openrc_file/1.0.0/meta.yaml delete mode 100644 resources/openrc_file/1.0.0/templates/openrc.template delete mode 100644 resources/rabbitmq_config/1.0.0/actions/remove.yaml delete mode 100644 
resources/rabbitmq_config/1.0.0/actions/run.yaml delete mode 100644 resources/rabbitmq_config/1.0.0/meta.yaml delete mode 100644 resources/rabbitmq_config/1.0.0/templates/rabbitmq.conf delete mode 100644 resources/rabbitmq_service/1.0.0/actions/remove.pp delete mode 100644 resources/rabbitmq_service/1.0.0/actions/run.pp delete mode 100644 resources/rabbitmq_service/1.0.0/meta.yaml delete mode 100644 resources/rabbitmq_user/1.0.0/actions/remove.yaml delete mode 100644 resources/rabbitmq_user/1.0.0/actions/run.yaml delete mode 100644 resources/rabbitmq_user/1.0.0/meta.yaml delete mode 100644 resources/rabbitmq_vhost/1.0.0/actions/remove.yaml delete mode 100644 resources/rabbitmq_vhost/1.0.0/actions/run.yaml delete mode 100644 resources/rabbitmq_vhost/1.0.0/meta.yaml delete mode 100644 resources/remote_file/1.0.0/actions/run.sh delete mode 100644 resources/remote_file/1.0.0/meta.yaml delete mode 100644 resources/riak_join_single/1.0.0/actions/join.yaml delete mode 100644 resources/riak_join_single/1.0.0/meta.yaml delete mode 100644 resources/riak_node/1.0.0/actions/commit.yaml delete mode 100644 resources/riak_node/1.0.0/actions/join.yaml delete mode 100644 resources/riak_node/1.0.0/actions/remove.yaml delete mode 100644 resources/riak_node/1.0.0/actions/run.yaml delete mode 100644 resources/riak_node/1.0.0/actions/update.yaml delete mode 100644 resources/riak_node/1.0.0/meta.yaml delete mode 100644 resources/riak_node/1.0.0/templates/riak.conf.jinja delete mode 100644 resources/ro_node/1.0.0/meta.yaml delete mode 100644 resources/solar_bootstrap/1.0.0/actions/run.yaml delete mode 100644 resources/solar_bootstrap/1.0.0/meta.yaml delete mode 100644 resources/sources/1.0.0/meta.yaml delete mode 100644 resources/ssh_key/1.0.0/actions/run.yaml delete mode 100644 resources/ssh_key/1.0.0/meta.yaml delete mode 100644 resources/transport_rsync/1.0.0/meta.yaml delete mode 100644 resources/transport_solar_agent/1.0.0/actions/run.yaml delete mode 100644 resources/transport_solar_agent/1.0.0/actions/update.yaml delete mode 100644 resources/transport_solar_agent/1.0.0/meta.yaml delete mode 100644 resources/transport_ssh/1.0.0/meta.yaml delete mode 100644 resources/transport_torrent/1.0.0/actions/run.yaml delete mode 100644 resources/transport_torrent/1.0.0/meta.yaml delete mode 100644 resources/transport_torrent/1.0.0/scripts/solar_torrent.py delete mode 100644 resources/transports/1.0.0/meta.yaml delete mode 100644 resources/volume_group/1.0.0/actions/remove.yaml delete mode 100644 resources/volume_group/1.0.0/actions/run.yaml delete mode 100644 resources/volume_group/1.0.0/meta.yaml delete mode 100644 resources/vxlan_mesh/1.0.0/actions/run.yaml delete mode 100644 resources/vxlan_mesh/1.0.0/meta.yaml delete mode 100644 templates/controller/1.0.0/controller.yaml delete mode 100644 templates/glance/1.0.0/glance.yaml delete mode 100644 templates/glance_base/1.0.0/glance_base.yaml delete mode 100644 templates/glance_db/1.0.0/glance_db.yaml delete mode 100644 templates/glance_registry/1.0.0/glance_registry.yaml delete mode 100644 templates/haproxy/1.0.0/haproxy.yaml delete mode 100644 templates/keystone_api/1.0.0/keystone_api.yaml delete mode 100644 templates/keystone_base/1.0.0/keystone_base.yaml delete mode 100644 templates/mos_repos/1.0.0/mos_repos.yaml delete mode 100644 templates/nodes/1.0.0/nodes.yaml delete mode 100644 templates/nodes_network/1.0.0/nodes_network.yaml delete mode 100644 templates/nodes_with_transports/1.0.0/nodes_with_transports.yaml delete mode 100644 
templates/not_provisioned_nodes/1.0.0/not_provisioned_nodes.yaml delete mode 100644 templates/openstack_base/1.0.0/openstack_base.yaml delete mode 100644 templates/profile/1.0.0/profile.yaml delete mode 100644 templates/seed_node/1.0.0/seed_node.yaml delete mode 100644 templates/sources/1.0.0/sources.yaml diff --git a/.gitignore b/.gitignore index 8e43ecd3..0eda1d05 100644 --- a/.gitignore +++ b/.gitignore @@ -57,3 +57,6 @@ solar/.cache .solar_config_override .ropeproject + +# for simplicity let's keep solar-resources there +solar-resources diff --git a/README.md b/README.md index d97293ca..ba96f79f 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ For now all commands should be executed from `solar-dev` machine from `/vagrant Basic flow is: -1. Create some resources (look at `examples/openstack/openstack.py`) and connect +1. Create some resources (look at `solar-resources/examples/openstack/openstack.py`) and connect them between each other, and place them on nodes. 1. Run `solar changes stage` (this stages the changes) 1. Run `solar changes process` (this prepares orchestrator graph, returning @@ -288,7 +288,7 @@ riak_master_service.connect_list( ) ``` -For full Riak example, please look at `examples/riak/riaks-template.py`. +For full Riak example, please look at `solar-resources/examples/riak/riaks-template.py`. Full documentation of individual functions is found in the `solar/template.py` file. @@ -301,4 +301,4 @@ Solar is shipped with sane defaults in `vagrant-setting.yaml_defaults`. If you n * In `vagrant-setting.yaml_defaults` or `vagrant-settings.yaml` file uncomment `preprovisioned: false` line. * Run `vagrant up`, it will take some time because it builds image for bootstrap and IBP images. -* Now you can run provisioning `/vagrant/examples/provisioning/provision.sh` +* Now you can run provisioning `/vagrant/solar-resources/examples/provisioning/provision.sh` diff --git a/bootstrap/playbooks/solar.yaml b/bootstrap/playbooks/solar.yaml index 6753fd9b..93b2b3b9 100644 --- a/bootstrap/playbooks/solar.yaml +++ b/bootstrap/playbooks/solar.yaml @@ -14,6 +14,10 @@ - hosts: all tasks: + # setup solar-resources + # change to openstack/solar-resources later + - git: repo=https://github.com/Mirantis/solar-resources.git dest=/vagrant/solar-resources update=no owner=vagrant + # set default config location - lineinfile: dest: /home/vagrant/.bashrc @@ -48,8 +52,8 @@ state: present - file: path=/var/lib/solar/repositories state=directory owner=vagrant - - file: src=/vagrant/resources dest=/var/lib/solar/repositories/resources state=link owner=vagrant - - file: src=/vagrant/templates dest=/var/lib/solar/repositories/templates state=link owner=vagrant + - file: src=/vagrant/solar-resources/resources dest=/var/lib/solar/repositories/resources state=link owner=vagrant + - file: src=/vagrant/solar-resources/templates dest=/var/lib/solar/repositories/templates state=link owner=vagrant - name: Starting docker containers shell: docker-compose up -d chdir=/vagrant diff --git a/doc/source/resource.rst b/doc/source/resource.rst index efc55f1a..0f66c247 100644 --- a/doc/source/resource.rst +++ b/doc/source/resource.rst @@ -11,7 +11,11 @@ resource. Examples are: Resources are defined in ``meta.yaml`` file. This file is responsible for basic configuration of given resource. Below is an explanation what constitutes -typical resource. +typical resource. + +.. TODO: change to openstack/solar-resources later +.. 
note:: + You can find example resources https://github.com/Mirantis/solar-resources Basic resource structure @@ -40,7 +44,7 @@ as below :: Solar currently supports following handlers: * puppet - first version of puppet handler (legacy, will be deprecated soon) -* puppetv2 - second, improved version of puppet, supporting hiera integration +* puppetv2 - second, improved version of puppet, supporting hiera integration * ansible_playbook - first version of ansible handler (legacy, will be deprecated soon) * ansible_template - second generation of ansible implementation, includes transport support diff --git a/docker-compose.yml b/docker-compose.yml index 286bfa15..d55c820f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,9 +5,7 @@ solar-celery: volumes: - /vagrant/.vagrant:/vagrant/.vagrant - /vagrant:/solar - - /vagrant/templates:/vagrant/templates - - /vagrant/resources:/vagrant/resources - - /vagrant/library:/vagrant/library + - /vagrant/solar-resources:/vagrant/solar-resources - /root/.ssh:/root/.ssh - ./bootstrap/playbooks/celery.yaml:/celery.yaml - /var/lib/solar/repositories:/var/lib/solar/repositories diff --git a/examples/bootstrap/README.md b/examples/bootstrap/README.md deleted file mode 100644 index fbb7fadf..00000000 --- a/examples/bootstrap/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Demo of the `solar_bootstrap` Resource - -You need to instantiate Vagrant with a slave node which is unprovisioned -(i.e. started from the `trusty64` Vagrant box). - -You can start the boxes from the `Vagrantfile` in master directory and -`vagrant-settings.yml` from this directory. - -Running -```bash -python example-bootstrap.py deploy -``` -will deploy full Solar env to node `solar-dev2`. diff --git a/examples/bootstrap/example-bootstrap.py b/examples/bootstrap/example-bootstrap.py deleted file mode 100755 index dc7b1fbb..00000000 --- a/examples/bootstrap/example-bootstrap.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python - -import click -import sys -import time - -from solar.core import actions -from solar.core import resource -from solar.core import signals -from solar.core import validation -from solar.core.resource import composer as cr -from solar import errors -from solar.dblayer.model import ModelMeta - - -@click.group() -def main(): - pass - - -def setup_resources(): - ModelMeta.remove_all() - - node2 = cr.create('node2', 'resources/ro_node/', { - 'ip': '10.0.0.4', - 'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key', - 'ssh_user': 'vagrant' - })[0] - - solar_bootstrap2 = cr.create('solar_bootstrap2', 'resources/solar_bootstrap', {'master_ip': '10.0.0.2'})[0] - - signals.connect(node2, solar_bootstrap2) - - has_errors = False - for r in locals().values(): - if not isinstance(r, resource.Resource): - continue - - print 'Validating {}'.format(r.name) - errors = validation.validate_resource(r) - if errors: - has_errors = True - print 'ERROR: %s: %s' % (r.name, errors) - - if has_errors: - sys.exit(1) - -resources_to_run = [ - 'solar_bootstrap2', -] - - -@click.command() -def deploy(): - setup_resources() - - # run - resources = resource.load_all() - resources = {r.name: r for r in resources} - - for name in resources_to_run: - try: - actions.resource_action(resources[name], 'run') - except errors.SolarError as e: - print 'WARNING: %s' % str(e) - raise - - time.sleep(10) - - -@click.command() -def undeploy(): - resources = resource.load_all() - resources = {r.name: r for r in resources} - - for name in reversed(resources_to_run): - try: - 
actions.resource_action(resources[name], 'remove') - except errors.SolarError as e: - print 'WARNING: %s' % str(e) - - ModelMeta.remove_all() - -main.add_command(deploy) -main.add_command(undeploy) - - -if __name__ == '__main__': - main() diff --git a/examples/bootstrap/vagrant-settings.yaml b/examples/bootstrap/vagrant-settings.yaml deleted file mode 100644 index ca4632f1..00000000 --- a/examples/bootstrap/vagrant-settings.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# rename it to vagrant-settings.yml then Vagrantfile -# will use values from this file - -slaves_count: 3 -slaves_image: ubuntu/trusty64 diff --git a/examples/cli/README b/examples/cli/README deleted file mode 100644 index fa249101..00000000 --- a/examples/cli/README +++ /dev/null @@ -1,10 +0,0 @@ -This example shows how to use solar via CLI. - -Usage -===== - -Run - -`bash ./example.sh` - -after this you can run `solar orch report last` and wait until all tasks have status SUCCESS. diff --git a/examples/cli/example.sh b/examples/cli/example.sh deleted file mode 100755 index 6f86fdeb..00000000 --- a/examples/cli/example.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -eux - -function deploy { - # this two commands will clean db - solar resource clear_all - - solar resource create nodes templates/nodes '{"count": 1}' - solar resource create mariadb1 /vagrant/resources/mariadb_service image=mariadb port=3306 - solar connect node1 mariadb1 - - solar changes stage - solar changes process - solar orch run-once last - solar orch report last -} - -deploy diff --git a/examples/hosts_file/README.md b/examples/hosts_file/README.md deleted file mode 100644 index 7760f9a4..00000000 --- a/examples/hosts_file/README.md +++ /dev/null @@ -1,95 +0,0 @@ -Very simple solar example two nodes + hosts file mapping - -Run: - -`python examples/hosts_file/hosts.py` - -Then you can continue with standard solar things: - -``` -solar changes stage -d -solar changes process -solar or run-once last -watch -n 1 solar or report last -``` - -Wait until all actions have state `SUCCESS`, -after that check `/etc/hosts` files on both nodes, it will contain entries like: - -``` -10.0.0.3 first1441705177.99 -10.0.0.4 second1441705178.0 -``` - -If you want to try out revert functionality - you can do it in a next way: - -After you created all the stuff, print history like this: - -`solar ch history` - -Output: - -``` -log task=hosts_file1.run uid=282fe919-6059-4100-affc-56a2b3992d9d -log task=hosts_file2.run uid=774f5a49-00f1-4bae-8a77-90d1b2d54164 -log task=node1.run uid=2559f22c-5aa9-4c05-91c6-b70884190a56 -log task=node2.run uid=18f06abe-3e8d-4356-b172-128e1dded0e6 -``` - -Now you can try to revert creation of hosts_file1 - -``` -solar ch revert 282fe919-6059-4100-affc-56a2b3992d9d -solar ch stage -log task=hosts_file1.remove uid=1fe456c1-a847-4902-88bf-b7f2c5687d40 -solar ch process -solar or run-once last -watch -n 1 solar or report last -``` - -For now this file will be simply cleaned (more cophisticated task can be added later). -And you can create revert of your revert, which will lead to created hosts_file1 -resource and /etc/hosts with appropriate content - -``` -solar ch revert 282fe919-6059-4100-affc-56a2b3992d9d -solar ch stage -log task=hosts_file1.remove uid=1fe456c1-a847-4902-88bf-b7f2c5687d40 -solar ch process -solar changes run-once last -watch -n 1 solar changes report last -``` - -After this you can revert your result of your previous revert, which will -create this file with relevant content. 
- -``` -solar ch history -n 1 -log task=hosts_file1.remove uid=1fe456c1-a847-4902-88bf-b7f2c5687d40 -solar ch revert 1fe456c1-a847-4902-88bf-b7f2c5687d40 -solar ch stage -log task=hosts_file1.run uid=493326b2-989f-4b94-a22c-0bbd0fc5e755 -solar ch process -solar changes run-once last -watch -n 1 solar changes report last -``` - -How to discard pending changes ? - -After database was populated by some example, lets say -``` -python examples/hosts_file/hosts.py deploy -``` - -User is able to discard all changes with -``` -solar ch discard -``` - -Or any particular change with -``` -solar ch stage -log task=hosts_file1.run uid=a5990538-c9c6-49e4-8d58-29fae9c7aaed -solar ch discard a5990538-c9c6-49e4-8d58-29fae9c7aaed -``` - diff --git a/examples/hosts_file/hosts.py b/examples/hosts_file/hosts.py deleted file mode 100755 index cca1ab94..00000000 --- a/examples/hosts_file/hosts.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/python -import click -import sys -import time - -from solar.core import signals -from solar.core.resource import composer as cr -from solar.dblayer.model import ModelMeta - - -def run(): - ModelMeta.remove_all() - - resources = cr.create('nodes', 'templates/nodes', {'count': 2}) - - node1, node2 = [x for x in resources if x.name.startswith('node')] - hosts1, hosts2 = [x for x in resources - if x.name.startswith('hosts_file')] - - node1.connect(hosts1, { - 'name': 'hosts:name', - 'ip': 'hosts:ip', - }) - - node2.connect(hosts1, { - 'name': 'hosts:name', - 'ip': 'hosts:ip', - }) - - node1.connect(hosts2, { - 'name': 'hosts:name', - 'ip': 'hosts:ip', - }) - - node2.connect(hosts2, { - 'name': 'hosts:name', - 'ip': 'hosts:ip', - }) - -run() diff --git a/examples/librarian/README b/examples/librarian/README deleted file mode 100644 index 20b1caa0..00000000 --- a/examples/librarian/README +++ /dev/null @@ -1,15 +0,0 @@ -Usage -===== - -Run it from /vagrant dir: - -``` -solar resource clear_all -solar resource create nodes templates/nodes '{"count": 1}' -solar resource create librarian_example examples/librarian/librarian.yaml '{"node": "node1"}' - -solar changes stage -solar changes process -solar orch run-once -solar orch report -w 100 -``` diff --git a/examples/librarian/librarian.yaml b/examples/librarian/librarian.yaml deleted file mode 100644 index 07ec82c1..00000000 --- a/examples/librarian/librarian.yaml +++ /dev/null @@ -1,22 +0,0 @@ -id: librarian_examples - -resources: - - id: rabbitmq_service1 - from: resources/rabbitmq_service - location: #{node}# - input: - management_port: 15672 - port: 5672 - - - id: librarian - location: #{node}# - from: resources/librarian - input: - modules: - - rabbitmq_service1::module::NO_EVENTS - -events: - - type: depends_on - parent_action: librarian.run - state: success - child_action: rabbitmq_service1.run diff --git a/examples/library_ceph/README.md b/examples/library_ceph/README.md deleted file mode 100644 index 97e6cc4f..00000000 --- a/examples/library_ceph/README.md +++ /dev/null @@ -1,30 +0,0 @@ -Current example will do following things: - -- fetch fuel-library from github -- use ./update_modules.sh to fetch librarian dependencies -- generate ceph keys on a solar-dev1 -- install ceph-mon on solar-dev1 (INPROGRESS) -- install ceph-osd on solar-dev2 (TODO) -- imlement removal mechanism for ceph-mon/ceph-osd (TODO) - - -To use it: - -``` -python exaples/library_ceph/ceph.py -solar ch stage && solar ch process -solar or run-once last -w 120 -``` - -If it will fail you can run particular resource action, with a lot of -debug info. 
- -``` -solar res action run ceph_mon1 -``` - -To add repositories use - -``` -solar resource create apt1 templates/mos_repos node=node1 index=1 -``` diff --git a/examples/library_ceph/ceph.py b/examples/library_ceph/ceph.py deleted file mode 100644 index 24e173df..00000000 --- a/examples/library_ceph/ceph.py +++ /dev/null @@ -1,73 +0,0 @@ - -from solar.core.resource import composer as cr -from solar.dblayer.model import ModelMeta -import yaml - - -STORAGE = {'objects_ceph': True, - 'osd_pool_size': 2, - 'pg_num': 128} - -KEYSTONE = {'admin_token': 'abcde'} - - -NETWORK_SCHEMA = { - 'endpoints': {'eth1': {'IP': ['10.0.0.3/24']}}, - 'roles': {'ceph/replication': 'eth1', - 'ceph/public': 'eth1'} - } - -NETWORK_METADATA = yaml.load(""" - solar-dev1: - uid: '1' - fqdn: solar-dev1 - network_roles: - ceph/public: 10.0.0.3 - ceph/replication: 10.0.0.3 - node_roles: - - ceph-mon - name: solar-dev1 - - """) - - -def deploy(): - ModelMeta.remove_all() - resources = cr.create('nodes', 'templates/nodes', {'count': 2}) - first_node, second_node = [x for x in resources if x.name.startswith('node')] - first_transp = next(x for x in resources if x.name.startswith('transport')) - - library = cr.create('library1', 'resources/fuel_library', {})[0] - first_node.connect(library) - - keys = cr.create('ceph_key', 'resources/ceph_keys', {})[0] - first_node.connect(keys) - - remote_file = cr.create('ceph_key2', 'resources/remote_file', - {'dest': '/var/lib/astute/'})[0] - second_node.connect(remote_file) - keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'}) - first_transp.connect(remote_file, {'transports': 'remote'}) - - - ceph_mon = cr.create('ceph_mon1', 'resources/ceph_mon', - {'storage': STORAGE, - 'keystone': KEYSTONE, - 'network_scheme': NETWORK_SCHEMA, - 'ceph_monitor_nodes': NETWORK_METADATA, - 'ceph_primary_monitor_node': NETWORK_METADATA, - 'role': 'controller', - })[0] - - managed_apt = cr.create( - 'managed_apt1', 'templates/mos_repos', - {'node': first_node.name, 'index': 0})[-1] - - keys.connect(ceph_mon, {}) - first_node.connect(ceph_mon, - {'ip': ['ip', 'public_vip', 'management_vip']}) - library.connect(ceph_mon, {'puppet_modules': 'puppet_modules'}) - managed_apt.connect(ceph_mon, {}) - -if __name__ == '__main__': - deploy() diff --git a/examples/lxc/README.md b/examples/lxc/README.md deleted file mode 100644 index 7b177ef4..00000000 --- a/examples/lxc/README.md +++ /dev/null @@ -1,25 +0,0 @@ -Bootstraping lxc containers using solar and roles from os-ansible-deployment - -At first run: - -`python examples/lxc/example-lxc.py deploy` - -It will do several things: - -* Prepare about ~10 containers on solar-dev1 -* Add linux bridge on solar-dev and solar-dev1 with uid br-int53 -* Setup vxlan tunnel for solar-dev and solar-dev1 -* Generate ssh key and inject it into containers - -Later this containers can be used as regular nodes in solar. -Check rabbitmq example at the end of the file. - -To deploy everything use usual solar commands. 
-``` -solar changes stage -d -solar changes process -solar orch run-once last -watch -n 1 solar orch report last -``` - -Wait until all actions have state `SUCCESS` diff --git a/examples/lxc/example-lxc.py b/examples/lxc/example-lxc.py deleted file mode 100755 index 79f690d3..00000000 --- a/examples/lxc/example-lxc.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python - -# To run: -# example-lxc.py deploy -# solar changes stage -# solar changes process -# solar orch run-once last -# watch 'solar orch report last' - -import click - -from solar.core import signals -from solar.core.resource import composer as cr - -from solar.system_log import change -from solar.cli import orch -from solar.dblayer.model import ModelMeta - - -@click.group() -def main(): - pass - - -def lxc_template(idx): - return { - 'user': 'root', - 'mgmt_ip': '172.18.11.{}'.format(idx), - 'container_name': 'test{}'.format(idx), - 'inventory_hostname': 'test{}'.format(idx), - 'properties': - {'container_release': 'trusty'}, - 'container_networks': - {'mgmt': { - 'address': '172.18.11.{}'.format(idx), # address for container - 'bridge': 'br-int53', # bridge to attach veth pair - 'bridge_address': '172.18.11.253/24', - 'interface': 'eth1', # interface name in container - 'netmask': '255.255.255.0', - 'type': 'veth'}} - } - - -@click.command() -def deploy(): - ModelMeta.remove_all() - - node1 = cr.create('nodes', 'templates/nodes', {})[0] - seed = cr.create('nodes', 'templates/seed_node', {})[0] - - ssh_key = cr.create('ssh_key1', 'resources/ssh_key', { - 'keys_dir': '/vagrant/.ssh', - 'private_key': '/vagrant/.ssh/id_rsa', - 'public_key': '/vagrant/.ssh/id_rsa.pub', - 'passphrase': '', - })[0] - signals.connect(seed, ssh_key) - - cnets1 = cr.create('cnets1', 'resources/container_networks', { - 'networks': - {'mgmt': { - 'bridge': 'br-int53', - 'bridge_address': '172.18.11.254/24' - }} - })[0] - cnets2 = cr.create('cnets2', 'resources/container_networks', { - 'networks': - {'mgmt': { - 'bridge': 'br-int53', - 'bridge_address': '172.18.11.253/24' - }} - })[0] - signals.connect(seed, cnets1) - signals.connect(node1, cnets2) - - vxlan_mesh1 = cr.create('vxlan_mesh1', 'resources/vxlan_mesh', { - 'id': 53, - 'parent': 'eth1', - 'master': 'br-int53' - })[0] - vxlan_mesh2 = cr.create('vxlan_mesh2', 'resources/vxlan_mesh', { - 'id': 53, - 'parent': 'eth1', - 'master': 'br-int53' - })[0] - # seed node should be connected anyway, because we need to be able to ssh - # into containers from any node - signals.connect(seed, vxlan_mesh1) - signals.connect(node1, vxlan_mesh2) - - lxc_infra1 = cr.create('lxc_infra1', 'resources/lxc_host', {})[0] - signals.connect(node1, lxc_infra1) - - lxc_hosts = range(28, 35) - hosts_map = {} - for idx in lxc_hosts: - - lxc_host_idx = cr.create( - 'lxc_host{}'.format(idx), - 'resources/lxc_container', lxc_template(idx))[0] - hosts_map[idx] = lxc_host_idx - - signals.connect(node1, lxc_host_idx, { - 'ip': ['ansible_ssh_host', 'physical_host'], - }) - # this is a required to introduce depends on relationship between lxc infre - # and lxc container - signals.connect(lxc_infra1, lxc_host_idx, {'provides': 'requires'}) - signals.connect(cnets2, lxc_host_idx) - signals.connect(ssh_key, lxc_host_idx, { - 'public_key': 'pub_key', - 'private_key': 'user_key'}) - - # RABBIT - rabbitmq_service1 = cr.create('rabbitmq_service1', 'resources/rabbitmq_service/', { - 'management_port': 15672, - 'port': 5672, - })[0] - openstack_vhost = cr.create('openstack_vhost', 'resources/rabbitmq_vhost/', { - 'vhost_name': 'openstack' 
- })[0] - - openstack_rabbitmq_user = cr.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', { - 'user_name': 'openstack', - 'password': 'openstack_password' - })[0] - - signals.connect(hosts_map[28], rabbitmq_service1, { - 'mgmt_ip': 'ip', - 'user_key': 'ssh_key', - 'user': 'ssh_user'}) - signals.connect(rabbitmq_service1, openstack_vhost) - signals.connect(rabbitmq_service1, openstack_rabbitmq_user) - signals.connect(openstack_vhost, openstack_rabbitmq_user, { - 'vhost_name', - }) - - print change.send_to_orchestration() - -main.add_command(deploy) - - -if __name__ == '__main__': - main() diff --git a/examples/openstack/README.md b/examples/openstack/README.md deleted file mode 100644 index c63eb50a..00000000 --- a/examples/openstack/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Deploying simple two node OpenStack env. - -You need to run it from main solar directory. To prepare resources run: - -`python examples/openstack/openstack.py create_all` - -Then to start deployment: - -`solar changes stage -solar changes process -solar orch run-once last` - -To see the progress: - -`solar orch report` diff --git a/examples/openstack/openstack.py b/examples/openstack/openstack.py deleted file mode 100755 index cae94678..00000000 --- a/examples/openstack/openstack.py +++ /dev/null @@ -1,873 +0,0 @@ -#!/usr/bin/env python - -import click -import sys - -from solar.core import resource -from solar.core import signals -from solar.core import validation -from solar.core.resource import composer as cr -from solar import events as evapi -from solar.dblayer.model import ModelMeta - -PROFILE = False -#PROFILE = True - - -if PROFILE: - import StringIO - import cProfile - import pstats - - pr = cProfile.Profile() - pr.enable() - - -# TODO -# Resource for repository OR puppet apt-module in run.pp -# add-apt-repository cloud-archive:juno -# To discuss: install stuff in Docker container - -# NOTE -# No copy of manifests, pull from upstream (implemented in the librarian resource) -# Official puppet manifests, not fuel-library - - - -@click.group() -def main(): - pass - - -def prepare_nodes(nodes_count): - resources = cr.create('nodes', 'templates/nodes', {"count": nodes_count}) - nodes = resources.like('node') - resources = cr.create('nodes_network', 'templates/nodes_network', {"count": nodes_count}) - nodes_sdn = resources.like('node') - r = {} - - for node, node_sdn in zip(nodes, nodes_sdn): - r[node.name] = node - r[node_sdn.name] = node_sdn - - # LIBRARIAN - librarian = cr.create('librarian_{}'.format(node.name), 'resources/librarian', {})[0] - r[librarian.name] = librarian - - node.connect(librarian, {}) - - # NETWORKING - # TODO(bogdando) node's IPs should be populated as br-mgmt IPs, but now are hardcoded in templates - signals.connect(node, node_sdn) - node_sdn.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, node_sdn.name, actions=('run', 'update')) - - signals.connect(node, node_sdn) - node_sdn.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, node_sdn.name, actions=('run', 'update')) - - return r - -def setup_base(node, librarian): - # MARIADB - mariadb_service = cr.create('mariadb_service1', 'resources/mariadb_service', { - 'image': 'mariadb', - 'port': 3306 - })[0] - - node.connect(mariadb_service) - - # RABBIT - rabbitmq_service = cr.create('rabbitmq_service1', 'resources/rabbitmq_service/', { - 'management_port': 15672, - 'port': 5672, - })[0] - openstack_vhost = cr.create('openstack_vhost', 
'resources/rabbitmq_vhost/', { - 'vhost_name': 'openstack' - })[0] - - openstack_rabbitmq_user = cr.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', { - 'user_name': 'openstack', - 'password': 'openstack_password' - })[0] - - node.connect(rabbitmq_service) - rabbitmq_service.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, rabbitmq_service.name, actions=('run', 'update')) - rabbitmq_service.connect(openstack_vhost) - rabbitmq_service.connect(openstack_rabbitmq_user) - openstack_vhost.connect(openstack_rabbitmq_user, { - 'vhost_name', - }) - return {'mariadb_service': mariadb_service, - 'rabbitmq_service1': rabbitmq_service, - 'openstack_vhost': openstack_vhost, - 'openstack_rabbitmq_user': openstack_rabbitmq_user} - -def setup_keystone(node, librarian, mariadb_service, openstack_rabbitmq_user): - keystone_puppet = cr.create('keystone_puppet', 'resources/keystone_puppet', {})[0] - - keystone_puppet.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, keystone_puppet.name, actions=('run', 'update')) - - evapi.add_dep(openstack_rabbitmq_user.name, keystone_puppet.name, actions=('run', 'update')) - keystone_db = cr.create('keystone_db', 'resources/mariadb_db/', { - 'db_name': 'keystone_db', - 'login_user': 'root' - })[0] - keystone_db_user = cr.create('keystone_db_user', 'resources/mariadb_user/', { - 'user_name': 'keystone', - 'user_password': 'keystone', - })[0] - keystone_service_endpoint = cr.create('keystone_service_endpoint', 'resources/keystone_service_endpoint', { - 'endpoint_name': 'keystone', - 'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2.0', - 'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2.0', - 'publicurl': 'http://{{public_ip}}:{{public_port}}/v2.0', - 'description': 'OpenStack Identity Service', - 'type': 'identity' - })[0] - - admin_tenant = cr.create('admin_tenant', 'resources/keystone_tenant', { - 'tenant_name': 'admin' - })[0] - admin_user = cr.create('admin_user', 'resources/keystone_user', { - 'user_name': 'admin', - 'user_password': 'admin' - })[0] - admin_role = cr.create('admin_role', 'resources/keystone_role', { - 'role_name': 'admin' - })[0] - services_tenant = cr.create('services_tenant', 'resources/keystone_tenant', { - 'tenant_name': 'services' - })[0] - admin_role_services = cr.create('admin_role_services', 'resources/keystone_role', { - 'role_name': 'admin' - })[0] - - node.connect(keystone_db) - node.connect(keystone_db_user) - node.connect(keystone_puppet) - mariadb_service.connect(keystone_db, { - 'port': 'login_port', - 'root_user': 'login_user', - 'root_password': 'login_password', - 'ip' : 'db_host', - }) - keystone_db.connect(keystone_db_user, { - 'db_name', - 'login_port', - 'login_user', - 'login_password', - 'db_host' - }) - - node.connect(keystone_service_endpoint) - keystone_puppet.connect(keystone_service_endpoint, { - 'admin_token': 'admin_token', - 'admin_port': ['admin_port', 'keystone_admin_port'], - 'ip': ['keystone_host', 'admin_ip', 'internal_ip', 'public_ip'], - 'port': ['internal_port', 'public_port'], - }) - - keystone_puppet.connect(admin_tenant) - keystone_puppet.connect(admin_tenant, { - 'admin_port': 'keystone_port', - 'ip': 'keystone_host' - }) - admin_tenant.connect(admin_user) - admin_user.connect(admin_role) - admin_tenant.connect(admin_role, { 'tenant_name' }) - - admin_user.connect(admin_role_services) - services_tenant.connect(admin_role_services, { 'tenant_name' }) - - keystone_puppet.connect(services_tenant) - 
keystone_puppet.connect(services_tenant, { - 'admin_port': 'keystone_port', - 'ip': 'keystone_host' - }) - - keystone_db.connect(keystone_puppet, { - 'db_name', - }) - keystone_db_user.connect(keystone_puppet, { - 'user_name': 'db_user', - 'user_password': 'db_password', - }) - mariadb_service.connect(keystone_puppet, { - 'ip': 'db_host', - 'port': 'db_port', - }) - return {'keystone_puppet': keystone_puppet, - 'keystone_db': keystone_db, - 'keystone_db_user': keystone_db_user, - 'keystone_service_endpoint': keystone_service_endpoint, - 'admin_tenant': admin_tenant, - 'admin_user': admin_user, - 'admin_role': admin_role, - 'services_tenant': services_tenant, - 'admin_role_services': admin_role_services, - } - -def setup_openrc(node, keystone_puppet, admin_user): - # OPENRC - openrc = cr.create('openrc_file', 'resources/openrc_file', {})[0] - - node.connect(openrc) - keystone_puppet.connect(openrc, {'ip': 'keystone_host', 'admin_port':'keystone_port'}) - admin_user.connect(openrc, {'user_name': 'user_name','user_password':'password', 'tenant_name': 'tenant'}) - return {'openrc_file' : openrc} - -def setup_neutron(node, librarian, rabbitmq_service, openstack_rabbitmq_user, openstack_vhost): - # NEUTRON - # Deploy chain neutron -> (plugins) -> neutron_server -> ( agents ) - neutron_puppet = cr.create('neutron_puppet', 'resources/neutron_puppet', { - 'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin' - })[0] - - node.connect(neutron_puppet) - - neutron_puppet.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, neutron_puppet.name, actions=('run', 'update')) - - rabbitmq_service.connect(neutron_puppet, { - 'ip': 'rabbit_host', - 'port': 'rabbit_port' - }) - openstack_rabbitmq_user.connect(neutron_puppet, { - 'user_name': 'rabbit_user', - 'password': 'rabbit_password'}) - openstack_vhost.connect(neutron_puppet, { - 'vhost_name': 'rabbit_virtual_host'}) - return {'neutron_puppet': neutron_puppet} - -def setup_neutron_api(node, mariadb_service, admin_user, keystone_puppet, services_tenant, neutron_puppet): - # NEUTRON PLUGIN AND NEUTRON API (SERVER) - neutron_plugins_ml2 = cr.create('neutron_plugins_ml2', 'resources/neutron_plugins_ml2_puppet', {})[0] - node.connect(neutron_plugins_ml2) - - neutron_server_puppet = cr.create('neutron_server_puppet', 'resources/neutron_server_puppet', { - 'sync_db': True, - })[0] - evapi.add_dep(neutron_puppet.name, neutron_server_puppet.name, actions=('run',)) - evapi.add_dep(neutron_plugins_ml2.name, neutron_server_puppet.name, actions=('run',)) - evapi.add_dep(neutron_puppet.name, neutron_plugins_ml2.name, actions=('run',)) - - neutron_db = cr.create('neutron_db', 'resources/mariadb_db/', { - 'db_name': 'neutron_db', 'login_user': 'root'})[0] - neutron_db_user = cr.create('neutron_db_user', 'resources/mariadb_user/', { - 'user_name': 'neutron', 'user_password': 'neutron', 'login_user': 'root'})[0] - neutron_keystone_user = cr.create('neutron_keystone_user', 'resources/keystone_user', { - 'user_name': 'neutron', - 'user_password': 'neutron' - })[0] - neutron_keystone_role = cr.create('neutron_keystone_role', 'resources/keystone_role', { - 'role_name': 'admin' - })[0] - evapi.add_dep(neutron_keystone_role.name, neutron_server_puppet.name, actions=('run',)) - neutron_keystone_service_endpoint = cr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', { - 'endpoint_name': 'neutron', - 'adminurl': 'http://{{admin_ip}}:{{admin_port}}', - 'internalurl': 'http://{{internal_ip}}:{{internal_port}}', - 
'publicurl': 'http://{{public_ip}}:{{public_port}}', - 'description': 'OpenStack Network Service', - 'type': 'network' - })[0] - - node.connect(neutron_db) - node.connect(neutron_db_user) - mariadb_service.connect(neutron_db, { - 'port': 'login_port', - 'root_password': 'login_password', - 'root_user': 'login_user', - 'ip' : 'db_host'}) - mariadb_service.connect(neutron_db_user, {'port': 'login_port', 'root_password': 'login_password'}) - neutron_db.connect(neutron_db_user, {'db_name', 'db_host'}) - neutron_db_user.connect(neutron_server_puppet, { - 'user_name':'db_user', - 'db_name':'db_name', - 'user_password':'db_password', - 'db_host' : 'db_host'}) - mariadb_service.connect(neutron_server_puppet, { - 'port': 'db_port', - 'ip' : 'db_host'}) - node.connect(neutron_server_puppet) - admin_user.connect(neutron_server_puppet, { - 'user_name': 'auth_user', - 'user_password': 'auth_password', - 'tenant_name': 'auth_tenant' - }) - keystone_puppet.connect(neutron_server_puppet, { - 'ip': 'auth_host', - 'port': 'auth_port' - }) - services_tenant.connect(neutron_keystone_user) - neutron_keystone_user.connect(neutron_keystone_role) - keystone_puppet.connect(neutron_keystone_service_endpoint, { - 'ip': ['ip', 'keystone_host'], - 'admin_port': 'keystone_admin_port', - 'admin_token': 'admin_token', - }) - neutron_puppet.connect(neutron_keystone_service_endpoint, { - 'ip': ['admin_ip', 'internal_ip', 'public_ip'], - 'bind_port': ['admin_port', 'internal_port', 'public_port'], - }) - return {'neutron_server_puppet': neutron_server_puppet, - 'neutron_plugins_ml2': neutron_plugins_ml2, - 'neutron_db': neutron_db, - 'neutron_db_user': neutron_db_user, - 'neutron_keystone_user': neutron_keystone_user, - 'neutron_keystone_role': neutron_keystone_role, - 'neutron_keystone_service_endpoint': neutron_keystone_service_endpoint} - -def setup_neutron_agent(node, neutron_server_puppet): - # NEUTRON ML2 PLUGIN & ML2-OVS AGENT WITH GRE - neutron_agents_ml2 = cr.create('neutron_agents_ml2', 'resources/neutron_agents_ml2_ovs_puppet', { - # TODO(bogdando) these should come from the node network resource - 'enable_tunneling': True, - 'tunnel_types': ['gre'], - 'local_ip': '10.1.0.13' # should be the IP addr of the br-mesh int. 
- })[0] - node.connect(neutron_agents_ml2) - evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml2.name, actions=('run',)) - - # NEUTRON DHCP, L3, metadata agents - neutron_agents_dhcp = cr.create('neutron_agents_dhcp', 'resources/neutron_agents_dhcp_puppet', {})[0] - node.connect(neutron_agents_dhcp) - evapi.add_dep(neutron_server_puppet.name, neutron_agents_dhcp.name, actions=('run',)) - - neutron_agents_l3 = cr.create('neutron_agents_l3', 'resources/neutron_agents_l3_puppet', { - # TODO(bogdando) these should come from the node network resource - 'metadata_port': 8775, - 'external_network_bridge': 'br-floating', - })[0] - node.connect(neutron_agents_l3) - evapi.add_dep(neutron_server_puppet.name, neutron_agents_l3.name, actions=('run',)) - - neutron_agents_metadata = cr.create('neutron_agents_metadata', 'resources/neutron_agents_metadata_puppet', { - 'sh2ared_secret': 'secret', - })[0] - node.connect(neutron_agents_metadata) - neutron_server_puppet.connect(neutron_agents_metadata, { - 'auth_host', 'auth_port', 'auth_password', - 'auth_tenant', 'auth_user', - }) - return {'neutron_agents_ml2': neutron_agents_ml2, - 'neutron_agents_dhcp': neutron_agents_dhcp, - 'neutron_agents_metadata': neutron_agents_metadata} - -def setup_neutron_compute(node, librarian, neutron_puppet, neutron_server_puppet): - # NEUTRON FOR COMPUTE (node1) - # Deploy chain neutron -> (plugins) -> ( agents ) - name = node.name - neutron_puppet2 = cr.create('neutron_puppet_{}'.format(name), 'resources/neutron_puppet', {})[0] - - neutron_puppet2.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, neutron_puppet2.name, actions=('run', 'update')) - dep = evapi.Dep(librarian.name, 'update', state='SUCESS', - child=neutron_puppet2.name, child_action='run') - evapi.add_event(dep) - - node.connect(neutron_puppet2) - neutron_puppet.connect(neutron_puppet2, { - 'rabbit_host', 'rabbit_port', - 'rabbit_user', 'rabbit_password', - 'rabbit_virtual_host', - 'package_ensure', 'core_plugin', - }) - - # NEUTRON OVS PLUGIN & AGENT WITH GRE FOR COMPUTE (node1) - neutron_plugins_ml22 = cr.create('neutron_plugins_ml_{}'.format(name), 'resources/neutron_plugins_ml2_puppet', {})[0] - node.connect(neutron_plugins_ml22) - evapi.add_dep(neutron_puppet2.name, neutron_plugins_ml22.name, actions=('run',)) - evapi.add_dep(neutron_server_puppet.name, neutron_plugins_ml22.name, actions=('run',)) - - neutron_agents_ml22 = cr.create('neutron_agents_ml_{}'.format(name), 'resources/neutron_agents_ml2_ovs_puppet', { - # TODO(bogdando) these should come from the node network resource - 'enable_tunneling': True, - 'tunnel_types': ['gre'], - 'local_ip': '10.1.0.14' # Should be the IP addr of the br-mesh int. 
- })[0] - node.connect(neutron_agents_ml22) - evapi.add_dep(neutron_puppet2.name, neutron_agents_ml22.name, actions=('run',)) - evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml22.name, actions=('run',)) - - return {'neutron_puppet2': neutron_puppet2, - 'neutron_plugins_ml22': neutron_plugins_ml22, - 'neutron_agents_ml22': neutron_agents_ml22} - -def setup_cinder(node, librarian, rabbitmq_service, mariadb_service, keystone_puppet, admin_user, openstack_vhost, openstack_rabbitmq_user, services_tenant): - # CINDER - cinder_puppet = cr.create('cinder_puppet', 'resources/cinder_puppet', {})[0] - cinder_db = cr.create('cinder_db', 'resources/mariadb_db/', { - 'db_name': 'cinder_db', 'login_user': 'root'})[0] - cinder_db_user = cr.create('cinder_db_user', 'resources/mariadb_user/', { - 'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0] - cinder_keystone_user = cr.create('cinder_keystone_user', 'resources/keystone_user', { - 'user_name': 'cinder', 'user_password': 'cinder'})[0] - cinder_keystone_role = cr.create('cinder_keystone_role', 'resources/keystone_role', { - 'role_name': 'admin'})[0] - cinder_keystone_service_endpoint = cr.create( - 'cinder_keystone_service_endpoint', - 'resources/keystone_service_endpoint', { - 'endpoint_name': 'cinder', - 'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s', - 'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s', - 'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s', - 'description': 'OpenStack Block Storage Service', 'type': 'volumev2'})[0] - - node.connect(cinder_puppet) - cinder_puppet.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, cinder_puppet.name, actions=('run', 'update')) - - node.connect(cinder_db) - node.connect(cinder_db_user) - rabbitmq_service.connect(cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'}) - admin_user.connect(cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #? - openstack_vhost.connect(cinder_puppet, {'vhost_name': 'rabbit_virtual_host'}) - openstack_rabbitmq_user.connect(cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'}) - mariadb_service.connect(cinder_db, { - 'port': 'login_port', - 'root_password': 'login_password', - 'root_user': 'login_user', - 'ip' : 'db_host'}) - mariadb_service.connect(cinder_db_user, {'port': 'login_port', 'root_password': 'login_password'}) - cinder_db.connect(cinder_db_user, {'db_name', 'db_host'}) - cinder_db_user.connect(cinder_puppet, { - 'user_name':'db_user', - 'db_name':'db_name', - 'user_password':'db_password'}) - mariadb_service.connect(cinder_puppet, { - 'port': 'db_port', - 'ip': 'db_host'}) - keystone_puppet.connect(cinder_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'}) #or non admin port? 
- services_tenant.connect(cinder_keystone_user) - cinder_keystone_user.connect(cinder_keystone_role) - cinder_keystone_user.connect(cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'}) - mariadb_service.connect(cinder_puppet, {'ip':'ip'}) - cinder_puppet.connect(cinder_keystone_service_endpoint, { - 'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'], - 'port': ['admin_port', 'internal_port', 'public_port'],}) - keystone_puppet.connect(cinder_keystone_service_endpoint, { - 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'}) - - # CINDER GLANCE - # Deploy chain: cinder_puppet -> cinder_glance -> ( cinder_api, cinder_scheduler, cinder_volume ) - cinder_glance_puppet = cr.create('cinder_glance_puppet', 'resources/cinder_glance_puppet', {})[0] - node.connect(cinder_glance_puppet) - evapi.add_dep(cinder_puppet.name, cinder_glance_puppet.name, actions=('run',)) - - return {'cinder_puppet': cinder_puppet, - 'cinder_db': cinder_db, - 'cinder_db_user': cinder_db_user, - 'cinder_keystone_user': cinder_keystone_user, - 'cinder_keystone_role': cinder_keystone_role, - 'cinder_keystone_service_endpoint': cinder_keystone_service_endpoint, - 'cinder_glance_puppet': cinder_glance_puppet} - -def setup_cinder_api(node, cinder_puppet): - # CINDER API - cinder_api_puppet = cr.create('cinder_api_puppet', 'resources/cinder_api_puppet', {})[0] - node.connect(cinder_api_puppet) - cinder_puppet.connect(cinder_api_puppet, { - 'keystone_password', 'keystone_tenant', 'keystone_user'}) - cinder_puppet.connect(cinder_api_puppet, { - 'keystone_host': 'keystone_auth_host', - 'keystone_port': 'keystone_auth_port'}) - evapi.add_react(cinder_puppet.name, cinder_api_puppet.name, actions=('update',)) - return {'cinder_api_puppet': cinder_api_puppet} - -def setup_cinder_scheduler(node, cinder_puppet): - # CINDER SCHEDULER - cinder_scheduler_puppet = cr.create('cinder_scheduler_puppet', 'resources/cinder_scheduler_puppet', {})[0] - node.connect(cinder_scheduler_puppet) - cinder_puppet.connect(cinder_scheduler_puppet) - evapi.add_react(cinder_puppet.name, cinder_scheduler_puppet.name, actions=('update',)) - return {'cinder_scheduler_puppet': cinder_scheduler_puppet} - -def setup_cinder_volume(node, cinder_puppet): - # CINDER VOLUME - cinder_volume = cr.create('cinder_volume_{}'.format(node.name), 'resources/volume_group', - {'path': '/root/cinder.img', 'volume_name': 'cinder-volume'})[0] - node.connect(cinder_volume) - - cinder_volume_puppet = cr.create('cinder_volume_puppet', 'resources/cinder_volume_puppet', {})[0] - node.connect(cinder_volume_puppet) - cinder_puppet.connect(cinder_volume_puppet) - evapi.add_react(cinder_puppet.name, cinder_volume_puppet.name, actions=('update',)) - cinder_volume.connect(cinder_volume_puppet, {'volume_name': 'volume_group'}) - return {'cinder_volume_puppet': cinder_volume_puppet} - -def setup_nova(node, librarian, mariadb_service, rabbitmq_service, admin_user, openstack_vhost, services_tenant, keystone_puppet, openstack_rabbitmq_user): - # NOVA - nova_puppet = cr.create('nova_puppet', 'resources/nova_puppet', {})[0] - nova_db = cr.create('nova_db', 'resources/mariadb_db/', { - 'db_name': 'nova_db', - 'login_user': 'root'})[0] - nova_db_user = cr.create('nova_db_user', 'resources/mariadb_user/', { - 'user_name': 'nova', - 'user_password': 'nova', - 'login_user': 'root'})[0] - nova_keystone_user = cr.create('nova_keystone_user', 'resources/keystone_user', { - 'user_name': 'nova', - 'user_password': 
'nova'})[0] - nova_keystone_role = cr.create('nova_keystone_role', 'resources/keystone_role', { - 'role_name': 'admin'})[0] - nova_keystone_service_endpoint = cr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', { - 'endpoint_name': 'nova', - 'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s', - 'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s', - 'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s', - 'description': 'OpenStack Compute Service', - 'type': 'compute'})[0] - - node.connect(nova_puppet) - nova_puppet.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, nova_puppet.name, actions=('run', 'update')) - - node.connect(nova_db) - node.connect(nova_db_user) - mariadb_service.connect(nova_db, { - 'port': 'login_port', - 'root_password': 'login_password', - 'root_user': 'login_user', - 'ip' : 'db_host'}) - mariadb_service.connect(nova_db_user, { - 'port': 'login_port', - 'root_password': 'login_password'}) - admin_user.connect(nova_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #? - openstack_vhost.connect(nova_puppet, {'vhost_name': 'rabbit_virtual_host'}) - nova_db.connect(nova_db_user, {'db_name', 'db_host'}) - services_tenant.connect(nova_keystone_user) - nova_keystone_user.connect(nova_keystone_role) - keystone_puppet.connect(nova_puppet, { - 'ip': 'keystone_host', - 'admin_port': 'keystone_port'}) - nova_keystone_user.connect(nova_puppet, { - 'user_name': 'keystone_user', - 'tenant_name': 'keystone_tenant', - 'user_password': 'keystone_password'}) - rabbitmq_service.connect(nova_puppet, { - 'ip': 'rabbit_host', 'port': 'rabbit_port'}) - openstack_rabbitmq_user.connect(nova_puppet, { - 'user_name': 'rabbit_userid', - 'password': 'rabbit_password'}) - keystone_puppet.connect(nova_keystone_service_endpoint, { - 'ip': 'keystone_host', - 'admin_port': 'keystone_admin_port', - 'admin_token': 'admin_token'}) - mariadb_service.connect(nova_puppet, { - 'ip':'db_host', - 'port': 'db_port'}) - nova_db_user.connect(nova_puppet, { - 'user_name':'db_user', - 'db_name':'db_name', - 'user_password':'db_password'}) - nova_puppet.connect(nova_keystone_service_endpoint, { - 'ip': ['ip', 'keystone_host', 'public_ip', 'internal_ip', 'admin_ip'], - 'port': ['admin_port', 'internal_port', 'public_port'], - }) - return {'nova_puppet': nova_puppet, - 'nova_db': nova_db, - 'nova_db_user': nova_db_user, - 'nova_keystone_user': nova_keystone_user, - 'nova_keystone_role': nova_keystone_role, - 'nova_keystone_service_endpoint': nova_keystone_service_endpoint} - -def setup_nova_api(node, nova_puppet, neutron_agents_metadata): - # NOVA API - nova_api_puppet = cr.create('nova_api_puppet', 'resources/nova_api_puppet', {})[0] - node.connect(nova_api_puppet) - nova_puppet.connect(nova_api_puppet, { - 'keystone_tenant': 'admin_tenant_name', - 'keystone_user': 'admin_user', - 'keystone_password': 'admin_password', - 'keystone_host': 'auth_host', - 'keystone_port': 'auth_port'}) - evapi.add_react(nova_puppet.name, nova_api_puppet.name, actions=('update',)) - nova_api_puppet.connect(neutron_agents_metadata, {'ip': 'metadata_ip'}) - return {'nova_api_puppet': nova_api_puppet} - -def setup_nova_conductor(node, nova_puppet, nova_api_puppet): - # NOVA CONDUCTOR - nova_conductor_puppet = cr.create('nova_conductor_puppet', 'resources/nova_conductor_puppet', {})[0] - node.connect(nova_conductor_puppet) - 
nova_puppet.connect(nova_conductor_puppet) - evapi.add_dep(nova_api_puppet.name, nova_conductor_puppet.name, actions=('run',)) - evapi.add_react(nova_puppet.name, nova_conductor_puppet.name, actions=('update',)) - return {'nova_conductor': nova_conductor_puppet} - -def setup_nova_scheduler(node, nova_puppet, nova_api_puppet): - # NOVA SCHEDULER - # NOTE(bogdando) Generic service is used. Package and service names for Ubuntu case - # come from https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/params.pp - nova_scheduler_puppet = cr.create('nova_scheduler_puppet', 'resources/nova_generic_service_puppet', { - 'title' : 'scheduler', 'package_name': 'nova-scheduler', 'service_name': 'nova-scheduler', - })[0] - node.connect(nova_scheduler_puppet) - evapi.add_dep(nova_puppet.name, nova_scheduler_puppet.name, actions=('run',)) - evapi.add_dep(nova_api_puppet.name, nova_scheduler_puppet.name, actions=('run',)) - evapi.add_react(nova_puppet.name, nova_scheduler_puppet.name, actions=('update',)) - return {'nova_scheduler_puppet': nova_scheduler_puppet} - -def setup_nova_compute(node, librarian, nova_puppet, nova_api_puppet, neutron_server_puppet, neutron_keystone_service_endpoint, glance_api_puppet): - # NOVA COMPUTE - # Deploy chain (nova, node_networking(TODO)) -> (nova_compute_libvirt, nova_neutron) -> nova_compute - name = node.name - nova_compute_puppet = cr.create('nova_compute_puppet_{}'.format(name), 'resources/nova_compute_puppet', {})[0] - # TODO (bogdando) figure out how to use it for multiple glance api servers - nova_puppet2 = cr.create('nova_puppet_{}'.format(name), 'resources/nova_puppet', { - 'glance_api_servers': '{{glance_api_servers_host}}:{{glance_api_servers_port}}' - })[0] - nova_puppet.connect(nova_puppet2, { - 'ensure_package', 'rabbit_host', - 'rabbit_password', 'rabbit_port', 'rabbit_userid', - 'rabbit_virtual_host', 'db_user', 'db_password', - 'db_name', 'db_host', 'keystone_password', - 'keystone_port', 'keystone_host', 'keystone_tenant', - 'keystone_user', - }) - # TODO(bogdando): Make a connection for nova_puppet2.glance_api_servers = "glance_api_puppet.ip:glance_api_puppet.bind_port" - node.connect(nova_puppet2) - nova_puppet2.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, nova_puppet2.name, actions=('run', 'update')) - dep = evapi.Dep(librarian.name, 'update', state='SUCESS', - child=nova_puppet2.name, child_action='run') - evapi.add_event(dep) - - node.connect(nova_compute_puppet) - evapi.add_dep(nova_puppet2.name, nova_compute_puppet.name, actions=('run',)) - evapi.add_dep(nova_api_puppet.name, nova_compute_puppet.name, actions=('run',)) - evapi.add_react(nova_puppet2.name, nova_compute_puppet.name, actions=('run', 'update')) - - # NOVA COMPUTE LIBVIRT, NOVA_NEUTRON - # NOTE(bogdando): changes nova config, so should notify nova compute service - nova_compute_libvirt_puppet = cr.create('nova_compute_libvirt_puppet_{}'.format(name), 'resources/nova_compute_libvirt_puppet', {})[0] - node.connect(nova_compute_libvirt_puppet) - evapi.add_dep(nova_puppet2.name, nova_compute_libvirt_puppet.name, actions=('run',)) - evapi.add_dep(nova_api_puppet.name, nova_compute_libvirt_puppet.name, actions=('run',)) - - # compute configuration for neutron, use http auth/endpoint protocols, keystone v2 auth hardcoded for the resource - nova_neutron_puppet = cr.create('nova_neutron_puppet_{}'.format(name), 'resources/nova_neutron_puppet', {})[0] - node.connect(nova_neutron_puppet) - evapi.add_dep(nova_puppet2.name, 
nova_neutron_puppet.name, actions=('run',)) - evapi.add_dep(nova_api_puppet.name, nova_neutron_puppet.name, actions=('run',)) - neutron_server_puppet.connect(nova_neutron_puppet, { - 'auth_password': 'neutron_admin_password', - 'auth_user': 'neutron_admin_username', - 'auth_type': 'neutron_auth_strategy', - 'auth_host': 'auth_host', 'auth_port': 'auth_port', - 'auth_protocol': 'auth_protocol', - }) - neutron_keystone_service_endpoint.connect(nova_neutron_puppet, { - 'internal_ip':'neutron_endpoint_host', - 'internal_port':'neutron_endpoint_port', - }) - # Update glance_api_service for nova compute - glance_api_puppet.connect(nova_puppet2, { - 'ip': 'glance_api_servers_host', - 'bind_port': 'glance_api_servers_port' - }) - - # signals.connect(keystone_puppet, nova_network_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'}) - # signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'}) - # signals.connect(rabbitmq_service1, nova_network_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'}) - return {'nova_compute_puppet': nova_compute_puppet, - 'nova_puppet2': nova_puppet2, - 'nova_compute_libvirt_puppet': nova_compute_libvirt_puppet, - 'nova_neutron_puppet': nova_neutron_puppet, - 'neutron_server_puppet': neutron_server_puppet} - -def setup_glance_api(node, librarian, mariadb_service, admin_user, keystone_puppet, services_tenant, cinder_glance_puppet): - # GLANCE (base and API) - glance_api_puppet = cr.create('glance_api_puppet', 'resources/glance_puppet', {})[0] - glance_db_user = cr.create('glance_db_user', 'resources/mariadb_user/', { - 'user_name': 'glance', 'user_password': 'glance', 'login_user': 'root'})[0] - glance_db = cr.create('glance_db', 'resources/mariadb_db/', { - 'db_name': 'glance', 'login_user': 'root'})[0] - glance_keystone_user = cr.create('glance_keystone_user', 'resources/keystone_user', { - 'user_name': 'glance', 'user_password': 'glance123'})[0] - glance_keystone_role = cr.create('glance_keystone_role', 'resources/keystone_role', { - 'role_name': 'admin'})[0] - glance_keystone_service_endpoint = cr.create( - 'glance_keystone_service_endpoint', - 'resources/keystone_service_endpoint', { - 'endpoint_name': 'glance', - 'adminurl': 'http://{{admin_ip}}:{{admin_port}}', - 'internalurl': 'http://{{internal_ip}}:{{internal_port}}', - 'publicurl': 'http://{{public_ip}}:{{public_port}}', - 'description': 'OpenStack Image Service', 'type': 'image'})[0] - - node.connect(glance_api_puppet) - glance_api_puppet.connect_with_events(librarian, {'module': 'modules'}, {}) - evapi.add_dep(librarian.name, glance_api_puppet.name, actions=('run', 'update')) - - node.connect(glance_db) - node.connect(glance_db_user) - admin_user.connect(glance_api_puppet, { - 'user_name': 'keystone_user', 'user_password': 'keystone_password', - 'tenant_name': 'keystone_tenant'}) #? 
- mariadb_service.connect(glance_db, { - 'port': 'login_port', - 'root_password': 'login_password', - 'root_user': 'login_user', - 'ip' : 'db_host'}) - mariadb_service.connect(glance_db_user, {'port': 'login_port', 'root_password': 'login_password'}) - glance_db.connect(glance_db_user, {'db_name', 'db_host'}) - glance_db_user.connect(glance_api_puppet, { - 'user_name':'db_user', - 'db_name':'db_name', - 'user_password':'db_password', - 'db_host' : 'db_host'}) - mariadb_service.connect(glance_api_puppet,{ - 'port': 'db_port', - 'ip': 'db_host'}) - keystone_puppet.connect(glance_api_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'}) #or non admin port? - services_tenant.connect(glance_keystone_user) - glance_keystone_user.connect(glance_keystone_role) - glance_keystone_user.connect(glance_api_puppet, { - 'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', - 'user_password': 'keystone_password'}) - mariadb_service.connect(glance_api_puppet, {'ip':'ip'}) - glance_api_puppet.connect(glance_keystone_service_endpoint, { - 'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'], - 'bind_port': ['admin_port', 'internal_port', 'public_port'],}) - keystone_puppet.connect(glance_keystone_service_endpoint, { - 'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'}) - - # Update glance_api_service for cinder - glance_api_puppet.connect(cinder_glance_puppet, { - 'ip': 'glance_api_servers_host', - 'bind_port': 'glance_api_servers_port' - }) - return {'glance_api_puppet': glance_api_puppet, - 'glance_db_user': glance_db_user, - 'glance_db': glance_db, - 'glance_keystone_user': glance_keystone_user, - 'glance_keystone_role': glance_keystone_role, - 'glance_keystone_service_endpoint': glance_keystone_service_endpoint} - -def setup_glance_registry(node, glance_api_puppet): - # GLANCE REGISTRY - glance_registry_puppet = cr.create('glance_registry_puppet', 'resources/glance_registry_puppet', {})[0] - node.connect(glance_registry_puppet) - glance_api_puppet.connect(glance_registry_puppet) - evapi.add_react(glance_api_puppet.name, glance_registry_puppet.name, actions=('update',)) - # API and registry should not listen same ports - # should not use the same log destination and a pipeline, - # so disconnect them and restore the defaults - signals.disconnect_receiver_by_input(glance_registry_puppet, 'bind_port') - signals.disconnect_receiver_by_input(glance_registry_puppet, 'log_file') - signals.disconnect_receiver_by_input(glance_registry_puppet, 'pipeline') - glance_registry_puppet.update({ - 'bind_port': 9191, - 'log_file': '/var/log/glance/registry.log', - 'pipeline': 'keystone', - }) - return {'glance_registry_puppet': glance_registry_puppet} - - -def validate(): - has_errors = False - for r in locals().values(): - if not isinstance(r, resource.Resource): - continue - - print 'Validating {}'.format(r.name) - errors = validation.validate_resource(r) - if errors: - has_errors = True - print 'ERROR: %s: %s' % (r.name, errors) - - if has_errors: - sys.exit(1) - - -def create_controller(node): - r = {r.name: r for r in resource.load_all()} - librarian_node = 'librarian_{}'.format(node) - - r.update(setup_base(r[node], r[librarian_node])) - r.update(setup_keystone(r[node], r[librarian_node], - r['mariadb_service'], r['openstack_rabbitmq_user'])) - r.update(setup_openrc(r[node], r['keystone_puppet'], r['admin_user'])) - r.update(setup_neutron(r[node], r['librarian_{}'.format(node)], r['rabbitmq_service1'], - r['openstack_rabbitmq_user'], r['openstack_vhost'])) - 
r.update(setup_neutron_api(r[node], r['mariadb_service'], r['admin_user'], - r['keystone_puppet'], r['services_tenant'], r['neutron_puppet'])) - r.update(setup_neutron_agent(r[node], r['neutron_server_puppet'])) - r.update(setup_cinder(r[node], r['librarian_{}'.format(node)], r['rabbitmq_service1'], - r['mariadb_service'], r['keystone_puppet'], r['admin_user'], - r['openstack_vhost'], r['openstack_rabbitmq_user'], r['services_tenant'])) - r.update(setup_cinder_api(r[node], r['cinder_puppet'])) - r.update(setup_cinder_scheduler(r[node], r['cinder_puppet'])) - r.update(setup_cinder_volume(r[node], r['cinder_puppet'])) - r.update(setup_nova(r[node], r['librarian_{}'.format(node)], r['mariadb_service'], r['rabbitmq_service1'], - r['admin_user'], r['openstack_vhost'], r['services_tenant'], - r['keystone_puppet'], r['openstack_rabbitmq_user'])) - r.update(setup_nova_api(r[node], r['nova_puppet'], r['neutron_agents_metadata'])) - r.update(setup_nova_conductor(r[node], r['nova_puppet'], r['nova_api_puppet'])) - r.update(setup_nova_scheduler(r[node], r['nova_puppet'], r['nova_api_puppet'])) - r.update(setup_glance_api(r[node], r['librarian_{}'.format(node)], r['mariadb_service'], r['admin_user'], - r['keystone_puppet'], r['services_tenant'], - r['cinder_glance_puppet'])) - r.update(setup_glance_registry(r[node], r['glance_api_puppet'])) - return r - -def create_compute(node): - r = {r.name: r for r in resource.load_all()} - librarian_node = 'librarian_{}'.format(node) - res = {} - res.update(setup_neutron_compute(r[node], r[librarian_node], r['neutron_puppet'], r['neutron_server_puppet'])) - res.update(setup_nova_compute(r[node], r[librarian_node], r['nova_puppet'], r['nova_api_puppet'], - r['neutron_server_puppet'], r['neutron_keystone_service_endpoint'], r['glance_api_puppet'])) - return r - -@click.command() -def create_all(): - ModelMeta.remove_all() - r = prepare_nodes(2) - r.update(create_controller('node1')) - r.update(create_compute('node2')) - print '\n'.join(r.keys()) - -@click.command() -@click.argument('nodes_count') -def prepare(nodes_count): - r = prepare_nodes(nodes_count) - print '\n'.join(r.keys()) - -@click.command() -@click.argument('node') -def add_compute(node): - r = create_compute(node) - print '\n'.join(r.keys()) - -@click.command() -@click.argument('node') -def add_controller(node): - r = create_controller(node) - print '\n'.join(r.keys()) - -@click.command() -def clear(): - ModelMeta.remove_all() - - -if __name__ == '__main__': - main.add_command(create_all) - main.add_command(prepare) - main.add_command(add_controller) - main.add_command(add_compute) - main.add_command(clear) - main() - - if PROFILE: - pr.disable() - s = StringIO.StringIO() - sortby = 'cumulative' - ps = pstats.Stats(pr, stream=s).sort_stats(sortby) - ps.print_stats() - print s.getvalue() - sys.exit(0) diff --git a/examples/openstack/rabbitmq_user.yaml b/examples/openstack/rabbitmq_user.yaml deleted file mode 100644 index fd20b553..00000000 --- a/examples/openstack/rabbitmq_user.yaml +++ /dev/null @@ -1,22 +0,0 @@ -id: primary_controller - -resources: - - id: rabbit_user - from: resources/rabbitmq_user - location: {{node}} - input: - user_name: {{user_name}} - password: {{password}} - vhost_name: {{vhost_res}}::vhost_name - -updates: - - id: {{for}} - input: - {{for_user}}: rabbit_user::user_name - {{for_password}}: rabbit_user::password - -events: - - type: depends_on - parent_action: rabbit_user.run - state: success - child_action: {{for}}.update diff --git a/examples/provisioning/provision.py 
b/examples/provisioning/provision.py deleted file mode 100755 index 2abd7120..00000000 --- a/examples/provisioning/provision.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python -import requests - -from solar.core.resource import composer as cr -from solar.events.api import add_event -from solar.events.controls import React - - -discovery_service = 'http://0.0.0.0:8881' -bareon_partitioning = 'http://0.0.0.0:9322/v1/nodes/{0}/partitioning' -bareon_repos = 'http://0.0.0.0:9322/v1/nodes/{0}/repos' -bareon_sync = 'http://0.0.0.0:9322/v1/actions/sync_all' - - -class NodeAdapter(dict): - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - raise AttributeError(name) - - @property - def node_id(self): - return self['id'] - - @property - def partitioning(self): - return requests.get(bareon_partitioning.format(self['id'])).json() - - @property - def repos(self): - return requests.get(bareon_repos.format(self['id'])).json() - - -# Sync hw info about nodes from discovery service into bareon-api -requests.post(bareon_sync) - -# Get list of nodes from discovery service -nodes_list = requests.get(discovery_service).json() - -# Create slave node resources -node_resources = cr.create('nodes', 'templates/not_provisioned_nodes', - {'nodes': nodes_list}) - -# Get master node -master_node = filter(lambda n: n.name == 'node_master', node_resources)[0] - -with open('/vagrant/tmp/keys/ssh_public') as fp: - master_key = fp.read().strip() - -# Dnsmasq resources -for node in nodes_list: - node = NodeAdapter(node) - node_resource = next(n for n in node_resources - if n.name.endswith('node_{0}'.format(node.node_id))) - - node_resource.update( - { - 'partitioning': node.partitioning, - 'master_key': master_key, - 'repos': node.repos, - } - ) - - dnsmasq = cr.create('dnsmasq_{0}'.format(node.node_id), - 'resources/dnsmasq', {})[0] - master_node.connect(dnsmasq) - node_resource.connect(dnsmasq, {'admin_mac': 'exclude_mac_pxe'}) - - event = React(node_resource.name, 'run', 'success', node_resource.name, - 'provision') - add_event(event) - event = React(node_resource.name, 'provision', 'success', dnsmasq.name, - 'exclude_mac_pxe') - add_event(event) - event = React(dnsmasq.name, 'exclude_mac_pxe', 'success', - node_resource.name, 'reboot') - add_event(event) diff --git a/examples/provisioning/provision.sh b/examples/provisioning/provision.sh deleted file mode 100755 index d923fbef..00000000 --- a/examples/provisioning/provision.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -set -eux - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# Remove generated pxe exclude files -sudo rm -f /etc/dnsmasq.d/no_pxe_*.conf -sudo service dnsmasq restart - -solar resource clear_all -python "${DIR}"/provision.py - -solar changes stage -solar changes process -solar orch run-once last -watch --color -n1 'solar orch report last' diff --git a/examples/riak/README.md b/examples/riak/README.md deleted file mode 100644 index edc03643..00000000 --- a/examples/riak/README.md +++ /dev/null @@ -1,47 +0,0 @@ -Example of 3 node riak cluster. - -At first run: - -`python examples/riak/riaks.py deploy` - -It will prepare riak nodes etc. 
- -Then you can continue with standard solar things: - -``` -solar changes stage -d -solar changes process -solar orch run-once last -watch -n 1 solar orch report last -``` - -Wait until all actions have state `SUCCESS` -After that you can add HAProxy on each node: - -`python examples/riak/riaks.py add_haproxies` - -Then again normal solar stuff - -``` -solar changes stage -d -solar changes process -solar orch run-once last -watch -n 1 solar orch report last -``` - - -Wait until all actions have state `SUCCESS` -After that you have basic 3 node riak cluster running. - -You can also modify riak http port by: - -`solar resource update riak_service1 riak_port_http=18100` - -And then again standard stuff: - -``` -solar changes stage -d -solar changes process -solar orch run-once last -watch -n 1 solar orch report last -``` diff --git a/examples/riak/haproxy_riak_config.yaml b/examples/riak/haproxy_riak_config.yaml deleted file mode 100644 index 34c49bbd..00000000 --- a/examples/riak/haproxy_riak_config.yaml +++ /dev/null @@ -1,50 +0,0 @@ -id: haproxy_riak_config - -resources: - - id: haproxy_riak_config_http - from: resources/haproxy_service_config - tags: ['service=riak', 'protocol=http'] - input: - listen_port: #{http_listen_port}# - protocol: 'http' - name: 'riak_haproxy_http' - backends:server: - #% for riak in riaks %# - - #{riak}#::riak_hostname - #% endfor %# - backends:port: - #% for riak in riaks %# - - #{riak}#::riak_port_http - #% endfor %# - - - id: haproxy_riak_config_pb - from: resources/haproxy_service_config - tags: ['service=riak', 'protocol=tcp'] - input: - listen_port: #{pb_listen_port}# - protocol: 'tcp' - name: 'riak_haproxy_pb' - backends:server: - #% for riak in riaks %# - - #{riak}#::riak_hostname - #% endfor %# - backends:port: - #% for riak in riaks %# - - #{riak}#::riak_port_pb - #% endfor %# - -updates: - - with_tags: ['resource=haproxy_config'] - input: - config:protocol: - - haproxy_riak_config_http::protocol - - haproxy_riak_config_pb::protocol - config:listen_port: - - haproxy_riak_config_http::listen_port - - haproxy_riak_config_pb::listen_port - config:name: - - haproxy_riak_config_http::name - - haproxy_riak_config_pb::name - config:backends: - - haproxy_riak_config_http::backends - - haproxy_riak_config_pb::backends diff --git a/examples/riak/riak_cluster.yaml b/examples/riak/riak_cluster.yaml deleted file mode 100644 index 3c2c6c8b..00000000 --- a/examples/riak/riak_cluster.yaml +++ /dev/null @@ -1,54 +0,0 @@ -id: riak_cluster - -resources: - - id: riak_service1 - # `./` added by intention - from: ./riak_service.yaml - input: - node: #{nodes[0]}# - index: 1 - join_to: '' - - - id: riak_service2 - # `./` ommited by intention - from: riak_service.yaml - input: - node: #{nodes[1]}# - index: 2 - join_to: riak_service1 - - - id: riak_service3 - # `./` ommited by intention - from: riak_service.yaml - input: - node: #{nodes[2]}# - index: 3 - join_to: riak_service1 - - - id: haproxy_riak_config - from: ./haproxy_riak_config.yaml - input: - http_listen_port: 8098 - pb_listen_port: 8087 - riaks: ['riak_service1', 'riak_service2', 'riak_service3'] - - - id: haproxy1 - from: templates/haproxy - input: - node: #{nodes[0]}# - service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http'] - index: 1 - - - id: haproxy2 - from: templates/haproxy - input: - node: #{nodes[1]}# - service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http'] - index: 2 - - - id: haproxy3 - from: templates/haproxy - input: - node: #{nodes[2]}# - service_configs: 
['haproxy_riak_config_pb', 'haproxy_riak_config_http'] - index: 3 diff --git a/examples/riak/riak_service.yaml b/examples/riak/riak_service.yaml deleted file mode 100644 index e047488f..00000000 --- a/examples/riak/riak_service.yaml +++ /dev/null @@ -1,61 +0,0 @@ -id: riak_service - -resources: - - id: riak_service#{index}# - from: resources/riak_node - location: #{node}# - input: - riak_self_name: riak#{index}# - riak_hostname: riak_server#{index}#.solar - riak_name: riak#{index}#@riak_server#{index}#.solar - #% if join_to %# - join_to: #{join_to}#::riak_name - #% endif %# - ip: #{node}#::ip - -updates: - - with_tags: 'resource=hosts_file' - input: - hosts:name: - - riak_service#{index}#::riak_hostname::NO_EVENTS - hosts:ip: - - riak_service#{index}#::ip::NO_EVENTS - - - with_tags: 'resource=haproxy_service_config & service=riak & protocol=http' - input: - backends:server: - - riak_service#{index}#::riak_hostname - backends:port: - - riak_service#{index}#::riak_port_http - - - with_tags: 'resource=haproxy_service_config & service=riak & protocol=tcp' - input: - backends:server: - - riak_service#{index}#::riak_hostname - backends:port: - - riak_service#{index}#::riak_port_pb - -events: - - type: depends_on - parent: - with_tags: 'resource=hosts_file & location=#{node}#' - action: run - state: success - child_action: riak_service#{index}#.run - -#% if join_to %# - - type: react_on - parent_action: riak_service#{index}#.run - state: success - child_action: riak_service#{index}#.join - - - type: react_on - parent_action: riak_service#{index}#.leave - state: success - child_action: riak_service#{index}#.join - - - type: react_on - parent_action: riak_service#{index}#.join - state: success - child_action: #{join_to}#.commit -#% endif %# diff --git a/examples/riak/riaks-template.py b/examples/riak/riaks-template.py deleted file mode 100755 index 2c75d869..00000000 --- a/examples/riak/riaks-template.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env python - -# WARNING: this might not be most up-to-date script and not all things might -# work here, for most up-to-date version see example-riaks.py -# This is just a demo of the template language of Solar - -import click -import sys - -from solar.core import resource -from solar import template -from solar.dblayer.model import ModelMeta - - -def setup_riak(): - - ModelMeta.remove_all() - nodes = template.nodes_from('templates/riak_nodes') - - riak_services = nodes.on_each( - 'resources/riak_node', - args={ - 'riak_self_name': 'riak{num}', - 'riak_hostname': 'riak_server{num}.solar', - 'riak_name': 'riak{num}@riak_server{num}.solar', - } - ) - - slave_riak_services = riak_services.tail() - - riak_services.take(0).connect_list( - slave_riak_services, - mapping={ - 'riak_name': 'join_to', - } - ) - - hosts_files = nodes.on_each('resources/hosts_file') - - riak_services.connect_list_to_each( - hosts_files, - mapping={ - 'ip': 'hosts:ip', - 'riak_hostname': 'hosts:name', - }, - events=False - ) - - errors = resource.validate_resources() - for r, error in errors: - click.echo('ERROR: %s: %s' % (r.name, error)) - - if errors: - click.echo("ERRORS") - sys.exit(1) - - hosts_files.add_deps('run/success', riak_services, 'run') - slave_riak_services.add_reacts('run/success', slave_riak_services, 'join') - slave_riak_services.add_reacts('leave/success', slave_riak_services, 'join') - slave_riak_services.add_react('run/success', riak_services.take(0), 'commit') - - -def setup_haproxies(): - # TODO: VR loading needs to be supported, then we can do something like - 
# nodes = template.load('nodes') - - nodes = template.ResourceListTemplate([ - resource.load('node1'), - resource.load('node2'), - resource.load('node3'), - ]) - riak_services = template.ResourceListTemplate([ - resource.load('riak_node-0'), - resource.load('riak_node-1'), - resource.load('riak_node-2'), - ]) - - haproxy_services = nodes.on_each( - 'resources/haproxy_service' - ) - haproxy_configs = nodes.on_each( - 'resources/haproxy_config' - ) - haproxy_service_configs_http = riak_services.on_each( - 'resources/haproxy_service_config', - { - 'listen_port': 8098, - 'protocol': 'http', - 'name': 'riak_haproxy_http{num}', - } - ) - haproxy_service_configs_pb = riak_services.on_each( - 'resources/haproxy_service_config', - { - 'listen_port': 8087, - 'protocol': 'tcp', - 'name': 'riak_haproxy_pb{num}', - } - ) - - riak_services.connect_list_to_each( - haproxy_service_configs_http, - { - 'riak_hostname': 'backends:server', - 'riak_port_http': 'backends:port', - } - ) - riak_services.connect_list_to_each( - haproxy_service_configs_pb, - { - 'riak_hostname': 'backends:server', - 'riak_port_pb': 'backends:port', - } - ) - haproxy_service_configs_http.connect_list( - haproxy_configs, - { - 'backends': 'config:backends', - 'listen_port': 'config:listen_port', - 'protocol': 'config:protocol', - 'name': 'config:name', - } - ) - haproxy_service_configs_pb.connect_list( - haproxy_configs, - { - 'backends': 'config:backends', - 'listen_port': 'config:listen_port', - 'protocol': 'config:protocol', - 'name': 'config:name', - } - ) - - #nodes.add_reacts('run/success', haproxy_services, 'install') - haproxy_services.add_deps('run/success', haproxy_configs, 'run') - haproxy_configs.add_reacts('run/success', haproxy_services, 'apply_config') - haproxy_configs.add_reacts('update/success', haproxy_services, 'apply_config') - - errors = resource.validate_resources() - for r, error in errors: - click.echo('ERROR: %s: %s' % (r.name, error)) - - if errors: - click.echo("ERRORS") - sys.exit(1) - - -@click.group() -def main(): - pass - - -@click.command() -def deploy(): - setup_riak() - - -@click.command() -def add_haproxies(): - setup_haproxies() - - -@click.command() -def undeploy(): - raise NotImplemented("Not yet") - - -main.add_command(deploy) -main.add_command(undeploy) -main.add_command(add_haproxies) - - -if __name__ == '__main__': - main() diff --git a/examples/riak/riaks.py b/examples/riak/riaks.py deleted file mode 100755 index 8202c47d..00000000 --- a/examples/riak/riaks.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python - -# To run: -# python example-riaks.py deploy -# solar changes stage -# solar changes process -# solar orch run-once last -# python example-riaks.py add_haproxies -# solar changes stage -# solar changes process -# solar orch run-once last - - -import click -import sys - -from solar.core import resource -from solar.core import signals -from solar.core import validation -from solar.core.resource import composer as cr -from solar import errors - -from solar.dblayer.model import ModelMeta - -from solar.events.controls import React, Dep -from solar.events.api import add_event - -from solar.dblayer.solar_models import Resource - - -def setup_riak(): - - ModelMeta.remove_all() - resources = cr.create('nodes', 'templates/nodes', {'count': 3}) - nodes = resources.like('node') - hosts_services = resources.like('hosts_file') - node1, node2, node3 = nodes - - riak_services = [] - ips = '10.0.0.%d' - for i in xrange(3): - num = i + 1 - r = cr.create('riak_service%d' % num, - 
'resources/riak_node', - {'riak_self_name': 'riak%d' % num, - 'storage_backend': 'leveldb', - 'riak_hostname': 'riak_server%d.solar' % num})[0] - r.connect(r, {'riak_self_name': 'riak_name', - 'riak_hostname': 'riak_name'}) - riak_services.append(r) - - for i, riak in enumerate(riak_services): - nodes[i].connect(riak) - - for i, riak in enumerate(riak_services[1:]): - riak_services[0].connect(riak, {'riak_name': 'join_to'}) - - for riak in riak_services: - for hosts_file in hosts_services: - riak.connect_with_events(hosts_file, - {'riak_hostname': 'hosts:name', - 'ip': 'hosts:ip'}) - - Resource.save_all_lazy() - errors = resource.validate_resources() - for r, error in errors: - click.echo('ERROR: %s: %s' % (r.name, error)) - has_errors = False - - if errors: - click.echo("ERRORS") - sys.exit(1) - - events = [ - Dep('hosts_file1', 'run', 'success', 'riak_service1', 'run'), - Dep('hosts_file2', 'run', 'success', 'riak_service2', 'run'), - Dep('hosts_file3', 'run', 'success', 'riak_service3', 'run'), - - React('riak_service2', 'run', 'success', 'riak_service2', 'join'), - React('riak_service3', 'run', 'success', 'riak_service3', 'join'), - - # Dep('riak_service1', 'run', 'success', 'riak_service2', 'join'), - # Dep('riak_service1', 'run', 'success', 'riak_service3', 'join'), - - # React('riak_service2', 'join', 'error', 'riak_service2', 'leave'), - # React('riak_service3', 'join', 'error', 'riak_service3', 'leave'), - - React('riak_service2', 'leave', 'success', 'riak_service2', 'join'), - React('riak_service3', 'leave', 'success', 'riak_service3', 'join'), - - # React('riak_service2', 'leave', 'success', 'riak_service1', 'commit_leave'), - # React('riak_service3', 'leave', 'success', 'riak_service1', 'commit_leave'), - - # Dep('riak_service1', 'commit_leave', 'success', 'riak_service2', 'join'), - # Dep('riak_service1', 'commit_leave', 'success', 'riak_service3', 'join'), - - React('riak_service3', 'join', 'success', 'riak_service1', 'commit'), - React('riak_service2', 'join', 'success', 'riak_service1', 'commit') - ] - - for event in events: - add_event(event) - - click.echo('Use solar changes process & orch') - sys.exit(0) - - -def setup_haproxies(): - hps = [] - hpc = [] - hpsc_http = [] - hpsc_pb = [] - for i in xrange(3): - num = i + 1 - hps.append(cr.create('haproxy_service%d' % num, - 'resources/haproxy_service', - {})[0]) - hpc.append(cr.create('haproxy_config%d' % num, - 'resources/haproxy_config', - {})[0]) - hpsc_http.append(cr.create('haproxy_service_config_http%d' % num, - 'resources/haproxy_service_config', - {'listen_port': 8098, - 'protocol': 'http', - 'name': 'riak_haproxy_http%d' % num})[0]) - hpsc_pb.append(cr.create('haproxy_service_config_pb%d' % num, - 'resources/haproxy_service_config', - {'listen_port': 8087, - 'protocol': 'tcp', - 'name': 'riak_haproxy_pb%d' % num})[0]) - - riak1 = resource.load('riak_service1') - riak2 = resource.load('riak_service2') - riak3 = resource.load('riak_service3') - riaks = [riak1, riak2, riak3] - - for single_hpsc in hpsc_http: - for riak in riaks: - riak.connect(single_hpsc, { - 'riak_hostname': 'backends:server', - 'riak_port_http': 'backends:port'}) - - for single_hpsc in hpsc_pb: - for riak in riaks: - riak.connect(single_hpsc, - {'riak_hostname': 'backends:server', - 'riak_port_pb': 'backends:port'}) - - # haproxy config to haproxy service - - for single_hpc, single_hpsc in zip(hpc, hpsc_http): - single_hpsc.connect(single_hpc, {"backends": "config:backends", - "listen_port": "config:listen_port", - "protocol": "config:protocol", - 
"name": "config:name"}) - - for single_hpc, single_hpsc in zip(hpc, hpsc_pb): - single_hpsc.connect(single_hpc, {"backends": "config:backends", - "listen_port": "config:listen_port", - "protocol": "config:protocol", - "name": "config:name"}) - - - # assign haproxy services to each node - - node1 = resource.load('node1') - node2 = resource.load('node2') - node3 = resource.load('node3') - nodes = [node1, node2, node3] - - for single_node, single_hps in zip(nodes, hps): - single_node.connect(single_hps) - - for single_node, single_hpc in zip(nodes, hpc): - single_node.connect(single_hpc) - - has_errors = False - for r in locals().values(): - - # TODO: handle list - if not isinstance(r, resource.Resource): - continue - - # print 'Validating {}'.format(r.name) - local_errors = validation.validate_resource(r) - if local_errors: - has_errors = True - print 'ERROR: %s: %s' % (r.name, local_errors) - - if has_errors: - print "ERRORS" - sys.exit(1) - - events = [] - for node, single_hps, single_hpc in zip(nodes, hps, hpc): - # r = React(node.name, 'run', 'success', single_hps.name, 'install') - d = Dep(single_hps.name, 'run', 'success', single_hpc.name, 'run') - e1 = React(single_hpc.name, 'run', 'success', single_hps.name, 'apply_config') - e2 = React(single_hpc.name, 'update', 'success', single_hps.name, 'apply_config') - # events.extend([r, d, e1, e2]) - events.extend([d, e1, e2]) - - for event in events: - add_event(event) - - -@click.command() -@click.argument('i', type=int, required=True) -def add_solar_agent(i): - solar_agent_transport = cr.create('solar_agent_transport%s' % i, 'resources/transport_solar_agent', - {'solar_agent_user': 'vagrant', - 'solar_agent_password': 'password'})[0] - transports = resource.load('transports%s' % i) - ssh_transport = resource.load('ssh_transport%s' % i) - transports_for_solar_agent = cr.create('transports_for_solar_agent%s' % i, 'resources/transports')[0] - - # install solar_agent with ssh - signals.connect(transports_for_solar_agent, solar_agent_transport, {}) - - signals.connect(ssh_transport, transports_for_solar_agent, {'ssh_key': 'transports:key', - 'ssh_user': 'transports:user', - 'ssh_port': 'transports:port', - 'name': 'transports:name'}) - - # add solar_agent to transports on this node - signals.connect(solar_agent_transport, transports, {'solar_agent_user': 'transports:user', - 'solar_agent_port': 'transports:port', - 'solar_agent_password': 'transports:password', - 'name': 'transports:name'}) - - -@click.group() -def main(): - pass - - -@click.command() -def deploy(): - setup_riak() - - -@click.command() -def add_haproxies(): - setup_haproxies() - - -@click.command() -def undeploy(): - raise NotImplemented("Not yet") - - -@click.command() -def create_all(): - setup_riak() - setup_haproxies() - - -main.add_command(deploy) -main.add_command(undeploy) -main.add_command(add_haproxies) -main.add_command(add_solar_agent) -main.add_command(create_all) - - -if __name__ == '__main__': - main() diff --git a/examples/riak/riaks_big.py b/examples/riak/riaks_big.py deleted file mode 100755 index 679be4d7..00000000 --- a/examples/riak/riaks_big.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python - -# this allows you to create riak cluster as big as you want - -import click -import sys - -from solar.core import resource -from solar.core import signals -from solar.core import validation -from solar.core.resource import composer as cr -from solar import errors - -from solar.interfaces.db import get_db - -from solar.events.controls import React, Dep -from 
solar.events.api import add_event - - -db = get_db() - - -NODES = 3 - -def setup_riak(nodes_num=None, hosts_mapping=False): - - if nodes_num is None: - nodes_num = NODES - db.clear() - - resources = cr.create('nodes', 'templates/nodes', {'count': nodes_num}) - nodes = [x for x in resources if x.name.startswith('node')] - hosts_services = [x for x in resources if x.name.startswith('hosts_file')] - - riak_services = [] - ips = '10.0.0.%d' - for i in xrange(nodes_num): - num = i + 1 - r = cr.create('riak_service%d' % num, - 'resources/riak_node', - {'riak_self_name': 'riak%d' % num, - 'riak_hostname': 'riak_server%d.solar' % num, - 'riak_name': 'riak%d@riak_server%d.solar' % (num, num)})[0] - riak_services.append(r) - - for i, riak in enumerate(riak_services): - nodes[i].connect(riak) - - for i, riak in enumerate(riak_services[1:]): - riak_services[0].connect(riak, {'riak_name': 'join_to'}) - - if hosts_mapping: - for riak in riak_services: - for hosts_file in hosts_services: - riak.connect_with_events(hosts_file, - {'riak_hostname': 'hosts:name', - 'ip': 'hosts:ip'}) - - res_errors = resource.validate_resources() - for r, error in res_errors: - click.echo('ERROR: %s: %s' % (r.name, error)) - has_errors = False - - if has_errors: - click.echo("ERRORS") - sys.exit(1) - - events = [] - for x in xrange(nodes_num): - i = x + 1 - if hosts_mapping: - events.append(Dep('hosts_file%d' % i, 'run', 'success', 'riak_service%d' % i, 'run')) - if i >= 2: - events.append(React('riak_service%d' % i, 'run', 'success', 'riak_service%d' % i, 'join')) - events.append(React('riak_service%d' % i, 'join', 'success', 'riak_service1', 'commit')) - - for event in events: - add_event(event) - - click.echo('Use solar changes process & orch') - sys.exit(0) - - -@click.group() -def main(): - pass - - -@click.command() -@click.argument('nodes_count', type=int) -@click.argument('hosts_mapping', type=bool) -def deploy(nodes_count, hosts_mapping): - click.secho("With big nodes_count, this example is DB heavy, it creates NxN connections, continue ? 
[y/N] ", fg='red', nl=False) - c= click.getchar() - if c in ('y', 'Y'): - setup_riak(nodes_count, hosts_mapping) - else: - click.echo("Aborted") - - -if __name__ == '__main__': - main.add_command(deploy) - main() diff --git a/examples/solar_agent/example.py b/examples/solar_agent/example.py deleted file mode 100644 index 08114b63..00000000 --- a/examples/solar_agent/example.py +++ /dev/null @@ -1,61 +0,0 @@ -import click -import sys -import time - -from solar.core import resource -from solar.core import signals -from solar.core.resource import composer as cr -from solar.dblayer.model import ModelMeta - - -def run(): - ModelMeta.remove_all() - - node = cr.create('node', 'resources/ro_node', {'name': 'first' + str(time.time()), - 'ip': '10.0.0.3', - 'node_id': 'node1', - })[0] - - transports = cr.create('transports_node1', 'resources/transports')[0] - transports_for_solar_agent = cr.create('transports_for_solar_agent', 'resources/transports')[0] - - ssh_transport = cr.create('ssh_transport', 'resources/transport_ssh', - {'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', - 'ssh_user': 'vagrant'})[0] - - solar_agent_transport = cr.create('solar_agent_transport', 'resources/transport_solar_agent', - {'solar_agent_user': 'vagrant', - 'solar_agent_password': 'password'})[0] - - transports_for_solar_agent.connect(solar_agent_transport, {}) - ssh_transport.connect(transports_for_solar_agent,{'ssh_key': 'transports:key', - 'ssh_user': 'transports:user', - 'ssh_port': 'transports:port', - 'name': 'transports:name'}) - # set transports_id - transports.connect(node, {}) - - # it uses reverse mappings - ssh_transport.connect(transports, {'ssh_key': 'transports:key', - 'ssh_user': 'transports:user', - 'ssh_port': 'transports:port', - 'name': 'transports:name'}) - solar_agent_transport.connect(transports, {'solar_agent_user': 'transports:user', - 'solar_agent_port': 'transports:port', - 'solar_agent_password': 'transports:password', - 'name': 'transports:name'}) - - - hosts = cr.create('hosts_file', 'resources/hosts_file', {})[0] - node.connect(hosts, { - 'ip': 'hosts:ip', - 'name': 'hosts:name' - }) - - # for r in (node, hosts, ssh_transport, transports): - # print r.name, repr(r.args['location_id']), repr(r.args['transports_id']) - - # print hosts.transports() - # print hosts.ip() - -run() diff --git a/examples/torrent/README.md b/examples/torrent/README.md deleted file mode 100644 index 13b6a338..00000000 --- a/examples/torrent/README.md +++ /dev/null @@ -1,25 +0,0 @@ -Example of using torrent transport with solar. Torrent is used to distribute task data. After fetching is finished torrent client forks and continues seeding. - - -The example contains single node with single host mapping + transports. - -Execute: -``` -python examples/torrent/example.py -solar changes stage -solar changes process -solar orch run-once last -``` - -Wait for finish: - -``` -solar orch report last -w 100 -``` - -After this you should see new entry in `/etc/hosts` file. 
- - -* All created torrents are in `/vagrant/torrents`, it doesn't need to be shared -* Initial seeding is done using torrent file -* Downloading and then seeding is always done with magnetlinks diff --git a/examples/torrent/example.py b/examples/torrent/example.py deleted file mode 100644 index 2c09233c..00000000 --- a/examples/torrent/example.py +++ /dev/null @@ -1,70 +0,0 @@ -import time - -from solar.core.resource import composer as cr -from solar import errors -from solar.dblayer.model import ModelMeta - - -def run(): - ModelMeta.remove_all() - - node = cr.create('node', 'resources/ro_node', {'name': 'first' + str(time.time()), - 'ip': '10.0.0.3', - 'node_id': 'node1', - })[0] - - transports = cr.create('transports_node1', 'resources/transports')[0] - - ssh_transport = cr.create('ssh_transport', 'resources/transport_ssh', - {'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', - 'ssh_user': 'vagrant'})[0] - - transports.connect(node, {}) - - # it uses reverse mappings - ssh_transport.connect(transports, {'ssh_key': 'transports:key', - 'ssh_user': 'transports:user', - 'ssh_port': 'transports:port', - 'name': 'transports:name'}) - - hosts = cr.create('hosts_file', 'resources/hosts_file', {})[0] - - # let's add torrent transport for hosts file deployment (useless in real life) - - torrent_transport = cr.create('torrent_transport', - 'resources/transport_torrent', - {'trackers': ['udp://open.demonii.com:1337', - 'udp://tracker.openbittorrent.com:80']})[0] - # you could use any trackers as you want - - transports_for_torrent = cr.create( - 'transports_for_torrent', 'resources/transports')[0] - - transports_for_torrent.connect(torrent_transport, {}) - - ssh_transport.connect_with_events(transports_for_torrent, {'ssh_key': 'transports:key', - 'ssh_user': 'transports:user', - 'ssh_port': 'transports:port', - 'name': 'transports:name'}, - events={}) - - transports_for_hosts = cr.create( - 'transports_for_hosts', 'resources/transports')[0] - - torrent_transport.connect(transports_for_hosts, {'trackers': 'transports:trackers', - 'name': 'transports:name'}) - - ssh_transport.connect(transports_for_hosts, {'ssh_key': 'transports:key', - 'ssh_user': 'transports:user', - 'ssh_port': 'transports:port', - 'name': 'transports:name'}) - - transports_for_hosts.connect(hosts) - transports_for_hosts.connect_with_events(node, events={}) - - node.connect(hosts, { - 'ip': 'hosts:ip', - 'name': 'hosts:name' - }) - -run() diff --git a/resources/ansible_local/0.0.1/actions/run.yaml b/resources/ansible_local/0.0.1/actions/run.yaml deleted file mode 100644 index 7351a738..00000000 --- a/resources/ansible_local/0.0.1/actions/run.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- hosts: localhost - sudo: yes - vars: - var1: 'playbook' - roles: - - { role: "test_role" } - tasks: - - debug: msg="VAR1 value is {{var1}}" - - fail: msg='just test failure' \ No newline at end of file diff --git a/resources/ansible_local/0.0.1/actions/test_role/defaults/main.yaml b/resources/ansible_local/0.0.1/actions/test_role/defaults/main.yaml deleted file mode 100644 index 14671490..00000000 --- a/resources/ansible_local/0.0.1/actions/test_role/defaults/main.yaml +++ /dev/null @@ -1,4 +0,0 @@ - -var1: initial -uuid: stuff -def1: the_same \ No newline at end of file diff --git a/resources/ansible_local/0.0.1/actions/test_role/tasks/main.yaml b/resources/ansible_local/0.0.1/actions/test_role/tasks/main.yaml deleted file mode 100644 index 1c628a0e..00000000 --- a/resources/ansible_local/0.0.1/actions/test_role/tasks/main.yaml 
+++ /dev/null @@ -1 +0,0 @@ -- debug: msg="Variable1 {{ var1 }} with uuid {{ uuid }} and default var {{ def1 }}" \ No newline at end of file diff --git a/resources/ansible_local/0.0.1/meta.yaml b/resources/ansible_local/0.0.1/meta.yaml deleted file mode 100644 index 32b8d38d..00000000 --- a/resources/ansible_local/0.0.1/meta.yaml +++ /dev/null @@ -1,10 +0,0 @@ -handler: ansible_playbook -version: 0.0.1 -input: - var1: - type: str! - value: meta - uuid: - type: str! - value: 'aa1das1231' - diff --git a/resources/ansible_remote/0.0.1/actions/run.yaml b/resources/ansible_remote/0.0.1/actions/run.yaml deleted file mode 100644 index 7482f7f8..00000000 --- a/resources/ansible_remote/0.0.1/actions/run.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: '*' - sudo: yes - vars: - default1: playbook - tasks: - - debug: msg="my message {{default1}}" \ No newline at end of file diff --git a/resources/ansible_remote/0.0.1/meta.yaml b/resources/ansible_remote/0.0.1/meta.yaml deleted file mode 100644 index 80895199..00000000 --- a/resources/ansible_remote/0.0.1/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -handler: ansible_playbook -version: 0.0.1 -input: - ip: - type: str! - value: - # ssh_user: - # type: str! - # value: - # ssh_key: - # type: str! - # value: - default1: - type: str! - value: meta diff --git a/resources/apache_puppet/1.0.0/README.md b/resources/apache_puppet/1.0.0/README.md deleted file mode 100644 index bf0e1438..00000000 --- a/resources/apache_puppet/1.0.0/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Apache puppet resource - -This class installs Apache and manages apache service. -Defaults provided for Debian OS family. \ No newline at end of file diff --git a/resources/apache_puppet/1.0.0/actions/remove.pp b/resources/apache_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 6719c017..00000000 --- a/resources/apache_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,5 +0,0 @@ -class {'apache': - service_enable => false, - service_ensure => 'stopped', - package_ensure => 'absent', -} diff --git a/resources/apache_puppet/1.0.0/actions/run.pp b/resources/apache_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 52d642de..00000000 --- a/resources/apache_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,120 +0,0 @@ -$resource = hiera($::resource_name) - -$apache_name = $resource['input']['apache_name'] -$service_name = $resource['input']['service_name'] -$default_mods = $resource['input']['default_mods'] -$default_vhost = $resource['input']['default_vhost'] -$default_charset = $resource['input']['default_charset'] -$default_confd_files = $resource['input']['default_confd_files'] -$default_ssl_vhost = $resource['input']['default_ssl_vhost'] -$default_ssl_cert = $resource['input']['default_ssl_cert'] -$default_ssl_key = $resource['input']['default_ssl_key'] -$default_ssl_chain = $resource['input']['default_ssl_chain'] -$default_ssl_ca = $resource['input']['default_ssl_ca'] -$default_ssl_crl_path = $resource['input']['default_ssl_crl_path'] -$default_ssl_crl = $resource['input']['default_ssl_crl'] -$default_ssl_crl_check = $resource['input']['default_ssl_crl_check'] -$default_type = $resource['input']['default_type'] -$ip = $resource['input']['ip'] -$service_restart = $resource['input']['service_restart'] -$purge_configs = $resource['input']['purge_configs'] -$purge_vhost_dir = $resource['input']['purge_vhost_dir'] -$purge_vdir = $resource['input']['purge_vdir'] -$serveradmin = $resource['input']['serveradmin'] -$sendfile = $resource['input']['sendfile'] -$error_documents = 
$resource['input']['error_documents'] -$timeout = $resource['input']['timeout'] -$httpd_dir = $resource['input']['httpd_dir'] -$server_root = $resource['input']['server_root'] -$conf_dir = $resource['input']['conf_dir'] -$confd_dir = $resource['input']['confd_dir'] -$vhost_dir = $resource['input']['vhost_dir'] -$vhost_enable_dir = $resource['input']['vhost_enable_dir'] -$mod_dir = $resource['input']['mod_dir'] -$mod_enable_dir = $resource['input']['mod_enable_dir'] -$mpm_module = $resource['input']['mpm_module'] -$lib_path = $resource['input']['lib_path'] -$conf_template = $resource['input']['conf_template'] -$servername = $resource['input']['servername'] -$manage_user = $resource['input']['manage_user'] -$manage_group = $resource['input']['manage_group'] -$user = $resource['input']['user'] -$group = $resource['input']['group'] -$keepalive = $resource['input']['keepalive'] -$keepalive_timeout = $resource['input']['keepalive_timeout'] -$max_keepalive_requests = $resource['input']['max_keepalive_requests'] -$logroot = $resource['input']['logroot'] -$logroot_mode = $resource['input']['logroot_mode'] -$log_level = $resource['input']['log_level'] -$log_formats = $resource['input']['log_formats'] -$ports_file = $resource['input']['ports_file'] -$docroot = $resource['input']['docroot'] -$apache_version = $resource['input']['apache_version'] -$server_tokens = $resource['input']['server_tokens'] -$server_signature = $resource['input']['server_signature'] -$trace_enable = $resource['input']['trace_enable'] -$allow_encoded_slashes = $resource['input']['allow_encoded_slashes'] -$package_ensure = $resource['input']['package_ensure'] -$use_optional_includes = $resource['input']['use_optional_includes'] - -class {'apache': - apache_name => $apache_name, - service_name => $service_name, - default_mods => $default_mods, - default_vhost => $default_vhost, - default_charset => $default_charset, - default_confd_files => $default_confd_files, - default_ssl_vhost => $default_ssl_vhost, - default_ssl_cert => $default_ssl_cert, - default_ssl_key => $default_ssl_key, - default_ssl_chain => $default_ssl_chain, - default_ssl_ca => $default_ssl_ca, - default_ssl_crl_path => $default_ssl_crl_path, - default_ssl_crl => $default_ssl_crl, - default_ssl_crl_check => $default_ssl_crl_check, - default_type => $default_type, - ip => $ip, - service_enable => true, - service_manage => true, - service_ensure => 'running', - service_restart => $service_restart, - purge_configs => $purge_configs, - purge_vhost_dir => $purge_vhost_dir, - purge_vdir => $purge_vdir, - serveradmin => $serveradmin, - sendfile => $sendfile, - error_documents => $error_documents, - timeout => $timeout, - httpd_dir => $httpd_dir, - server_root => $server_root, - conf_dir => $conf_dir, - confd_dir => $confd_dir, - vhost_dir => $vhost_dir, - vhost_enable_dir => $vhost_enable_dir, - mod_dir => $mod_dir, - mod_enable_dir => $mod_enable_dir, - mpm_module => $mpm_module, - lib_path => $lib_path, - conf_template => $conf_template, - servername => $servername, - manage_user => $manage_user, - manage_group => $manage_group, - user => $user, - group => $group, - keepalive => $keepalive, - keepalive_timeout => $keepalive_timeout, - max_keepalive_requests => $max_keepalive_requests, - logroot => $logroot, - logroot_mode => $logroot_mode, - log_level => $log_level, - log_formats => $log_formats, - ports_file => $ports_file, - docroot => $docroot, - apache_version => $apache_version, - server_tokens => $server_tokens, - server_signature => $server_signature, - 
trace_enable => $trace_enable, - allow_encoded_slashes => $allow_encoded_slashes, - package_ensure => $package_ensure, - use_optional_includes => $use_optional_includes, -} diff --git a/resources/apache_puppet/1.0.0/meta.yaml b/resources/apache_puppet/1.0.0/meta.yaml deleted file mode 100644 index a9b88a2b..00000000 --- a/resources/apache_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,184 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - apache_name: - schema: str - value: 'apache2' - service_name: - schema: str - value: 'apache2' - default_mods: - schema: bool - value: true - default_vhost: - schema: bool - value: true - default_charset: - schema: str - value: - default_confd_files: - schema: bool - value: true - default_ssl_vhost: - schema: bool - value: false - default_ssl_cert: - schema: str - value: '/etc/ssl/certs/ssl-cert-snakeoil.pem' - default_ssl_key: - schema: str - value: '/etc/ssl/private/ssl-cert-snakeoil.key' - default_ssl_chain: - schema: str - value: - default_ssl_ca: - schema: str - value: - default_ssl_crl_path: - schema: str - value: - default_ssl_crl: - schema: str - value: - default_ssl_crl_check: - schema: str - value: - default_type: - schema: str - value: 'none' - service_restart: - schema: str - value: 'restart' - purge_configs: - schema: bool - value: true - purge_vhost_dir: - schema: str - value: - purge_vdir: - schema: bool - value: false - serveradmin: - schema: str - value: 'root@localhost' - sendfile: - schema: str - value: 'On' - error_documents: - schema: bool - value: false - timeout: - schema: int - value: 120 - httpd_dir: - schema: str - value: '/etc/apache2' - server_root: - schema: str - value: '/etc/apache2' - conf_dir: - schema: str - value: '/etc/apache2' - confd_dir: - schema: str - value: '/etc/apache2/conf.d' - vhost_dir: - schema: str - value: '/etc/apache2/sites-available' - vhost_enable_dir: - schema: str - value: '/etc/apache2/sites-enabled' - mod_dir: - schema: str - value: '/etc/apache2/mods-available' - mod_enable_dir: - schema: str - value: '/etc/apache2/mods-enabled' - mpm_module: - schema: str - value: 'worker' - lib_path: - schema: str - value: '/usr/lib/apache2/modules' - conf_template: - schema: str - value: 'apache/httpd.conf.erb' - servername: - schema: str! - value: - manage_user: - schema: bool - value: true - manage_group: - schema: bool - value: true - user: - schema: str - value: 'www-data' - group: - schema: str - value: 'www-data' - keepalive: - schema: str - value: 'Off' - keepalive_timeout: - schema: int - value: 15 - max_keepalive_requests: - schema: int - value: 100 - logroot: - schema: str - value: '/var/log/apache2' - logroot_mode: - schema: str - value: '0640' - log_level: - schema: str - value: 'warn' - log_formats: - schema: {} - value: {} - ports_file: - schema: str - value: '/etc/apache2/ports.conf' - docroot: - schema: str - value: '/srv/www' - apache_version: - schema: str - value: '2.4' - server_tokens: - schema: str - value: 'OS' - server_signature: - schema: str - value: 'On' - trace_enable: - schema: str - value: 'On' - allow_encoded_slashes: - schema: str - value: - package_ensure: - schema: str - value: 'installed' - use_optional_includes: - schema: bool - value: false - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/puppetlabs/puppetlabs-apache.git', branch: '1.5.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/apache_service, resources/apache] diff --git a/resources/apache_puppet/1.0.0/test.py b/resources/apache_puppet/1.0.0/test.py deleted file mode 100644 index 4cff7578..00000000 --- a/resources/apache_puppet/1.0.0/test.py +++ /dev/null @@ -1,11 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing apache_puppet') - requests.get( - 'http://%s:%s' % (resource.args['ip'], 80) - - ) diff --git a/resources/apt_repo/1.0.0/actions/remove.yaml b/resources/apt_repo/1.0.0/actions/remove.yaml deleted file mode 100644 index 55de2217..00000000 --- a/resources/apt_repo/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: rm -f {{item}} - with_items: - - /etc/apt/sources.list.d/{{name}}.list - - /etc/apt/preferences.d/{{name}}.pref - - shell: apt-get update - when: {{validate_integrity}} diff --git a/resources/apt_repo/1.0.0/actions/run.yaml b/resources/apt_repo/1.0.0/actions/run.yaml deleted file mode 100644 index 7897049a..00000000 --- a/resources/apt_repo/1.0.0/actions/run.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - template: - src: {{templates_dir}}/source - dest: /etc/apt/sources.list.d/{{name}}.list - - template: - src: {{templates_dir}}/preferences - dest: /etc/apt/preferences.d/{{name}}.pref - - shell: apt-get update - when: {{validate_integrity}} diff --git a/resources/apt_repo/1.0.0/meta.yaml b/resources/apt_repo/1.0.0/meta.yaml deleted file mode 100644 index c8e85bea..00000000 --- a/resources/apt_repo/1.0.0/meta.yaml +++ /dev/null @@ -1,24 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - repo: - schema: str! - value: - name: - schema: str! - value: - package: - schema: str - value: '*' - pin: - schema: str - value: - pin_priority: - schema: int - value: - validate_integrity: - schema: bool - value: true diff --git a/resources/apt_repo/1.0.0/templates/preferences b/resources/apt_repo/1.0.0/templates/preferences deleted file mode 100644 index b8dfa300..00000000 --- a/resources/apt_repo/1.0.0/templates/preferences +++ /dev/null @@ -1,3 +0,0 @@ -Package: {{package}} -Pin: {{pin}} -Pin-Priority: {{pin_priority}} diff --git a/resources/apt_repo/1.0.0/templates/source b/resources/apt_repo/1.0.0/templates/source deleted file mode 100644 index 2f02ce16..00000000 --- a/resources/apt_repo/1.0.0/templates/source +++ /dev/null @@ -1 +0,0 @@ -{{repo}} diff --git a/resources/ceph_keys/1.0.0/actions/run.sh b/resources/ceph_keys/1.0.0/actions/run.sh deleted file mode 100644 index 7fa849cd..00000000 --- a/resources/ceph_keys/1.0.0/actions/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -BASE_PATH={{ target_directory }} -KEY_NAME={{ key_name }} - -function generate_ssh_keys { - local dir_path=$BASE_PATH$KEY_NAME/ - local key_path=$dir_path$KEY_NAME - mkdir -p $dir_path - if [ ! -f $key_path ]; then - ssh-keygen -b 2048 -t rsa -N '' -f $key_path 2>&1 - else - echo 'Key $key_path already exists' - fi -} - -generate_ssh_keys diff --git a/resources/ceph_keys/1.0.0/meta.yaml b/resources/ceph_keys/1.0.0/meta.yaml deleted file mode 100644 index 549b7862..00000000 --- a/resources/ceph_keys/1.0.0/meta.yaml +++ /dev/null @@ -1,16 +0,0 @@ -handler: shell -version: 1.0.0 -input: - ip: - schema: str! - value: - target_directory: - schema: str! - value: /var/lib/astute/ - key_name: - schema: str! - value: ceph - path: - schema: str! 
- value: /var/lib/astute/ceph/ -tags: [] diff --git a/resources/ceph_mon/1.0.0/actions/run.pp b/resources/ceph_mon/1.0.0/actions/run.pp deleted file mode 100644 index 6b172a81..00000000 --- a/resources/ceph_mon/1.0.0/actions/run.pp +++ /dev/null @@ -1,95 +0,0 @@ -notice('MODULAR: ceph/mon.pp') - - -$storage_hash = hiera('storage', {}) -$public_vip = hiera('public_vip') -$management_vip = hiera('management_vip') -$use_syslog = hiera('use_syslog', true) -$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') -$keystone_hash = hiera('keystone', {}) -$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') - -if ($storage_hash['images_ceph']) { - $glance_backend = 'ceph' -} elsif ($storage_hash['images_vcenter']) { - $glance_backend = 'vmware' -} else { - $glance_backend = 'swift' -} - -if ($storage_hash['volumes_ceph'] or - $storage_hash['images_ceph'] or - $storage_hash['objects_ceph'] or - $storage_hash['ephemeral_ceph'] -) { - $use_ceph = true -} else { - $use_ceph = false -} - -if $use_ceph { - $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') - $primary_mons = keys($ceph_primary_monitor_node) - $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] - - prepare_network_config(hiera_hash('network_scheme')) - $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') - $ceph_public_network = get_network_role_property('ceph/public', 'network') - $mon_addr = get_network_role_property('ceph/public', 'ipaddr') - - class {'ceph': - primary_mon => $primary_mon, - mon_hosts => keys($mon_address_map), - mon_ip_addresses => values($mon_address_map), - mon_addr => $mon_addr, - cluster_node_address => $public_vip, - osd_pool_default_size => $storage_hash['osd_pool_size'], - osd_pool_default_pg_num => $storage_hash['pg_num'], - osd_pool_default_pgp_num => $storage_hash['pg_num'], - use_rgw => false, - glance_backend => $glance_backend, - rgw_pub_ip => $public_vip, - rgw_adm_ip => $management_vip, - rgw_int_ip => $management_vip, - cluster_network => $ceph_cluster_network, - public_network => $ceph_public_network, - use_syslog => $use_syslog, - syslog_log_level => hiera('syslog_log_level_ceph', 'info'), - syslog_log_facility => $syslog_log_facility_ceph, - rgw_keystone_admin_token => $keystone_hash['admin_token'], - ephemeral_ceph => $storage_hash['ephemeral_ceph'] - } - - if ($storage_hash['volumes_ceph']) { - include ::cinder::params - service { 'cinder-volume': - ensure => 'running', - name => $::cinder::params::volume_service, - hasstatus => true, - hasrestart => true, - } - - service { 'cinder-backup': - ensure => 'running', - name => $::cinder::params::backup_service, - hasstatus => true, - hasrestart => true, - } - - Class['ceph'] ~> Service['cinder-volume'] - Class['ceph'] ~> Service['cinder-backup'] - } - - if ($storage_hash['images_ceph']) { - include ::glance::params - service { 'glance-api': - ensure => 'running', - name => $::glance::params::api_service_name, - hasstatus => true, - hasrestart => true, - } - - Class['ceph'] ~> Service['glance-api'] - } - -} diff --git a/resources/ceph_mon/1.0.0/actions/test.pp b/resources/ceph_mon/1.0.0/actions/test.pp deleted file mode 100644 index b5d0bbf5..00000000 --- a/resources/ceph_mon/1.0.0/actions/test.pp +++ /dev/null @@ -1,4 +0,0 @@ -prepare_network_config(hiera_hash('network_scheme')) -$ceph_cluster_network = get_network_role_property('ceph/replication', 'network') - -notify{"The value is: ${ceph_cluster_network}": } diff --git 
a/resources/ceph_mon/1.0.0/meta.yaml b/resources/ceph_mon/1.0.0/meta.yaml deleted file mode 100644 index c63216e4..00000000 --- a/resources/ceph_mon/1.0.0/meta.yaml +++ /dev/null @@ -1,37 +0,0 @@ -handler: puppetv2 -version: 1.0.0 -input: - ip: - schema: str! - value: - public_vip: - schema: str! - value: - management_vip: - schema: str! - value: - use_syslog: - schema: bool - value: true - keystone: - schema: {'admin_token': 'str'} - value: {} - ceph_monitor_nodes: - schema: [] - value: [] - ceph_primary_monitor_node: - schema: [] - value: [] - storage: - schema: {} - value: {} - network_scheme: - schema: {} - value: {} - role: - schema: str! - value: - puppet_modules: - schema: str! - value: -tags: [] diff --git a/resources/cinder_api_puppet/1.0.0/README.md b/resources/cinder_api_puppet/1.0.0/README.md deleted file mode 100644 index 177e119a..00000000 --- a/resources/cinder_api_puppet/1.0.0/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# Cinder API resource for puppet handler - -Setup and configure the cinder API endpoint - -## Parameters - -source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/api.pp - - ``keystone_password`` - The password to use for authentication (keystone) - - ``keystone_enabled`` - (optional) Use keystone for authentification - Defaults to true - - ``keystone_tenant`` - (optional) The tenant of the auth user - Defaults to services - - ``keystone_user`` - (optional) The name of the auth user - Defaults to cinder - - ``keystone_auth_host`` - (optional) The keystone host - Defaults to localhost - - ``keystone_auth_port`` - (optional) The keystone auth port - Defaults to 35357 - - ``keystone_auth_protocol`` - (optional) The protocol used to access the auth host - Defaults to http. - - ``os_region_name`` - (optional) Some operations require cinder to make API requests - to Nova. This sets the keystone region to be used for these - requests. For example, boot-from-volume. - Defaults to undef. - - ``keystone_auth_admin_prefix`` - (optional) The admin_prefix used to admin endpoint of the auth host - This allow admin auth URIs like http://auth_host:35357/keystone. - (where '/keystone' is the admin prefix) - Defaults to false for empty. If defined, should be a string with a - leading '/' and no trailing '/'. - - ``service_port`` - (optional) The cinder api port - Defaults to 5000 - - ``service_workers`` - (optional) Number of cinder-api workers - Defaults to $::processorcount - - ``package_ensure`` - (optional) The state of the package - Defaults to present - - ``bind_host`` - (optional) The cinder api bind address - Defaults to 0.0.0.0 - - ``ratelimits`` - (optional) The state of the service - Defaults to undef. If undefined the default ratelimiting values are used. - - ``ratelimits_factory`` - (optional) Factory to use for ratelimiting - Defaults to 'cinder.api.v1.limits:RateLimitingMiddleware.factory' - - ``default_volume_type`` - (optional) default volume type to use. - This should contain the name of the default volume type to use. - If not configured, it produces an error when creating a volume - without specifying a type. - Defaults to 'false'. - - ``validate`` - (optional) Whether to validate the service is working after any service refreshes - Defaults to false - - ``validation_options`` - (optional) Service validation options - Should be a hash of options defined in openstacklib::service_validation - If empty, defaults values are taken from openstacklib function. - Default command list volumes. - Require validate set at True. 
- Example: - glance::api::validation_options: - glance-api: - command: check_cinder-api.py - path: /usr/bin:/bin:/usr/sbin:/sbin - provider: shell - tries: 5 - try_sleep: 10 - Defaults to {} diff --git a/resources/cinder_api_puppet/1.0.0/actions/remove.pp b/resources/cinder_api_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index f533cb1a..00000000 --- a/resources/cinder_api_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,12 +0,0 @@ -class {'cinder::api': - enabled => false, - package_ensure => 'absent', - keystone_password => 'not important as removed', -} - -include cinder::params - -package { 'cinder': - ensure => 'absent', - name => $::cinder::params::package_name, -} \ No newline at end of file diff --git a/resources/cinder_api_puppet/1.0.0/actions/run.pp b/resources/cinder_api_puppet/1.0.0/actions/run.pp deleted file mode 100644 index f7d18786..00000000 --- a/resources/cinder_api_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,52 +0,0 @@ -$resource = hiera($::resource_name) - -$keystone_password = $resource['input']['keystone_password'] -$keystone_enabled = $resource['input']['keystone_enabled'] -$keystone_tenant = $resource['input']['keystone_tenant'] -$keystone_user = $resource['input']['keystone_user'] -$keystone_auth_host = $resource['input']['keystone_auth_host'] -$keystone_auth_port = $resource['input']['keystone_auth_port'] -$keystone_auth_protocol = $resource['input']['keystone_auth_protocol'] -$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix'] -$keystone_auth_uri = $resource['input']['keystone_auth_uri'] -$os_region_name = $resource['input']['os_region_name'] -$service_port = $resource['input']['service_port'] -$service_workers = $resource['input']['service_workers'] -$package_ensure = $resource['input']['package_ensure'] -$bind_host = $resource['input']['bind_host'] -$ratelimits = $resource['input']['ratelimits'] -$default_volume_type = $resource['input']['default_volume_type'] -$ratelimits_factory = $resource['input']['ratelimits_factory'] -$validate = $resource['input']['validate'] -$validation_options = $resource['input']['validation_options'] - -include cinder::params - -package { 'cinder': - ensure => $package_ensure, - name => $::cinder::params::package_name, -} -> - -class {'cinder::api': - keystone_password => $keystone_password, - keystone_enabled => $keystone_enabled, - keystone_tenant => $keystone_tenant, - keystone_user => $keystone_user, - keystone_auth_host => $keystone_auth_host, - keystone_auth_port => $keystone_auth_port, - keystone_auth_protocol => $keystone_auth_protocol, - keystone_auth_admin_prefix => $keystone_auth_admin_prefix, - keystone_auth_uri => $keystone_auth_uri, - os_region_name => $os_region_name, - service_port => $service_port, - service_workers => $service_workers, - package_ensure => $package_ensure, - bind_host => $bind_host, - enabled => true, - manage_service => true, - ratelimits => $ratelimits, - default_volume_type => $default_volume_type, - ratelimits_factory => $ratelimits_factory, - validate => $validate, - validation_options => $validation_options, -} diff --git a/resources/cinder_api_puppet/1.0.0/actions/update.pp b/resources/cinder_api_puppet/1.0.0/actions/update.pp deleted file mode 100644 index 3486c9a2..00000000 --- a/resources/cinder_api_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,56 +0,0 @@ -$resource = hiera($::resource_name) - -$keystone_password = $resource['input']['keystone_password'] -$keystone_enabled = $resource['input']['keystone_enabled'] -$keystone_tenant = 
$resource['input']['keystone_tenant'] -$keystone_user = $resource['input']['keystone_user'] -$keystone_auth_host = $resource['input']['keystone_auth_host'] -$keystone_auth_port = $resource['input']['keystone_auth_port'] -$keystone_auth_protocol = $resource['input']['keystone_auth_protocol'] -$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix'] -$keystone_auth_uri = $resource['input']['keystone_auth_uri'] -$os_region_name = $resource['input']['os_region_name'] -$service_port = $resource['input']['service_port'] -$service_workers = $resource['input']['service_workers'] -$package_ensure = $resource['input']['package_ensure'] -$bind_host = $resource['input']['bind_host'] -$ratelimits = $resource['input']['ratelimits'] -$default_volume_type = $resource['input']['default_volume_type'] -$ratelimits_factory = $resource['input']['ratelimits_factory'] -$validate = $resource['input']['validate'] -$validation_options = $resource['input']['validation_options'] - -include cinder::params - -package { 'cinder': - ensure => $package_ensure, - name => $::cinder::params::package_name, -} -> - -class {'cinder::api': - keystone_password => $keystone_password, - keystone_enabled => $keystone_enabled, - keystone_tenant => $keystone_tenant, - keystone_user => $keystone_user, - keystone_auth_host => $keystone_auth_host, - keystone_auth_port => $keystone_auth_port, - keystone_auth_protocol => $keystone_auth_protocol, - keystone_auth_admin_prefix => $keystone_auth_admin_prefix, - keystone_auth_uri => $keystone_auth_uri, - os_region_name => $os_region_name, - service_port => $service_port, - service_workers => $service_workers, - package_ensure => $package_ensure, - bind_host => $bind_host, - enabled => true, - manage_service => true, - ratelimits => $ratelimits, - default_volume_type => $default_volume_type, - ratelimits_factory => $ratelimits_factory, - validate => $validate, - validation_options => $validation_options, -} - -notify { "restart cinder api": - notify => Service["cinder-api"], -} diff --git a/resources/cinder_api_puppet/1.0.0/meta.yaml b/resources/cinder_api_puppet/1.0.0/meta.yaml deleted file mode 100644 index b6096059..00000000 --- a/resources/cinder_api_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,76 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - keystone_password: - schema: str! - value: 'keystone' - keystone_enabled: - schema: bool - value: true - keystone_tenant: - schema: str - value: 'services' - keystone_user: - schema: str - value: 'cinder' - keystone_auth_host: - schema: str - value: 'localhost' - keystone_auth_port: - schema: int - value: 35357 - keystone_auth_protocol: - schema: str - value: 'http' - keystone_auth_admin_prefix: - schema: bool - value: false - keystone_auth_uri: - schema: bool - value: false - os_region_name: - schema: str - value: - service_port: - schema: int - value: 5000 - service_workers: - schema: int - value: 1 - package_ensure: - schema: str - value: 'present' - bind_host: - schema: str - value: '0.0.0.0' - ratelimits: - schema: str - value: - default_volume_type: - schema: bool - value: false - ratelimits_factory: - schema: str - value: 'cinder.api.v1.limits:RateLimitingMiddleware.factory' - validate: - schema: bool - value: false - validation_options: - schema: {} - value: {} - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/cinder_api_service, resources/cinder_api, resources/cinder] diff --git a/resources/cinder_api_puppet/1.0.0/test.py b/resources/cinder_api_puppet/1.0.0/test.py deleted file mode 100644 index 3dcbda7d..00000000 --- a/resources/cinder_api_puppet/1.0.0/test.py +++ /dev/null @@ -1,10 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing cinder_api_puppet') - requests.get( - 'http://%s:%s' % (resource.args['ip'], resource.args['service_port']) - ) diff --git a/resources/cinder_glance_puppet/1.0.0/README.md b/resources/cinder_glance_puppet/1.0.0/README.md deleted file mode 100644 index 96da42a1..00000000 --- a/resources/cinder_glance_puppet/1.0.0/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Cinder Volume resource for puppet handler - -Glance drive Cinder as a block storage backend to store image data. - -# Parameters - -source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/glance.pp - - ``glance_api_servers`` - (optional) A list of the glance api servers available to cinder. - Should be an array with [hostname|ip]:port - Defaults to undef - Note: for this resource, it is decomposed to *_host and *_port due to - existing implementation limitations - - ``glance_api_version`` - (optional) Glance API version. - Should be 1 or 2 - Defaults to 2 (current version) - - ``glance_num_retries`` - (optional) Number retries when downloading an image from glance. - Defaults to 0 - - ``glance_api_insecure`` - (optional) Allow to perform insecure SSL (https) requests to glance. - Defaults to false - - ``glance_api_ssl_compression`` - (optional) Whether to attempt to negotiate SSL layer compression when - using SSL (https) requests. Set to False to disable SSL - layer compression. In some cases disabling this may improve - data throughput, eg when high network bandwidth is available - and you are using already compressed image formats such as qcow2. - Defaults to false - - ``glance_request_timeout`` - (optional) http/https timeout value for glance operations. 
- Defaults to undef \ No newline at end of file diff --git a/resources/cinder_glance_puppet/1.0.0/actions/remove.pp b/resources/cinder_glance_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index b98f9cd4..00000000 --- a/resources/cinder_glance_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1 +0,0 @@ -notify { 'Nothing to remove here': } diff --git a/resources/cinder_glance_puppet/1.0.0/actions/run.pp b/resources/cinder_glance_puppet/1.0.0/actions/run.pp deleted file mode 100644 index fdbe9618..00000000 --- a/resources/cinder_glance_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,18 +0,0 @@ -$resource = hiera($::resource_name) - -$glance_api_version = $resource['input']['glance_api_version'] -$glance_num_retries = $resource['input']['glance_num_retries'] -$glance_api_insecure = $resource['input']['glance_api_insecure'] -$glance_api_ssl_compression = $resource['input']['glance_api_ssl_compression'] -$glance_request_timeout = $resource['input']['glance_request_timeout'] -$glance_api_servers_host = $resource['input']['glance_api_servers_host'] -$glance_api_servers_port = $resource['input']['glance_api_servers_port'] - -class {'cinder::glance': - glance_api_servers => "${glance_api_servers_host}:${glance_api_servers_port}", - glance_api_version => $glance_api_version, - glance_num_retries => $glance_num_retries, - glance_api_insecure => $glance_api_insecure, - glance_api_ssl_compression => $glance_api_ssl_compression, - glance_request_timeout => $glance_request_timeout, -} diff --git a/resources/cinder_glance_puppet/1.0.0/meta.yaml b/resources/cinder_glance_puppet/1.0.0/meta.yaml deleted file mode 100644 index c6a30871..00000000 --- a/resources/cinder_glance_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,41 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - glance_api_version: - schema: int - value: 2 - glance_num_retries: - schema: int - value: 0 - glance_api_insecure: - schema: bool - value: false - glance_api_ssl_compression: - schema: bool - value: false - glance_request_timeout: - schema: str - value: - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - glance_api_servers_port: - schema: int - value: 9292 - glance_api_servers_host: - schema: 'str' - value: 'localhost' - -tags: [resource/cinder_glance_service, resources/cinder_glance, resources/cinder] diff --git a/resources/cinder_puppet/1.0.0/README.md b/resources/cinder_puppet/1.0.0/README.md deleted file mode 100644 index 52021836..00000000 --- a/resources/cinder_puppet/1.0.0/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# Cinder resource for puppet handler - -Controls a live cycle of the cinder entities, -like the main puppet class, auth, DB, AMQP, packages, -keystone user, role and endpoint. - -# Parameters - -source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/init.pp - - ``database_connection`` - Url used to connect to database. - (Optional) Defaults to - 'sqlite:////var/lib/cinder/cinder.sqlite' - - ``database_idle_timeout`` - Timeout when db connections should be reaped. - (Optional) Defaults to 3600. - - ``database_min_pool_size`` - Minimum number of SQL connections to keep open in a pool. - (Optional) Defaults to 1. - - ``database_max_pool_size`` - Maximum number of SQL connections to keep open in a pool. - (Optional) Defaults to undef. 
- - ``database_max_retries`` - Maximum db connection retries during startup. - Setting -1 implies an infinite retry count. - (Optional) Defaults to 10. - - ``database_retry_interval`` - Interval between retries of opening a sql connection. - (Optional) Defaults to 10. - - ``database_max_overflow`` - If set, use this value for max_overflow with sqlalchemy. - (Optional) Defaults to undef. - - ``rabbit_use_ssl`` - (optional) Connect over SSL for RabbitMQ - Defaults to false - - ``kombu_ssl_ca_certs`` - (optional) SSL certification authority file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_certfile`` - (optional) SSL cert file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_keyfile`` - (optional) SSL key file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_version`` - (optional) SSL version to use (valid only if SSL enabled). - Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be - available on some distributions. - Defaults to 'TLSv1' - - ``amqp_durable_queues`` - Use durable queues in amqp. - (Optional) Defaults to false. - - ``use_syslog`` - Use syslog for logging. - (Optional) Defaults to false. - - ``log_facility`` - Syslog facility to receive log lines. - (Optional) Defaults to LOG_USER. - - ``log_dir`` - (optional) Directory where logs should be stored. - If set to boolean false, it will not log to any directory. - Defaults to '/var/log/cinder' - - ``use_ssl`` - (optional) Enable SSL on the API server - Defaults to false, not set - - ``cert_file`` - (optinal) Certificate file to use when starting API server securely - Defaults to false, not set - - ``key_file`` - (optional) Private key file to use when starting API server securely - Defaults to false, not set - - ``ca_file`` - (optional) CA certificate file to use to verify connecting clients - Defaults to false, not set_ - - ``mysql_module`` - (optional) Deprecated. Does nothing. - - ``storage_availability_zone`` - (optional) Availability zone of the node. - Defaults to 'nova' - - ``default_availability_zone`` - (optional) Default availability zone for new volumes. - If not set, the storage_availability_zone option value is used as - the default for new volumes. 
- Defaults to false - - ``sql_connection`` - DEPRECATED - ``sql_idle_timeout`` - DEPRECATED diff --git a/resources/cinder_puppet/1.0.0/actions/remove.pp b/resources/cinder_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 21f5561d..00000000 --- a/resources/cinder_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,4 +0,0 @@ -class {'cinder': - package_ensure => 'absent', - rabbit_password => 'not important as removed', -} diff --git a/resources/cinder_puppet/1.0.0/actions/run.pp b/resources/cinder_puppet/1.0.0/actions/run.pp deleted file mode 100644 index cb456062..00000000 --- a/resources/cinder_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,116 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$db_user = $resource['input']['db_user'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_host = $resource['input']['db_host'] -$db_port = $resource['input']['db_port'] - -$database_connection = $resource['input']['database_connection'] -$database_idle_timeout = $resource['input']['database_idle_timeout'] -$database_min_pool_size = $resource['input']['database_min_pool_size'] -$database_max_pool_size = $resource['input']['database_max_pool_size'] -$database_max_retries = $resource['input']['database_max_retries'] -$database_retry_interval = $resource['input']['database_retry_interval'] -$database_max_overflow = $resource['input']['database_max_overflow'] -$rpc_backend = $resource['input']['rpc_backend'] -$control_exchange = $resource['input']['control_exchange'] -$rabbit_host = $resource['input']['rabbit_host'] -$rabbit_port = $resource['input']['rabbit_port'] -$rabbit_hosts = $resource['input']['rabbit_hosts'] -$rabbit_virtual_host = $resource['input']['rabbit_virtual_host'] -$rabbit_userid = $resource['input']['rabbit_userid'] -$rabbit_password = $resource['input']['rabbit_password'] -$rabbit_use_ssl = $resource['input']['rabbit_use_ssl'] -$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs'] -$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile'] -$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile'] -$kombu_ssl_version = $resource['input']['kombu_ssl_version'] -$amqp_durable_queues = $resource['input']['amqp_durable_queues'] -$qpid_hostname = $resource['input']['qpid_hostname'] -$qpid_port = $resource['input']['qpid_port'] -$qpid_username = $resource['input']['qpid_username'] -$qpid_password = $resource['input']['qpid_password'] -$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms'] -$qpid_reconnect = $resource['input']['qpid_reconnect'] -$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout'] -$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit'] -$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min'] -$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max'] -$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval'] -$qpid_heartbeat = $resource['input']['qpid_heartbeat'] -$qpid_protocol = $resource['input']['qpid_protocol'] -$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay'] -$package_ensure = $resource['input']['package_ensure'] -$use_ssl = $resource['input']['use_ssl'] -$ca_file = $resource['input']['ca_file'] -$cert_file = $resource['input']['cert_file'] -$key_file = $resource['input']['key_file'] -$api_paste_config = $resource['input']['api_paste_config'] -$use_syslog = $resource['input']['use_syslog'] -$log_facility = $resource['input']['log_facility'] -$log_dir 
= $resource['input']['log_dir'] -$verbose = $resource['input']['verbose'] -$debug = $resource['input']['debug'] -$storage_availability_zone = $resource['input']['storage_availability_zone'] -$default_availability_zone = $resource['input']['default_availability_zone'] -$mysql_module = $resource['input']['mysql_module'] -# Do not apply the legacy stuff -#$sql_connection = $resource['input']['sql_connection'] -$sql_idle_timeout = $resource['input']['sql_idle_timeout'] - -class {'cinder': - database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}", - database_idle_timeout => $database_idle_timeout, - database_min_pool_size => $database_min_pool_size, - database_max_pool_size => $database_max_pool_size, - database_max_retries => $database_max_retries, - database_retry_interval => $database_retry_interval, - database_max_overflow => $database_max_overflow, - rpc_backend => $rpc_backend, - control_exchange => $control_exchange, - rabbit_host => $rabbit_host, - rabbit_port => $rabbit_port, - rabbit_hosts => $rabbit_hosts, - rabbit_virtual_host => $rabbit_virtual_host, - rabbit_userid => $rabbit_userid, - rabbit_password => $rabbit_password, - rabbit_use_ssl => $rabbit_use_ssl, - kombu_ssl_ca_certs => $kombu_ssl_ca_certs, - kombu_ssl_certfile => $kombu_ssl_certfile, - kombu_ssl_keyfile => $kombu_ssl_keyfile, - kombu_ssl_version => $kombu_ssl_version, - amqp_durable_queues => $amqp_durable_queues, - qpid_hostname => $qpid_hostname, - qpid_port => $qpid_port, - qpid_username => $qpid_username, - qpid_password => $qpid_password, - qpid_sasl_mechanisms => $qpid_sasl_mechanisms, - qpid_reconnect => $qpid_reconnect, - qpid_reconnect_timeout => $qpid_reconnect_timeout, - qpid_reconnect_limit => $qpid_reconnect_limit, - qpid_reconnect_interval_min => $qpid_reconnect_interval_min, - qpid_reconnect_interval_max => $qpid_reconnect_interval_max, - qpid_reconnect_interval => $qpid_reconnect_interval, - qpid_heartbeat => $qpid_heartbeat, - qpid_protocol => $qpid_protocol, - qpid_tcp_nodelay => $qpid_tcp_nodelay, - package_ensure => $package_ensure, - use_ssl => $use_ssl, - ca_file => $ca_file, - cert_file => $cert_file, - key_file => $key_file, - api_paste_config => $api_paste_config, - use_syslog => $use_syslog, - log_facility => $log_facility, - log_dir => $log_dir, - verbose => $verbose, - debug => $debug, - storage_availability_zone => $storage_availability_zone, - default_availability_zone => $default_availability_zone, - mysql_module => $mysql_module, - sql_connection => $sql_connection, - sql_idle_timeout => $sql_idle_timeout, -} diff --git a/resources/cinder_puppet/1.0.0/meta.yaml b/resources/cinder_puppet/1.0.0/meta.yaml deleted file mode 100644 index dc19d3a6..00000000 --- a/resources/cinder_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,215 +0,0 @@ -handler: puppet -actions: - run: run.pp - update: run.pp -version: 1.0.0 -input: - database_connection: - schema: str - value: 'sqlite:////var/lib/cinder/cinder.sqlite' - database_idle_timeout: - schema: int - value: 3600 - database_min_pool_size: - schema: int - value: 1 - database_max_pool_size: - schema: str - value: - database_max_retries: - schema: int - value: 10 - database_retry_interval: - schema: int - value: 10 - database_max_overflow: - schema: str - value: - rpc_backend: - schema: str - value: 'cinder.openstack.common.rpc.impl_kombu' - control_exchange: - schema: str - value: 'openstack' - rabbit_host: - schema: str - value: '127.0.0.1' - rabbit_port: - schema: int - value: 5672 - rabbit_hosts: - schema: bool - 
value: false - rabbit_virtual_host: - schema: str - value: '/' - rabbit_userid: - schema: str - value: 'guest' - rabbit_password: - schema: str! - value: 'rabbit' - rabbit_use_ssl: - schema: bool - value: false - kombu_ssl_ca_certs: - schema: str - value: - kombu_ssl_certfile: - schema: str - value: - kombu_ssl_keyfile: - schema: str - value: - kombu_ssl_version: - schema: str - value: 'TLSv1' - amqp_durable_queues: - schema: bool - value: false - qpid_hostname: - schema: str - value: 'localhost' - qpid_port: - schema: int - value: 5672 - qpid_username: - schema: str - value: 'guest' - qpid_password: - schema: str! - value: 'qpid' - qpid_sasl_mechanisms: - schema: bool - value: false - qpid_reconnect: - schema: bool - value: true - qpid_reconnect_timeout: - schema: int - value: 0 - qpid_reconnect_limit: - schema: int - value: 0 - qpid_reconnect_interval_min: - schema: int - value: 0 - qpid_reconnect_interval_max: - schema: int - value: 0 - qpid_reconnect_interval: - schema: int - value: 0 - qpid_heartbeat: - schema: int - value: 60 - qpid_protocol: - schema: str - value: 'tcp' - qpid_tcp_nodelay: - schema: bool - value: true - package_ensure: - schema: str - value: 'present' - use_ssl: - schema: bool - value: false - ca_file: - schema: bool - value: false - cert_file: - schema: bool - value: false - key_file: - schema: bool - value: false - api_paste_config: - schema: str - value: '/etc/cinder/api-paste.ini' - use_syslog: - schema: bool - value: false - log_facility: - schema: str - value: 'LOG_USER' - log_dir: - schema: str - value: '/var/log/cinder' - verbose: - schema: bool - value: false - debug: - schema: bool - value: false - storage_availability_zone: - schema: str - value: 'nova' - default_availability_zone: - schema: bool - value: false - mysql_module: - schema: str - value: - sql_connection: - schema: str - value: - sql_idle_timeout: - schema: str - value: - - db_user: - schema: str! - value: cinder - db_password: - schema: str! - value: cinder - db_name: - schema: str! - value: cinder - db_host: - schema: str! - value: - db_port: - schema: int! - value: - - port: - schema: int! - value: 8776 - - module: - schema: {name: str!, type: str, url: str, ref: str} - value: {name: 'cinder', type: 'git', url: 'https://github.com/openstack/puppet-cinder', ref: '5.1.0'} - - keystone_host: - schema: str! - value: - keystone_port: - schema: int! - value: - keystone_user: - schema: str! - value: - keystone_password: - schema: str! - value: - keystone_tenant: - schema: str! - value: - -# forge: -# schema: str! -# value: 'stackforge-cinder' - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/cinder_service, resources/cinder] diff --git a/resources/cinder_puppet/1.0.0/test.py b/resources/cinder_puppet/1.0.0/test.py deleted file mode 100644 index 46a25da9..00000000 --- a/resources/cinder_puppet/1.0.0/test.py +++ /dev/null @@ -1,10 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing cinder_puppet') - requests.get( - 'http://%s:%s' % (resource.args['ip'], resource.args['port']) - ) diff --git a/resources/cinder_scheduler_puppet/1.0.0/README.md b/resources/cinder_scheduler_puppet/1.0.0/README.md deleted file mode 100644 index fc64071b..00000000 --- a/resources/cinder_scheduler_puppet/1.0.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Cinder Scheduler resource for puppet handler - -Setup and configure the cinder scheduler service diff --git a/resources/cinder_scheduler_puppet/1.0.0/actions/remove.pp b/resources/cinder_scheduler_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index d050b28b..00000000 --- a/resources/cinder_scheduler_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,4 +0,0 @@ -class {'cinder::scheduler': - enabled => false, - package_ensure => 'absent', -} diff --git a/resources/cinder_scheduler_puppet/1.0.0/actions/run.pp b/resources/cinder_scheduler_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 8a354de1..00000000 --- a/resources/cinder_scheduler_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,18 +0,0 @@ -$resource = hiera($::resource_name) - -$scheduler_driver = $resource['input']['scheduler_driver'] -$package_ensure = $resource['input']['package_ensure'] - -include cinder::params - -package { 'cinder': - ensure => $package_ensure, - name => $::cinder::params::package_name, -} -> - -class {'cinder::scheduler': - scheduler_driver => $scheduler_driver, - package_ensure => $package_ensure, - enabled => true, - manage_service => true, -} diff --git a/resources/cinder_scheduler_puppet/1.0.0/actions/update.pp b/resources/cinder_scheduler_puppet/1.0.0/actions/update.pp deleted file mode 100644 index 7b67bfd3..00000000 --- a/resources/cinder_scheduler_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,22 +0,0 @@ -$resource = hiera($::resource_name) - -$scheduler_driver = $resource['input']['scheduler_driver'] -$package_ensure = $resource['input']['package_ensure'] - -include cinder::params - -package { 'cinder': - ensure => $package_ensure, - name => $::cinder::params::package_name, -} -> - -class {'cinder::scheduler': - scheduler_driver => $scheduler_driver, - package_ensure => $package_ensure, - enabled => true, - manage_service => true, -} - -notify { "restart cinder volume": - notify => Service["cinder-scheduler"], -} diff --git a/resources/cinder_scheduler_puppet/1.0.0/meta.yaml b/resources/cinder_scheduler_puppet/1.0.0/meta.yaml deleted file mode 100644 index 88c3e65c..00000000 --- a/resources/cinder_scheduler_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - scheduler_driver: - schema: str - value: - package_ensure: - schema: str - value: 'present' - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/cinder_scheduler_service, resources/cinder_scheduler, resources/cinder] diff --git a/resources/cinder_scheduler_puppet/1.0.0/test.py b/resources/cinder_scheduler_puppet/1.0.0/test.py deleted file mode 100644 index af73ec96..00000000 --- a/resources/cinder_scheduler_puppet/1.0.0/test.py +++ /dev/null @@ -1,12 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing cinder_scheduler_puppet') -# requests.get( -# 'http://%s:%s' % (resource.args['ip'], resource.args['port']) -# TODO(bogdando) figure out how to test this -# http://docs.openstack.org/developer/nova/devref/scheduler.html -# ) diff --git a/resources/cinder_volume_puppet/1.0.0/README.md b/resources/cinder_volume_puppet/1.0.0/README.md deleted file mode 100644 index e597c2bf..00000000 --- a/resources/cinder_volume_puppet/1.0.0/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Cinder Volume resource for puppet handler - -Setup and configure the cinder volume service. -Configure ISCSI volume backend for cinder volume nodes, if specified -(Default true) - -source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/volume/iscsi.pp -source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/volume.pp \ No newline at end of file diff --git a/resources/cinder_volume_puppet/1.0.0/actions/remove.pp b/resources/cinder_volume_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 605db1fd..00000000 --- a/resources/cinder_volume_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,4 +0,0 @@ -class {'cinder::volume': - enabled => false, - package_ensure => 'absent', -} diff --git a/resources/cinder_volume_puppet/1.0.0/actions/run.pp b/resources/cinder_volume_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 58a7ea4e..00000000 --- a/resources/cinder_volume_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,31 +0,0 @@ -$resource = hiera($::resource_name) - -$package_ensure = $resource['input']['package_ensure'] -$use_iscsi_backend = $resource['input']['use_iscsi_backend'] - -$iscsi_ip_address = $resource['input']['iscsi_ip_address'] -$volume_driver = $resource['input']['volume_driver'] -$volume_group = $resource['input']['volume_group'] -$iscsi_helper = $resource['input']['iscsi_helper'] - -include cinder::params - -package { 'cinder': - ensure => $package_ensure, - name => $::cinder::params::package_name, -} -> - -class {'cinder::volume': - package_ensure => $package_ensure, - enabled => true, - manage_service => true, -} - -if $use_iscsi_backend { - class {'cinder::volume::iscsi': - iscsi_ip_address => $iscsi_ip_address, - volume_driver => $volume_driver, - volume_group => $volume_group, - iscsi_helper => $iscsi_helper, - } -} \ No newline at end of file diff --git a/resources/cinder_volume_puppet/1.0.0/actions/update.pp b/resources/cinder_volume_puppet/1.0.0/actions/update.pp deleted file mode 100644 index b8f23629..00000000 --- a/resources/cinder_volume_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,26 +0,0 @@ -$resource = hiera($::resource_name) - -$package_ensure = $resource['input']['package_ensure'] -$use_iscsi_backend = $resource['input']['use_iscsi_backend'] - -$iscsi_ip_address = $resource['input']['iscsi_ip_address'] -$volume_driver = $resource['input']['volume_driver'] -$volume_group = $resource['input']['volume_group'] -$iscsi_helper = $resource['input']['iscsi_helper'] - -include cinder::params - -package { 'cinder': - ensure => $package_ensure, - name => $::cinder::params::package_name, -} -> - -class {'cinder::volume': - 
package_ensure => $package_ensure, - enabled => true, - manage_service => true, -} - -notify { "restart cinder volume": - notify => Service["cinder-volume"], -} diff --git a/resources/cinder_volume_puppet/1.0.0/meta.yaml b/resources/cinder_volume_puppet/1.0.0/meta.yaml deleted file mode 100644 index 1ef6657c..00000000 --- a/resources/cinder_volume_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,38 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - package_ensure: - schema: str - value: 'present' - iscsi_ip_address: - schema: str - value: '127.0.0.1' - volume_driver: - schema: str - value: 'cinder.volume.drivers.lvm.LVMISCSIDriver' - volume_group: - schema: str - value: 'cinder-volumes' - iscsi_helper: - schema: str - value: 'tgtadm' - - use_iscsi_backend: - schema: bool - value: true - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/cinder_volume_service, resources/cinder_volume, resources/cinder] diff --git a/resources/cinder_volume_puppet/1.0.0/test.py b/resources/cinder_volume_puppet/1.0.0/test.py deleted file mode 100644 index 8c8263de..00000000 --- a/resources/cinder_volume_puppet/1.0.0/test.py +++ /dev/null @@ -1,12 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing cinder_volume_puppet') -# requests.get( -# 'http://%s:%s' % (resource.args['ip'], resource.args['port']) -# TODO(bogdando) figure out how to test this -# http://docs.openstack.org/developer/nova/devref/volume.html -# ) diff --git a/resources/container_networks/1.0.0/actions/run.yaml b/resources/container_networks/1.0.0/actions/run.yaml deleted file mode 100644 index a7ef8042..00000000 --- a/resources/container_networks/1.0.0/actions/run.yaml +++ /dev/null @@ -1,22 +0,0 @@ -- hosts: '*' - sudo: yes - gather_facts: false - # this is default variables, they will be overwritten by resource one - vars: - networks: - mgmt: - address: 172.18.10.6 - bridge: br-test0 - bridge_address: 172.18.10.252/24 - interface: eth1 - netmask: 255.255.255.0 - type: veth - tasks: - - shell: ip l add {{item.value.bridge}} type bridge - with_dict: networks - ignore_errors: true - - shell: ip l set {{item.value.bridge}} up - with_dict: networks - - shell: ip a add dev {{item.value.bridge}} {{item.value.bridge_address}} - with_dict: networks - ignore_errors: true diff --git a/resources/container_networks/1.0.0/meta.yaml b/resources/container_networks/1.0.0/meta.yaml deleted file mode 100644 index 73f110ca..00000000 --- a/resources/container_networks/1.0.0/meta.yaml +++ /dev/null @@ -1,16 +0,0 @@ -handler: ansible_playbook -version: 1.0.0 -actions: -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - networks: - schema: {} - value: diff --git a/resources/data_container/1.0.0/actions/echo.yaml b/resources/data_container/1.0.0/actions/echo.yaml deleted file mode 100644 index affdbef0..00000000 --- a/resources/data_container/1.0.0/actions/echo.yaml +++ /dev/null @@ -1,5 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: echo `/sbin/ifconfig` diff --git a/resources/data_container/1.0.0/actions/remove.yaml b/resources/data_container/1.0.0/actions/remove.yaml deleted file mode 100644 index 50b041f7..00000000 --- a/resources/data_container/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: docker stop {{ resource_name }} - - shell: docker rm {{ resource_name }} diff --git a/resources/data_container/1.0.0/actions/run.yaml b/resources/data_container/1.0.0/actions/run.yaml deleted file mode 100644 index b61979e4..00000000 --- a/resources/data_container/1.0.0/actions/run.yaml +++ /dev/null @@ -1,24 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - docker: - name: {{ resource_name }} - image: {{ image }} - state: running - net: host - {% if ports.value %} - ports: - {% for port in ports.value %} - - {{ port['value'] }}:{{ port['value'] }} - {% endfor %} - {% endif %} - {% if host_binds.value %} - volumes: - # TODO: host_binds might need more work - # Currently it's not that trivial to pass custom src: dst here - # (when a config variable is passed here from other resource) - # so we mount it to the same directory as on host - {% for bind in host_binds.value %} - - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} - {% endfor %} - {% endif %} diff --git a/resources/data_container/1.0.0/meta.yaml b/resources/data_container/1.0.0/meta.yaml deleted file mode 100644 index 76d0200e..00000000 --- a/resources/data_container/1.0.0/meta.yaml +++ /dev/null @@ -1,12 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - type: str! - value: - image: - type: str! - value: - export_volumes: - type: str! - value: diff --git a/resources/dnsmasq/1.0.0/actions/exclude_mac_pxe.yaml b/resources/dnsmasq/1.0.0/actions/exclude_mac_pxe.yaml deleted file mode 100644 index 91b16416..00000000 --- a/resources/dnsmasq/1.0.0/actions/exclude_mac_pxe.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - - tasks: - - lineinfile: create=yes dest=/etc/dnsmasq.d/no_pxe_{{exclude_mac_pxe | replace(':', '_')}}.conf line="dhcp-host={{exclude_mac_pxe}},set:nopxe" - - shell: service dnsmasq restart diff --git a/resources/dnsmasq/1.0.0/actions/run.yaml b/resources/dnsmasq/1.0.0/actions/run.yaml deleted file mode 100644 index 9c29505e..00000000 --- a/resources/dnsmasq/1.0.0/actions/run.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- hosts: [{{host}}] - sudo: yes diff --git a/resources/dnsmasq/1.0.0/meta.yaml b/resources/dnsmasq/1.0.0/meta.yaml deleted file mode 100644 index d9060965..00000000 --- a/resources/dnsmasq/1.0.0/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -handler: ansible -version: 1.0.0 - -actions: - exclude_mac_pxe: exclude_mac_pxe.yaml - run: run.yaml - -input: - ip: - schema: str! - value: - - exclude_mac_pxe: - schema: str! 
- value: - -tags: [resources=dnsmasq] diff --git a/resources/docker/1.0.0/actions/run.yaml b/resources/docker/1.0.0/actions/run.yaml deleted file mode 100644 index 4d582f84..00000000 --- a/resources/docker/1.0.0/actions/run.yaml +++ /dev/null @@ -1,9 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: docker --version - ignore_errors: true - register: docker_version - - shell: curl -sSL https://get.docker.com/ | sudo sh - when: docker_version|failed diff --git a/resources/docker/1.0.0/meta.yaml b/resources/docker/1.0.0/meta.yaml deleted file mode 100644 index aaedfe94..00000000 --- a/resources/docker/1.0.0/meta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -handler: ansible -version: 1.0.0 - -input: - ip: - schema: str! - value: - # ssh_user: - # schema: str! - # value: - # ssh_key: - # schema: str! - # value: - -tags: [resources/docker] diff --git a/resources/docker_container/1.0.0/actions/remove.yaml b/resources/docker_container/1.0.0/actions/remove.yaml deleted file mode 100644 index 50b041f7..00000000 --- a/resources/docker_container/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: docker stop {{ resource_name }} - - shell: docker rm {{ resource_name }} diff --git a/resources/docker_container/1.0.0/actions/run.yaml b/resources/docker_container/1.0.0/actions/run.yaml deleted file mode 100644 index f0a6e963..00000000 --- a/resources/docker_container/1.0.0/actions/run.yaml +++ /dev/null @@ -1,37 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - docker: - name: {{ resource_name }} - image: {{ image }} - state: running - net: host - {% if ports %} - ports: - {% for port in ports %} - - {{ port }}:{{ port }} - {% endfor %} - expose: - {% for port in ports %} - - {{ port }} - {% endfor %} - {% endif %} - - {% if host_binds.value %} - volumes: - # TODO: host_binds might need more work - # Currently it's not that trivial to pass custom src: dst here - # (when a config variable is passed here from other resource) - # so we mount it to the same directory as on host - {% for bind in host_binds.value %} - - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} - {% endfor %} - {% endif %} - - {% if env %} - env: - {% for key, value in env.iteritems() %} - {{ key }}: {{ value }} - {% endfor %} - {% endif %} diff --git a/resources/docker_container/1.0.0/actions/update.yaml b/resources/docker_container/1.0.0/actions/update.yaml deleted file mode 100644 index 727a3057..00000000 --- a/resources/docker_container/1.0.0/actions/update.yaml +++ /dev/null @@ -1,37 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - docker: - name: {{ resource_name }} - image: {{ image }} - state: reloaded - net: host - {% if ports %} - ports: - {% for port in ports %} - - {{ port }}:{{ port }} - {% endfor %} - expose: - {% for port in ports %} - - {{ port }} - {% endfor %} - {% endif %} - - {% if host_binds.value %} - volumes: - # TODO: host_binds might need more work - # Currently it's not that trivial to pass custom src: dst here - # (when a config variable is passed here from other resource) - # so we mount it to the same directory as on host - {% for bind in host_binds.value %} - - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} - {% endfor %} - {% endif %} - - {% if env %} - env: - {% for key, value in env.iteritems() %} - {{ key }}: {{ value }} - {% endfor %} - {% endif %} diff --git a/resources/docker_container/1.0.0/meta.yaml 
b/resources/docker_container/1.0.0/meta.yaml deleted file mode 100644 index 8526f154..00000000 --- a/resources/docker_container/1.0.0/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - image: - schema: str! - value: - ports: - schema: [int] - value: [] - host_binds: - schema: [{value: {src: str, dst: str, mode: str}}] - value: [] - volume_binds: - schema: [{src: str, dst: str, mode: str}] - value: [] - env: - schema: {} - value: {} - # ssh_user: - # schema: str! - # value: [] - # ssh_key: - # schema: str! - # value: [] - -tags: [resource/container] diff --git a/resources/ex_managed/1.0.0/managers/manager.py b/resources/ex_managed/1.0.0/managers/manager.py deleted file mode 100755 index 588c0e0b..00000000 --- a/resources/ex_managed/1.0.0/managers/manager.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python - -import sys -import json - -data = json.loads(sys.stdin.read()) - -rst = {'val_x_val': int(data['val'])**2} - -sys.stdout.write(json.dumps(rst)) diff --git a/resources/ex_managed/1.0.0/meta.yaml b/resources/ex_managed/1.0.0/meta.yaml deleted file mode 100644 index c09f43af..00000000 --- a/resources/ex_managed/1.0.0/meta.yaml +++ /dev/null @@ -1,11 +0,0 @@ -handler: none -version: 1.0.0 -managers: - - managers/manager.py -input: - val: - schema: int! - value: 2 - val_x_val: - schema: int - value: diff --git a/resources/file/1.0.0/actions/remove.sh b/resources/file/1.0.0/actions/remove.sh deleted file mode 100644 index dc21c836..00000000 --- a/resources/file/1.0.0/actions/remove.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -rm {{ path }} diff --git a/resources/file/1.0.0/actions/run.sh b/resources/file/1.0.0/actions/run.sh deleted file mode 100644 index 461a550e..00000000 --- a/resources/file/1.0.0/actions/run.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -touch {{ path }} diff --git a/resources/file/1.0.0/meta.yaml b/resources/file/1.0.0/meta.yaml deleted file mode 100644 index 10843aa9..00000000 --- a/resources/file/1.0.0/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -handler: shell -version: 1.0.0 -input: - path: - schema: str! - value: /tmp/test_file diff --git a/resources/fuel_library/1.0.0/actions/run.sh b/resources/fuel_library/1.0.0/actions/run.sh deleted file mode 100644 index e16245b4..00000000 --- a/resources/fuel_library/1.0.0/actions/run.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -mkdir -p {{temp_directory}} - -pushd {{temp_directory}} -if [ ! -d fuel-library ] -then - git clone -b {{ git['branch'] }} {{ git['repository'] }} -else - pushd ./fuel-library - git pull - popd -fi -pushd ./fuel-library/deployment -./update_modules.sh -popd - -mkdir -p {{puppet_modules}} -cp -r ./fuel-library/deployment/puppet/* {{puppet_modules}} -popd diff --git a/resources/fuel_library/1.0.0/meta.yaml b/resources/fuel_library/1.0.0/meta.yaml deleted file mode 100644 index 7600b9f3..00000000 --- a/resources/fuel_library/1.0.0/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -handler: shell -version: 1.0.0 -input: - ip: - schema: str! - value: - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/stackforge/fuel-library', - branch: 'stable/7.0'} - temp_directory: - schema: str! - value: /tmp/solar - puppet_modules: - schema: str! 
- value: /etc/fuel/modules -tags: [] diff --git a/resources/glance_api_service/1.0.0/actions/remove.yaml b/resources/glance_api_service/1.0.0/actions/remove.yaml deleted file mode 100644 index 9b145ab0..00000000 --- a/resources/glance_api_service/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: glance api container - docker: - name: {{ resource_name }} - image: {{ image }} - state: absent diff --git a/resources/glance_api_service/1.0.0/actions/run.yaml b/resources/glance_api_service/1.0.0/actions/run.yaml deleted file mode 100644 index 7a5b8be4..00000000 --- a/resources/glance_api_service/1.0.0/actions/run.yaml +++ /dev/null @@ -1,27 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - docker: - command: /bin/bash -c "glance-manage db_sync && /usr/bin/glance-api" - #command: /usr/bin/glance-api - name: {{ resource_name }} - image: {{ image }} - state: running - expose: - - 9393 - ports: - - {{ ports.value[0]['value'][0]['value'] }}:9393 - {% if host_binds.value %} - volumes: - # TODO: host_binds might need more work - # Currently it's not that trivial to pass custom src: dst here - # (when a config variable is passed here from other resource) - # so we mount it to the same directory as on host - {% for bind in host_binds.value %} - - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} - {% endfor %} - {% endif %} - - - name: wait for glance api - wait_for: host={{ ip }} port={{ ports.value[0]['value']['value'] }} timeout=20 diff --git a/resources/glance_api_service/1.0.0/meta.yaml b/resources/glance_api_service/1.0.0/meta.yaml deleted file mode 100644 index de9ecb31..00000000 --- a/resources/glance_api_service/1.0.0/meta.yaml +++ /dev/null @@ -1,42 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - image: - schema: str! - value: - ports: - schema: [{value: [{value: int}]}] - value: [] - host_binds: - schema: [{value: {src: str, dst: str, mode: str}}] - value: [] - volume_binds: - schema: [{src: str, dst: str, mode: str}] - value: [] - # ssh_user: - # schema: str! - # value: [] - # ssh_key: - # schema: str! - # value: [] - - db_password: - schema: str! - value: - keystone_admin_token: - schema: str! - value: - keystone_password: - schema: str! - value: - keystone_host: - schema: str! - value: - keystone_port: - schema: int! 
- value: - -tags: [resource/container] diff --git a/resources/glance_api_service/1.0.0/test.py b/resources/glance_api_service/1.0.0/test.py deleted file mode 100644 index e683b55a..00000000 --- a/resources/glance_api_service/1.0.0/test.py +++ /dev/null @@ -1,24 +0,0 @@ -import requests - -from solar.core.log import log -from solar.core import validation - - -def test(resource): - log.debug('Testing glance_service') - - args = resource.args - - token, _ = validation.validate_token( - keystone_host=args['keystone_host'], - keystone_port=args['keystone_port'], - user='glance_admin', - tenant='services', - password=args['keystone_password'], - ) - - images = requests.get( - 'http://%s:%s/v1/images' % (resource.args['ip'], 9393), - headers={'X-Auth-Token': token} - ) - assert images.json() == {'images': []} diff --git a/resources/glance_config/1.0.0/actions/remove.yaml b/resources/glance_config/1.0.0/actions/remove.yaml deleted file mode 100644 index 95cf18da..00000000 --- a/resources/glance_config/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,5 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: echo 'removed' diff --git a/resources/glance_config/1.0.0/actions/run.yaml b/resources/glance_config/1.0.0/actions/run.yaml deleted file mode 100644 index bc265529..00000000 --- a/resources/glance_config/1.0.0/actions/run.yaml +++ /dev/null @@ -1,37 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - vars: - ip: {{ ip }} - api_port: {{ api_port }} - registry_port: {{ registry_port }} - keystone_ip: {{ keystone_ip }} - keystone_port: {{ keystone_port }} - keystone_admin_user: {{ keystone_admin_user }} - keystone_admin_password: {{ keystone_admin_password }} - keystone_admin_port: {{ keystone_admin_port }} - keystone_admin_tenant: {{ keystone_admin_tenant }} - mysql_ip: {{ mysql_ip }} - mysql_db: {{ mysql_db }} - mysql_user: {{ mysql_user }} - mysql_password: {{ mysql_password }} - config_dir: {src: {{ config_dir.value['src'] }}, dst: {{ config_dir.value['dst'] }}} - tasks: - - file: path={{ config_dir.value['src'] }}/ state=directory - - file: path={{ config_dir.value['src'] }}/glance-api.conf state=touch - - file: path={{ config_dir.value['src'] }}/glance-api-paste.ini state=touch - - file: path={{ config_dir.value['src'] }}/glance-cache.conf state=touch - - file: path={{ config_dir.value['src'] }}/glance-registry.conf state=touch - - file: path={{ config_dir.value['src'] }}/glance-registry-paste.ini state=touch - - file: path={{ config_dir.value['src'] }}/glance-scrubber.conf state=touch - - file: path={{ config_dir.value['src'] }}/policy.json state=touch - - file: path={{ config_dir.value['src'] }}/schema-image.json state=touch - - file: path={{ config_dir.value['src'] }}/exports state=touch - - template: src={{templates_dir}}/glance-api.conf dest={{ config_dir.value['src'] }}/glance-api.conf - - template: src={{templates_dir}}/glance-api-paste.ini dest={{ config_dir.value['src'] }}/glance-api-paste.ini - - template: src={{templates_dir}}/glance-cache.conf dest={{ config_dir.value['src'] }}/glance-cache.conf - - template: src={{templates_dir}}/glance-registry.conf dest={{ config_dir.value['src'] }}/glance-registry.conf - - template: src={{templates_dir}}/glance-registry-paste.ini dest={{ config_dir.value['src'] }}/glance-registry-paste.ini - - template: src={{templates_dir}}/glance-scrubber.conf dest={{ config_dir.value['src'] }}/glance-scrubber.conf - - template: src={{templates_dir}}/policy.json dest={{ config_dir.value['src'] }}/policy.json - - template: src={{templates_dir}}/schema-image.json 
dest={{ config_dir.value['src'] }}/schema-image.json - - template: src={{templates_dir}}/exports dest={{ config_dir.value['src'] }}/glance-export diff --git a/resources/glance_config/1.0.0/meta.yaml b/resources/glance_config/1.0.0/meta.yaml deleted file mode 100644 index e617ff46..00000000 --- a/resources/glance_config/1.0.0/meta.yaml +++ /dev/null @@ -1,52 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - # ssh_user: - # schema: str! - # value: [] - # ssh_key: - # schema: str! - # value: [] - - config_dir: - schema: {src: str!, dst: str!, mode: str} - value: {src: /etc/solar/glance, dst: /etc/glance, mode: rw} - api_port: - schema: int! - value: 9292 - registry_port: - schema: int! - value: 9191 - keystone_ip: - schema: str! - value: - keystone_port: - schema: int! - value: 5000 - keystone_admin_user: - schema: str! - value: glance_admin - keystone_admin_password: - schema: str! - value: password1234 - keystone_admin_port: - schema: int! - value: - keystone_admin_tenant: - schema: str! - value: service_admins - mysql_ip: - schema: str! - value: - mysql_db: - schema: str! - value: - mysql_user: - schema: str! - value: - mysql_password: - schema: str! - value: diff --git a/resources/glance_config/1.0.0/templates/exports b/resources/glance_config/1.0.0/templates/exports deleted file mode 100644 index d9d35bc8..00000000 --- a/resources/glance_config/1.0.0/templates/exports +++ /dev/null @@ -1,4 +0,0 @@ -export OS_TENANT_NAME={{ keystone_admin_tenant }} -export OS_USERNAME={{ keystone_admin_user }} -export OS_PASSWORD={{ keystone_admin_password }} -export OS_AUTH_URL=http://{{ keystone_ip }}:{{ keystone_admin_port }}/v2.0 \ No newline at end of file diff --git a/resources/glance_config/1.0.0/templates/glance-api-paste.ini b/resources/glance_config/1.0.0/templates/glance-api-paste.ini deleted file mode 100644 index 03667961..00000000 --- a/resources/glance_config/1.0.0/templates/glance-api-paste.ini +++ /dev/null @@ -1,82 +0,0 @@ -# Use this pipeline for no auth or image caching - DEFAULT -[pipeline:glance-api] -#pipeline = versionnegotiation osprofiler unauthenticated-context rootapp -pipeline = versionnegotiation authtoken context appv1app - -# Use this pipeline for image caching and no auth -[pipeline:glance-api-caching] -pipeline = versionnegotiation osprofiler unauthenticated-context cache rootapp - -# Use this pipeline for caching w/ management interface but no auth -[pipeline:glance-api-cachemanagement] -pipeline = versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp - -# Use this pipeline for keystone auth -[pipeline:glance-api-keystone] -pipeline = versionnegotiation osprofiler authtoken context rootapp - -# Use this pipeline for keystone auth with image caching -[pipeline:glance-api-keystone+caching] -pipeline = versionnegotiation osprofiler authtoken context cache rootapp - -# Use this pipeline for keystone auth with caching and cache management -[pipeline:glance-api-keystone+cachemanagement] -pipeline = versionnegotiation osprofiler authtoken context cache cachemanage rootapp - -# Use this pipeline for authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user. -[pipeline:glance-api-trusted-auth] -pipeline = versionnegotiation osprofiler context rootapp - -# Use this pipeline for authZ only. 
This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user and uses cache management -[pipeline:glance-api-trusted-auth+cachemanagement] -pipeline = versionnegotiation osprofiler context cache cachemanage rootapp - -[composite:rootapp] -paste.composite_factory = glance.api:root_app_factory -/: apiversions -/v1: apiv1app -/v2: apiv2app - -[app:apiversions] -paste.app_factory = glance.api.versions:create_resource - -[app:apiv1app] -paste.app_factory = glance.api.v1.router:API.factory - -[app:apiv2app] -paste.app_factory = glance.api.v2.router:API.factory - -[filter:versionnegotiation] -paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - -[filter:cache] -paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory - -[filter:cachemanage] -paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory -identity_uri = http://{{ keystone_ip }}:{{ keystone_admin_port }} -admin_user = {{ keystone_admin_user }} -admin_tenant_name = {{ keystone_admin_tenant }} -admin_password = {{ keystone_admin_password }} -#delay_auth_decision = true - -[filter:gzip] -paste.filter_factory = glance.api.middleware.gzip:GzipMiddleware.factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY -enabled = yes diff --git a/resources/glance_config/1.0.0/templates/glance-api.conf b/resources/glance_config/1.0.0/templates/glance-api.conf deleted file mode 100644 index 8ed38e41..00000000 --- a/resources/glance_config/1.0.0/templates/glance-api.conf +++ /dev/null @@ -1,35 +0,0 @@ -[DEFAULT] -default_store = file -bind_host = 0.0.0.0 -bind_port = {{ api_port }} -log_file = /var/log/glance/api.log -backlog = 4096 -registry_host = {{ ip }} -registry_port = {{ registry_port }} -registry_client_protocol = http - -delayed_delete = False -scrub_time = 43200 -scrubber_datadir = /var/lib/glance/scrubber -image_cache_dir = /var/lib/glance/image-cache/ - -[database] -connection = mysql://{{ mysql_user }}:{{ mysql_password }}@{{ mysql_ip }}/{{ mysql_db }} -backend = mysql - -[keystone_authtoken] -auth_uri = http://{{ keystone_ip }}:{{ keystone_port }}/v2.0 -identity_uri = http://{{ keystone_ip }}:{{ keystone_admin_port }} -admin_tenant_name = {{ keystone_admin_tenant }} -admin_user = {{ keystone_admin_user }} -admin_password = {{ keystone_admin_password }} -revocation_cache_time = 10 - -[paste_deploy] -flavor=keystone+cachemanagement - -[glance_store] -filesystem_store_datadir = /var/lib/glance/images/ -sheepdog_store_address = localhost -sheepdog_store_port = 7000 -sheepdog_store_chunk_size = 64 diff --git a/resources/glance_config/1.0.0/templates/glance-cache.conf b/resources/glance_config/1.0.0/templates/glance-cache.conf deleted file mode 100644 index b94bcaa0..00000000 --- a/resources/glance_config/1.0.0/templates/glance-cache.conf +++ /dev/null @@ -1,200 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Log to this file. 
Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -log_file = /var/log/glance/image-cache.log - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -#use_syslog = False - -# Directory that the Image Cache writes data to -image_cache_dir = /var/lib/glance/image-cache/ - -# Number of seconds after which we should consider an incomplete image to be -# stalled and eligible for reaping -image_cache_stall_time = 86400 - -# Max cache size in bytes -image_cache_max_size = 10737418240 - -# Address to find the registry server -registry_host = {{ ip }} - -# Port the registry server is listening on -registry_port = 9191 - -# Auth settings if using Keystone -# auth_url = http://127.0.0.1:5000/v2.0/ -# admin_tenant_name = %SERVICE_TENANT_NAME% -# admin_user = %SERVICE_USER% -# admin_password = %SERVICE_PASSWORD% - -# List of which store classes and store class locations are -# currently known to glance at startup. -# known_stores = glance.store.filesystem.Store, -# glance.store.http.Store, -# glance.store.rbd.Store, -# glance.store.s3.Store, -# glance.store.swift.Store, -# glance.store.sheepdog.Store, -# glance.store.cinder.Store, -# glance.store.vmware_datastore.Store, - -# ============ Filesystem Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -filesystem_store_datadir = /var/lib/glance/images/ - -# ============ Swift Store Options ============================= - -# Version of the authentication service to use -# Valid versions are '2' for keystone and '1' for swauth and rackspace -swift_store_auth_version = 2 - -# Address where the Swift authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'https://' -# For swauth, use something like '127.0.0.1:8080/v1.0/' -swift_store_auth_address = 127.0.0.1:5000/v2.0/ - -# User to authenticate against the Swift authentication service -# If you use Swift authentication service, set it to 'account':'user' -# where 'account' is a Swift storage account and 'user' -# is a user in that account -swift_store_user = jdoe:jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) -# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. 
https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'http://' -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? -s3_store_create_bucket_on_put = False - -# When sending images to S3, the data will first be written to a -# temporary buffer on disk. By default the platform's temporary directory -# will be used. If required, an alternative directory can be specified here. -# s3_store_object_buffer_dir = /path/to/dir - -# ============ Cinder Store Options =========================== - -# Info to match when looking for cinder in the service catalog -# Format is : separated values of the form: -# :: (string value) -#cinder_catalog_info = volume:cinder:publicURL - -# Override service catalog lookup with template for cinder endpoint -# e.g. http://localhost:8776/v1/%(project_id)s (string value) -#cinder_endpoint_template = - -# Region name of this node (string value) -#os_region_name = - -# Location of ca certicates file to use for cinder client requests -# (string value) -#cinder_ca_certificates_file = - -# Number of cinderclient retries on failed http calls (integer value) -#cinder_http_retries = 3 - -# Allow to perform insecure SSL requests to cinder (boolean value) -#cinder_api_insecure = False - -# ============ VMware Datastore Store Options ===================== - -# ESX/ESXi or vCenter Server target system. -# The server value can be an IP address or a DNS name -# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com -#vmware_server_host = - -# Server username (string value) -#vmware_server_username = - -# Server password (string value) -#vmware_server_password = - -# Inventory path to a datacenter (string value) -# Value optional when vmware_server_ip is an ESX/ESXi host: if specified -# should be `ha-datacenter`. 
-#vmware_datacenter_path = - -# Datastore associated with the datacenter (string value) -#vmware_datastore_name = - -# The number of times we retry on failures -# e.g., socket error, etc (integer value) -#vmware_api_retry_count = 10 - -# The interval used for polling remote tasks -# invoked on VMware ESX/VC server in seconds (integer value) -#vmware_task_poll_interval = 5 - -# Absolute path of the folder containing the images in the datastore -# (string value) -#vmware_store_image_dir = /openstack_glance - -# Allow to perform insecure SSL requests to the target system (boolean value) -#vmware_api_insecure = False - -# ================= Security Options ========================== - -# AES key for encrypting store 'location' metadata, including -# -- if used -- Swift or S3 credentials -# Should be set to a random string of length 16, 24 or 32 bytes -# metadata_encryption_key = <16, 24 or 32 char registry metadata key> diff --git a/resources/glance_config/1.0.0/templates/glance-registry-paste.ini b/resources/glance_config/1.0.0/templates/glance-registry-paste.ini deleted file mode 100644 index 01994804..00000000 --- a/resources/glance_config/1.0.0/templates/glance-registry-paste.ini +++ /dev/null @@ -1,35 +0,0 @@ -# Use this pipeline for no auth - DEFAULT -[pipeline:glance-registry] -#pipeline = osprofiler unauthenticated-context registryapp -pipeline = authtoke context registryapp - -# Use this pipeline for keystone auth -[pipeline:glance-registry-keystone] -pipeline = osprofiler authtoken context registryapp - -# Use this pipeline for authZ only. This means that the registry will treat a -# user as authenticated without making requests to keystone to reauthenticate -# the user. -[pipeline:glance-registry-trusted-auth] -pipeline = osprofiler context registryapp - -[app:registryapp] -paste.app_factory = glance.registry.api:API.factory - -[filter:context] -paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory - -[filter:unauthenticated-context] -paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory -identity_uri = http://{{ keystone_ip }}:{{ keystone_admin_port }} -admin_user = {{ keystone_admin_user }} -admin_tenant_name = {{ keystone_admin_tenant }} -admin_password = {{ keystone_admin_password }} - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY -enabled = yes diff --git a/resources/glance_config/1.0.0/templates/glance-registry.conf b/resources/glance_config/1.0.0/templates/glance-registry.conf deleted file mode 100644 index 7efe994a..00000000 --- a/resources/glance_config/1.0.0/templates/glance-registry.conf +++ /dev/null @@ -1,22 +0,0 @@ -[DEFAULT] -bind_host = 0.0.0.0 -bind_port = 9191 -log_file = /var/log/glance/registry.log -backlog = 4096 -api_limit_max = 1000 -limit_param_default = 25 - -[database] -backend = mysql -connection = mysql://{{ mysql_user }}:{{ mysql_password }}@{{ mysql_ip }}/{{ mysql_db }} - -[keystone_authtoken] -auth_uri = http://{{ keystone_ip }}:{{ keystone_port }}/v2.0 -identity_uri = http://{{ keystone_ip }}:{{ keystone_admin_port }} -admin_tenant_name = {{ keystone_admin_tenant }} -admin_user = {{ keystone_admin_user }} -admin_password = {{ keystone_admin_password }} - -[paste_deploy] -flavor=keystone -[profiler] diff --git a/resources/glance_config/1.0.0/templates/glance-scrubber.conf b/resources/glance_config/1.0.0/templates/glance-scrubber.conf 
deleted file mode 100644 index fc98f3c6..00000000 --- a/resources/glance_config/1.0.0/templates/glance-scrubber.conf +++ /dev/null @@ -1,108 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -log_file = /var/log/glance/scrubber.log - -# Send logs to syslog (/dev/log) instead of to file specified by `log_file` -#use_syslog = False - -# Should we run our own loop or rely on cron/scheduler to run us -daemon = False - -# Loop time between checking for new items to schedule for delete -wakeup_time = 300 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-api.conf -scrubber_datadir = /var/lib/glance/scrubber - -# Only one server in your deployment should be designated the cleanup host -cleanup_scrubber = False - -# pending_delete items older than this time are candidates for cleanup -cleanup_scrubber_time = 86400 - -# Address to find the registry server for cleanups -registry_host = {{ ip }} - -# Port the registry server is listening on -registry_port = 9191 - -# Auth settings if using Keystone -# auth_url = http://127.0.0.1:5000/v2.0/ -# admin_tenant_name = %SERVICE_TENANT_NAME% -# admin_user = %SERVICE_USER% -# admin_password = %SERVICE_PASSWORD% - -# Directory to use for lock files. Default to a temp directory -# (string value). This setting needs to be the same for both -# glance-scrubber and glance-api. -#lock_path= - -# API to use for accessing data. Default value points to sqlalchemy -# package, it is also possible to use: glance.db.registry.api -#data_api = glance.db.sqlalchemy.api - -# ================= Security Options ========================== - -# AES key for encrypting store 'location' metadata, including -# -- if used -- Swift or S3 credentials -# Should be set to a random string of length 16, 24 or 32 bytes -#metadata_encryption_key = <16, 24 or 32 char registry metadata key> - -# ================= Database Options ===============+========== - -[database] - -# The SQLAlchemy connection string used to connect to the -# database (string value) -#connection=sqlite:////glance/openstack/common/db/$sqlite_db - -# The SQLAlchemy connection string used to connect to the -# slave database (string value) -#slave_connection= - -# timeout before idle sql connections are reaped (integer -# value) -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -#max_pool_size= - -# maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -#max_retries=10 - -# interval between retries of opening a sql connection -# (integer value) -#retry_interval=10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -#max_overflow= - -# Verbosity of SQL debugging information. 
0=None, -# 100=Everything (integer value) -#connection_debug=0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -#connection_trace=false - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -#pool_timeout= diff --git a/resources/glance_config/1.0.0/templates/policy.json b/resources/glance_config/1.0.0/templates/policy.json deleted file mode 100644 index 325f00b2..00000000 --- a/resources/glance_config/1.0.0/templates/policy.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "context_is_admin": "role:admin", - "default": "", - - "add_image": "", - "delete_image": "", - "get_image": "", - "get_images": "", - "modify_image": "", - "publicize_image": "role:admin", - "copy_from": "", - - "download_image": "", - "upload_image": "", - - "delete_image_location": "", - "get_image_location": "", - "set_image_location": "", - - "add_member": "", - "delete_member": "", - "get_member": "", - "get_members": "", - "modify_member": "", - - "manage_image_cache": "role:admin", - - "get_task": "", - "get_tasks": "", - "add_task": "", - "modify_task": "", - - "get_metadef_namespace": "", - "get_metadef_namespaces":"", - "modify_metadef_namespace":"", - "add_metadef_namespace":"", - - "get_metadef_object":"", - "get_metadef_objects":"", - "modify_metadef_object":"", - "add_metadef_object":"", - - "list_metadef_resource_types":"", - "get_metadef_resource_type":"", - "add_metadef_resource_type_association":"", - - "get_metadef_property":"", - "get_metadef_properties":"", - "modify_metadef_property":"", - "add_metadef_property":"" - -} diff --git a/resources/glance_config/1.0.0/templates/schema-image.json b/resources/glance_config/1.0.0/templates/schema-image.json deleted file mode 100644 index 5aafd6b3..00000000 --- a/resources/glance_config/1.0.0/templates/schema-image.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "kernel_id": { - "type": "string", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." - }, - "ramdisk_id": { - "type": "string", - "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", - "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." - }, - "instance_uuid": { - "type": "string", - "description": "ID of instance used to create this image." - }, - "architecture": { - "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", - "type": "string" - }, - "os_distro": { - "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", - "type": "string" - }, - "os_version": { - "description": "Operating system version as specified by the distributor", - "type": "string" - } -} diff --git a/resources/glance_puppet/1.0.0/README.md b/resources/glance_puppet/1.0.0/README.md deleted file mode 100644 index 74be1851..00000000 --- a/resources/glance_puppet/1.0.0/README.md +++ /dev/null @@ -1,194 +0,0 @@ -# Glance (API) resource for puppet handler - -Controls a live cycle of the glance entities, -like the main puppet class, auth, DB, AMQP, packages, -keystone user, role and endpoint, API service. Also configures -glance file backend. 
- -# Parameters - -source https://github.com/openstack/puppet-glance/tree/5.1.0/manifests/init.pp - - ``package_ensure`` - Ensure state for package. - (Optional) Defaults to 'present'. - - ``filesystem_store_datadir`` - Location where dist images are stored. - (Optional) Defaults to /var/lib/glance/images/. - -source https://github.com/openstack/puppet-glance/blob/5.1.0/manifests/api.pp - - ``keystone_password`` - (required) Password used to authentication. - - ``verbose`` - (optional) Rather to log the glance api service at verbose level. - Default: false - - ``debug`` - (optional) Rather to log the glance api service at debug level. - Default: false - - ``bind_host`` - (optional) The address of the host to bind to. - Default: 0.0.0.0 - - ``bind_port`` - (optional) The port the server should bind to. - Default: 9292 - - ``backlog`` - (optional) Backlog requests when creating socket - Default: 4096 - - ``workers`` - (optional) Number of Glance API worker processes to start - Default: $::processorcount - - ``log_file`` - (optional) The path of file used for logging - If set to boolean false, it will not log to any file. - Default: /var/log/glance/api.log - - ``log_dir`` - (optional) directory to which glance logs are sent. - If set to boolean false, it will not log to any directory. - Defaults to '/var/log/glance' - - ``registry_host`` - (optional) The address used to connect to the registry service. - Default: 0.0.0.0 - - ``registry_port`` - (optional) The port of the Glance registry service. - Default: 9191 - - ``registry_client_protocol`` - (optional) The protocol of the Glance registry service. - Default: http - - ``auth_type`` - (optional) Type is authorization being used. - Defaults to 'keystone' - - `` auth_host`` - (optional) Host running auth service. - Defaults to '127.0.0.1'. - - ``auth_url`` - (optional) Authentication URL. - Defaults to 'http://localhost:5000/v2.0'. - - `` auth_port`` - (optional) Port to use for auth service on auth_host. - Defaults to '35357'. - - `` auth_uri`` - (optional) Complete public Identity API endpoint. - Defaults to false. - - ``auth_admin_prefix`` - (optional) Path part of the auth url. - This allow admin auth URIs like http://auth_host:35357/keystone/admin. - (where '/keystone/admin' is auth_admin_prefix) - Defaults to false for empty. If defined, should be a string with a leading '/' and no trailing '/'. - - `` auth_protocol`` - (optional) Protocol to use for auth. - Defaults to 'http'. - - ``pipeline`` - (optional) Partial name of a pipeline in your paste configuration file with the - service name removed. - Defaults to 'keystone+cachemanagement'. - - ``keystone_tenant`` - (optional) Tenant to authenticate to. - Defaults to services. - - ``keystone_user`` - (optional) User to authenticate as with keystone. - Defaults to 'glance'. - - ``sql_idle_timeout`` - (optional) Deprecated. Use database_idle_timeout instead - Defaults to false - - ``sql_connection`` - (optional) Deprecated. Use database_connection instead. - Defaults to false - - ``database_connection`` - (optional) Connection url to connect to nova database. - Defaults to 'sqlite:///var/lib/glance/glance.sqlite' - - ``database_idle_timeout`` - (optional) Timeout before idle db connections are reaped. - Defaults to 3600 - - ``use_syslog`` - (optional) Use syslog for logging. - Defaults to false. - - ``log_facility`` - (optional) Syslog facility to receive log lines. - Defaults to 'LOG_USER'. - - ``show_image_direct_url`` - (optional) Expose image location to trusted clients. 
- Defaults to false. - - ``purge_config`` - (optional) Whether to set only the specified config options - in the api config. - Defaults to false. - - ``cert_file`` - (optinal) Certificate file to use when starting API server securely - Defaults to false, not set - - ``key_file`` - (optional) Private key file to use when starting API server securely - Defaults to false, not set - - ``ca_file`` - (optional) CA certificate file to use to verify connecting clients - Defaults to false, not set - - ``mysql_module`` - (optional) Deprecated. Does nothing. - - ``known_stores`` - (optional)List of which store classes and store class locations are - currently known to glance at startup. - Defaults to false. - Example: ['glance.store.filesystem.Store','glance.store.http.Store'] - - ``image_cache_dir`` - (optional) Base directory that the Image Cache uses. - Defaults to '/var/lib/glance/image-cache'. - - ``os_region_name`` - (optional) Sets the keystone region to use. - Defaults to 'RegionOne'. - - ``validate`` - (optional) Whether to validate the service is working after any service refreshes - Defaults to false - - ``validation_options`` - (optional) Service validation options - Should be a hash of options defined in openstacklib::service_validation - If empty, defaults values are taken from openstacklib function. - Default command list images. - Require validate set at True. - Example: - glance::api::validation_options: - glance-api: - command: check_glance-api.py - path: /usr/bin:/bin:/usr/sbin:/sbin - provider: shell - tries: 5 - try_sleep: 10 - Defaults to {} \ No newline at end of file diff --git a/resources/glance_puppet/1.0.0/actions/remove.pp b/resources/glance_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index b825ba6c..00000000 --- a/resources/glance_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,11 +0,0 @@ -$resource = hiera($::resource_name) - -include glance::params - -class {'glance': - package_ensure => 'absent', -} - -package { [$glance::params::api_package_name, $::glance::params::package_name] : - ensure => 'absent', -} diff --git a/resources/glance_puppet/1.0.0/actions/run.pp b/resources/glance_puppet/1.0.0/actions/run.pp deleted file mode 100644 index ed2e3d13..00000000 --- a/resources/glance_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,101 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$db_user = $resource['input']['db_user'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_host = $resource['input']['db_host'] -$db_port = $resource['input']['db_port'] - -$filesystem_store_datadir = $resource['input']['filesystem_store_datadir'] - -$keystone_password = $resource['input']['keystone_password'] -$verbose = $resource['input']['verbose'] -$debug = $resource['input']['debug'] -$bind_host = $resource['input']['bind_host'] -$bind_port = $resource['input']['bind_port'] -$backlog = $resource['input']['backlog'] -$workers = $resource['input']['workers'] -$log_file = $resource['input']['log_file'] -$log_dir = $resource['input']['log_dir'] -$registry_host = $resource['input']['registry_host'] -$registry_port = $resource['input']['registry_port'] -$registry_client_protocol = $resource['input']['registry_client_protocol'] -$auth_type = $resource['input']['auth_type'] -$auth_host = $resource['input']['auth_host'] -$auth_url = $resource['input']['auth_url'] -$auth_port = $resource['input']['auth_port'] -$auth_uri = $resource['input']['auth_uri'] -$auth_admin_prefix = 
$resource['input']['auth_admin_prefix'] -$auth_protocol = $resource['input']['auth_protocol'] -$pipeline = $resource['input']['pipeline'] -$keystone_tenant = $resource['input']['keystone_tenant'] -$keystone_user = $resource['input']['keystone_user'] -$use_syslog = $resource['input']['use_syslog'] -$log_facility = $resource['input']['log_facility'] -$show_image_direct_url = $resource['input']['show_image_direct_url'] -$purge_config = $resource['input']['purge_config'] -$cert_file = $resource['input']['cert_file'] -$key_file = $resource['input']['key_file'] -$ca_file = $resource['input']['ca_file'] -$known_stores = $resource['input']['known_stores'] -$database_connection = $resource['input']['database_connection'] -$database_idle_timeout = $resource['input']['database_idle_timeout'] -$image_cache_dir = $resource['input']['image_cache_dir'] -$os_region_name = $resource['input']['os_region_name'] -$validate = $resource['input']['validate'] -$validation_options = $resource['input']['validation_options'] -$mysql_module = $resource['input']['mysql_module'] -$sql_idle_timeout = $resource['input']['sql_idle_timeout'] - -class {'glance': - package_ensure => 'present', -} - -class {'glance::api': - keystone_password => $keystone_password, - enabled => true, - manage_service => true, - verbose => $verbose, - debug => $debug, - bind_host => $bind_host, - bind_port => $bind_port, - backlog => $backlog, - workers => $workers, - log_file => $log_file, - log_dir => $log_dir, - registry_host => $registry_host, - registry_port => $registry_port, - registry_client_protocol => $registry_client_protocol, - auth_type => $auth_type, - auth_host => $auth_host, - auth_url => $auth_url, - auth_port => $auth_port, - auth_uri => $auth_uri, - auth_admin_prefix => $auth_admin_prefix, - auth_protocol => $auth_protocol, - pipeline => $pipeline, - keystone_tenant => $keystone_tenant, - keystone_user => $keystone_user, - use_syslog => $use_syslog, - log_facility => $log_facility, - show_image_direct_url => $show_image_direct_url, - purge_config => $purge_config, - cert_file => $cert_file, - key_file => $key_file, - ca_file => $ca_file, - known_stores => $known_stores, - database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}", - database_idle_timeout => $database_idle_timeout, - image_cache_dir => $image_cache_dir, - os_region_name => $os_region_name, - validate => $validate, - validation_options => $validation_options, - mysql_module => $mysql_module, - sql_idle_timeout => $sql_idle_timeout, -} - -class { 'glance::backend::file': - filesystem_store_datadir => $filesystem_store_datadir, -} diff --git a/resources/glance_puppet/1.0.0/actions/update.pp b/resources/glance_puppet/1.0.0/actions/update.pp deleted file mode 100644 index ed2e3d13..00000000 --- a/resources/glance_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,101 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$db_user = $resource['input']['db_user'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_host = $resource['input']['db_host'] -$db_port = $resource['input']['db_port'] - -$filesystem_store_datadir = $resource['input']['filesystem_store_datadir'] - -$keystone_password = $resource['input']['keystone_password'] -$verbose = $resource['input']['verbose'] -$debug = $resource['input']['debug'] -$bind_host = $resource['input']['bind_host'] -$bind_port = $resource['input']['bind_port'] -$backlog = $resource['input']['backlog'] -$workers = 
$resource['input']['workers'] -$log_file = $resource['input']['log_file'] -$log_dir = $resource['input']['log_dir'] -$registry_host = $resource['input']['registry_host'] -$registry_port = $resource['input']['registry_port'] -$registry_client_protocol = $resource['input']['registry_client_protocol'] -$auth_type = $resource['input']['auth_type'] -$auth_host = $resource['input']['auth_host'] -$auth_url = $resource['input']['auth_url'] -$auth_port = $resource['input']['auth_port'] -$auth_uri = $resource['input']['auth_uri'] -$auth_admin_prefix = $resource['input']['auth_admin_prefix'] -$auth_protocol = $resource['input']['auth_protocol'] -$pipeline = $resource['input']['pipeline'] -$keystone_tenant = $resource['input']['keystone_tenant'] -$keystone_user = $resource['input']['keystone_user'] -$use_syslog = $resource['input']['use_syslog'] -$log_facility = $resource['input']['log_facility'] -$show_image_direct_url = $resource['input']['show_image_direct_url'] -$purge_config = $resource['input']['purge_config'] -$cert_file = $resource['input']['cert_file'] -$key_file = $resource['input']['key_file'] -$ca_file = $resource['input']['ca_file'] -$known_stores = $resource['input']['known_stores'] -$database_connection = $resource['input']['database_connection'] -$database_idle_timeout = $resource['input']['database_idle_timeout'] -$image_cache_dir = $resource['input']['image_cache_dir'] -$os_region_name = $resource['input']['os_region_name'] -$validate = $resource['input']['validate'] -$validation_options = $resource['input']['validation_options'] -$mysql_module = $resource['input']['mysql_module'] -$sql_idle_timeout = $resource['input']['sql_idle_timeout'] - -class {'glance': - package_ensure => 'present', -} - -class {'glance::api': - keystone_password => $keystone_password, - enabled => true, - manage_service => true, - verbose => $verbose, - debug => $debug, - bind_host => $bind_host, - bind_port => $bind_port, - backlog => $backlog, - workers => $workers, - log_file => $log_file, - log_dir => $log_dir, - registry_host => $registry_host, - registry_port => $registry_port, - registry_client_protocol => $registry_client_protocol, - auth_type => $auth_type, - auth_host => $auth_host, - auth_url => $auth_url, - auth_port => $auth_port, - auth_uri => $auth_uri, - auth_admin_prefix => $auth_admin_prefix, - auth_protocol => $auth_protocol, - pipeline => $pipeline, - keystone_tenant => $keystone_tenant, - keystone_user => $keystone_user, - use_syslog => $use_syslog, - log_facility => $log_facility, - show_image_direct_url => $show_image_direct_url, - purge_config => $purge_config, - cert_file => $cert_file, - key_file => $key_file, - ca_file => $ca_file, - known_stores => $known_stores, - database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}", - database_idle_timeout => $database_idle_timeout, - image_cache_dir => $image_cache_dir, - os_region_name => $os_region_name, - validate => $validate, - validation_options => $validation_options, - mysql_module => $mysql_module, - sql_idle_timeout => $sql_idle_timeout, -} - -class { 'glance::backend::file': - filesystem_store_datadir => $filesystem_store_datadir, -} diff --git a/resources/glance_puppet/1.0.0/meta.yaml b/resources/glance_puppet/1.0.0/meta.yaml deleted file mode 100644 index 49edbd18..00000000 --- a/resources/glance_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,165 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - package_ensure: - schema: str - value: 'present' - filesystem_store_datadir: - schema: str - value: 
'/var/lib/glance/images/' - verbose: - schema: bool - value: false - debug: - schema: bool - value: false - bind_host: - schema: str - value: '0.0.0.0' - bind_port: - schema: int - value: 9292 - backlog: - schema: int - value: 4096 - workers: - schema: int - value: 1 - log_file: - schema: str - value: '/var/log/glance/api.log' - log_dir: - schema: str - value: '/var/log/glance' - registry_host: - schema: str - value: '0.0.0.0' - registry_port: - schema: int - value: 9191 - registry_client_protocol: - schema: str - value: 'http' - auth_type: - schema: str - value: 'keystone' - auth_host: - schema: str - value: '127.0.0.1' - auth_url: - schema: str - value: 'http://{{auth_host}}:{{auth_port}}/v2.0' - auth_port: - schema: int - value: 35357 - auth_uri: - schema: str - value: - auth_admin_prefix: - schema: str - value: - auth_protocol: - schema: str - value: 'http' - pipeline: - schema: str - value: 'keystone+cachemanagement' - keystone_tenant: - schema: str! - value: 'services' - keystone_user: - schema: str! - value: 'glance' - use_syslog: - schema: bool - value: false - log_facility: - schema: str - value: 'LOG_USER' - show_image_direct_url: - schema: bool - value: false - purge_config: - schema: bool - value: false - cert_file: - schema: str - value: - key_file: - schema: str - value: - ca_file: - schema: str - value: - known_stores: - schema: str - value: - database_connection: - schema: str - value: 'sqlite:///var/lib/glance/glance.sqlite' - database_idle_timeout: - schema: int - value: 3600 - image_cache_dir: - schema: str - value: '/var/lib/glance/image-cache' - os_region_name: - schema: str - value: 'RegionOne' - validate: - schema: bool - value: false - validation_options: - schema: {} - value: {} - mysql_module: - schema: str - value: - sql_idle_timeout: - schema: str - value: - sql_connection: - schema: str - value: - - module: - schema: {repository: str!, branch: str!} - value: {name: 'glance', type: 'git', url: 'https://github.com/openstack/puppet-glance', ref: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - db_user: - schema: str! - value: cinder - db_password: - schema: str! - value: cinder - db_name: - schema: str! - value: cinder - db_host: - schema: str! - value: - db_port: - schema: int! - value: - - keystone_host: - schema: str! - value: - keystone_port: - schema: int! - value: - keystone_password: - schema: str! 
- value: - -tags: [resource/glance_service, resources/glance, resource/glance_api_service] diff --git a/resources/glance_puppet/1.0.0/test.py b/resources/glance_puppet/1.0.0/test.py deleted file mode 100644 index 03d64d1c..00000000 --- a/resources/glance_puppet/1.0.0/test.py +++ /dev/null @@ -1,22 +0,0 @@ -import requests - -from solar.core.log import log -from solar.core import validation - - -def test(resource): - log.debug('Testing glance_puppet') - requests.get( - 'http://%s:%s' % (resource.args['ip'], resource.args['bind_port']) - ) - #TODO(bogdando) test packages installed and filesystem store datadir created - - args = resource.args - - token, _ = validation.validate_token( - keystone_host=args['keystone_host'], - keystone_port=args['keystone_port'], - user=args['keystone_user'], - tenant=args['keystone_tenant'], - password=args['keystone_password'], - ) diff --git a/resources/glance_registry_puppet/1.0.0/README.md b/resources/glance_registry_puppet/1.0.0/README.md deleted file mode 100644 index b111f722..00000000 --- a/resources/glance_registry_puppet/1.0.0/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Glance registry resource for puppet handler - -Configures glance registry service. - -# Parameters - -source https://github.com/openstack/puppet-glance/tree/5.1.0/manifests/registry.pp - - ``keystone_password`` - (required) The keystone password for administrative user - - ``package_ensure`` - (optional) Ensure state for package. Defaults to 'present'. On RedHat - platforms this setting is ignored and the setting from the glance class is - used because there is only one glance package. - - ``verbose`` - (optional) Enable verbose logs (true|false). Defaults to false. - - ``debug`` - (optional) Enable debug logs (true|false). Defaults to false. - - ``bind_host`` - (optional) The address of the host to bind to. Defaults to '0.0.0.0'. - - ``bind_port`` - (optional) The port the server should bind to. Defaults to '9191'. - - ``log_file`` - (optional) Log file for glance-registry. - If set to boolean false, it will not log to any file. - Defaults to '/var/log/glance/registry.log'. - - ``log_dir`` - (optional) directory to which glance logs are sent. - If set to boolean false, it will not log to any directory. - Defaults to '/var/log/glance' - - ``sql_idle_timeout`` - (optional) Deprecated. Use database_idle_timeout instead - Defaults to false - - ``sql_connection`` - (optional) Deprecated. Use database_connection instead. - Defaults to false - - ``database_connection`` - (optional) Connection url to connect to nova database. - Defaults to 'sqlite:///var/lib/glance/glance.sqlite' - - ``database_idle_timeout`` - (optional) Timeout before idle db connections are reaped. - Defaults to 3600 - - ``auth_type`` - (optional) Authentication type. Defaults to 'keystone'. - - ``auth_host`` - (optional) Address of the admin authentication endpoint. - Defaults to '127.0.0.1'. - - ``auth_port`` - (optional) Port of the admin authentication endpoint. Defaults to '35357'. - - ``auth_admin_prefix`` - (optional) path part of the auth url. - This allow admin auth URIs like http://auth_host:35357/keystone/admin. - (where '/keystone/admin' is auth_admin_prefix) - Defaults to false for empty. If defined, should be a string with a leading '/' and no trailing '/'. - - ``auth_protocol`` - (optional) Protocol to communicate with the admin authentication endpoint. - Defaults to 'http'. Should be 'http' or 'https'. - - ``auth_uri`` - (optional) Complete public Identity API endpoint. 
- - ``keystone_tenant`` - (optional) administrative tenant name to connect to keystone. - Defaults to 'services'. - - ``keystone_user`` - (optional) administrative user name to connect to keystone. - Defaults to 'glance'. - - ``use_syslog`` - (optional) Use syslog for logging. - Defaults to false. - - ``log_facility`` - (optional) Syslog facility to receive log lines. - Defaults to LOG_USER. - - ``purge_config`` - (optional) Whether to create only the specified config values in - the glance registry config file. - Defaults to false. - - ``cert_file`` - (optinal) Certificate file to use when starting registry server securely - Defaults to false, not set - - ``key_file`` - (optional) Private key file to use when starting registry server securely - Defaults to false, not set - - ``ca_file`` - (optional) CA certificate file to use to verify connecting clients - Defaults to false, not set - - ``sync_db`` - (Optional) Run db sync on the node. - Defaults to true - - ``mysql_module`` - (optional) Deprecated. Does nothing. diff --git a/resources/glance_registry_puppet/1.0.0/actions/remove.pp b/resources/glance_registry_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 5e62e4cf..00000000 --- a/resources/glance_registry_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,7 +0,0 @@ -$resource = hiera($::resource_name) - -class {'glance::registry': - enabled => false, - package_ensure => 'absent', - keystone_password => 'not important as removed' -} diff --git a/resources/glance_registry_puppet/1.0.0/actions/run.pp b/resources/glance_registry_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 1bfad2f8..00000000 --- a/resources/glance_registry_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,74 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$db_user = $resource['input']['db_user'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_host = $resource['input']['db_host'] -$db_port = $resource['input']['db_port'] - -$keystone_password = $resource['input']['keystone_password'] -$package_ensure = $resource['input']['package_ensure'] -$verbose = $resource['input']['verbose'] -$debug = $resource['input']['debug'] -$bind_host = $resource['input']['bind_host'] -$bind_port = $resource['input']['bind_port'] -$log_file = $resource['input']['log_file'] -$log_dir = $resource['input']['log_dir'] -$database_connection = $resource['input']['database_connection'] -$database_idle_timeout = $resource['input']['database_idle_timeout'] -$auth_type = $resource['input']['auth_type'] -$auth_host = $resource['input']['auth_host'] -$auth_port = $resource['input']['auth_port'] -$auth_admin_prefix = $resource['input']['auth_admin_prefix'] -$auth_uri = $resource['input']['auth_uri'] -$auth_protocol = $resource['input']['auth_protocol'] -$keystone_tenant = $resource['input']['keystone_tenant'] -$keystone_user = $resource['input']['keystone_user'] -$pipeline = $resource['input']['pipeline'] -$use_syslog = $resource['input']['use_syslog'] -$log_facility = $resource['input']['log_facility'] -$purge_config = $resource['input']['purge_config'] -$cert_file = $resource['input']['cert_file'] -$key_file = $resource['input']['key_file'] -$ca_file = $resource['input']['ca_file'] -$sync_db = $resource['input']['sync_db'] -$mysql_module = $resource['input']['mysql_module'] -$sql_idle_timeout = $resource['input']['sql_idle_timeout'] -$sql_connection = $resource['input']['sql_connection'] - -include glance::params - -class {'glance::registry': - 
keystone_password => $keystone_password, - enabled => true, - manage_service => true, - package_ensure => $package_ensure, - verbose => $verbose, - debug => $debug, - bind_host => $bind_host, - bind_port => $bind_port, - log_file => $log_file, - log_dir => $log_dir, - database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}", - database_idle_timeout => $database_idle_timeout, - auth_type => $auth_type, - auth_host => $auth_host, - auth_port => $auth_port, - auth_admin_prefix => $auth_admin_prefix, - auth_uri => $auth_uri, - auth_protocol => $auth_protocol, - keystone_tenant => $keystone_tenant, - keystone_user => $keystone_user, - pipeline => $pipeline, - use_syslog => $use_syslog, - log_facility => $log_facility, - purge_config => $purge_config, - cert_file => $cert_file, - key_file => $key_file, - ca_file => $ca_file, - sync_db => $sync_db, - mysql_module => $mysql_module, - sql_idle_timeout => $sql_idle_timeout, -} diff --git a/resources/glance_registry_puppet/1.0.0/actions/update.pp b/resources/glance_registry_puppet/1.0.0/actions/update.pp deleted file mode 100644 index 7169a3b5..00000000 --- a/resources/glance_registry_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,78 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$db_user = $resource['input']['db_user'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_host = $resource['input']['db_host'] -$db_port = $resource['input']['db_port'] - -$keystone_password = $resource['input']['keystone_password'] -$package_ensure = $resource['input']['package_ensure'] -$verbose = $resource['input']['verbose'] -$debug = $resource['input']['debug'] -$bind_host = $resource['input']['bind_host'] -$bind_port = $resource['input']['bind_port'] -$log_file = $resource['input']['log_file'] -$log_dir = $resource['input']['log_dir'] -$database_connection = $resource['input']['database_connection'] -$database_idle_timeout = $resource['input']['database_idle_timeout'] -$auth_type = $resource['input']['auth_type'] -$auth_host = $resource['input']['auth_host'] -$auth_port = $resource['input']['auth_port'] -$auth_admin_prefix = $resource['input']['auth_admin_prefix'] -$auth_uri = $resource['input']['auth_uri'] -$auth_protocol = $resource['input']['auth_protocol'] -$keystone_tenant = $resource['input']['keystone_tenant'] -$keystone_user = $resource['input']['keystone_user'] -$pipeline = $resource['input']['pipeline'] -$use_syslog = $resource['input']['use_syslog'] -$log_facility = $resource['input']['log_facility'] -$purge_config = $resource['input']['purge_config'] -$cert_file = $resource['input']['cert_file'] -$key_file = $resource['input']['key_file'] -$ca_file = $resource['input']['ca_file'] -$sync_db = $resource['input']['sync_db'] -$mysql_module = $resource['input']['mysql_module'] -$sql_idle_timeout = $resource['input']['sql_idle_timeout'] -$sql_connection = $resource['input']['sql_connection'] - -include glance::params - -class {'glance::registry': - keystone_password => $keystone_password, - enabled => true, - manage_service => true, - package_ensure => $package_ensure, - verbose => $verbose, - debug => $debug, - bind_host => $bind_host, - bind_port => $bind_port, - log_file => $log_file, - log_dir => $log_dir, - database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}", - database_idle_timeout => $database_idle_timeout, - auth_type => $auth_type, - auth_host => $auth_host, - auth_port => $auth_port, - 
auth_admin_prefix => $auth_admin_prefix, - auth_uri => $auth_uri, - auth_protocol => $auth_protocol, - keystone_tenant => $keystone_tenant, - keystone_user => $keystone_user, - pipeline => $pipeline, - use_syslog => $use_syslog, - log_facility => $log_facility, - purge_config => $purge_config, - cert_file => $cert_file, - key_file => $key_file, - ca_file => $ca_file, - sync_db => $sync_db, - mysql_module => $mysql_module, - sql_idle_timeout => $sql_idle_timeout, -} - -notify { "restart glance registry": - notify => Service["glance-registry"], -} diff --git a/resources/glance_registry_puppet/1.0.0/meta.yaml b/resources/glance_registry_puppet/1.0.0/meta.yaml deleted file mode 100644 index 3ad1b830..00000000 --- a/resources/glance_registry_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,129 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - package_ensure: - schema: str - value: 'present' - verbose: - schema: bool - value: false - debug: - schema: bool - value: false - bind_host: - schema: str - value: '0.0.0.0' - bind_port: - schema: int - value: 9191 - log_file: - schema: str - value: '/var/log/glance/registry.log' - log_dir: - schema: str - value: '/var/log/glance' - database_connection: - schema: str - value: 'sqlite:///var/lib/glance/glance.sqlite' - database_idle_timeout: - schema: int - value: 3600 - auth_type: - schema: str - value: 'keystone' - auth_host: - schema: str - value: '127.0.0.1' - auth_port: - schema: int - value: 35357 - auth_admin_prefix: - schema: str - value: - auth_uri: - schema: str - value: - auth_protocol: - schema: str - value: 'http' - keystone_tenant: - schema: str! - value: 'services' - keystone_user: - schema: str! - value: 'glance' - pipeline: - schema: str - value: 'keystone' - use_syslog: - schema: bool - value: false - log_facility: - schema: str - value: 'LOG_USER' - purge_config: - schema: bool - value: false - cert_file: - schema: str - value: - key_file: - schema: str - value: - ca_file: - schema: str - value: - sync_db: - schema: bool - value: true - mysql_module: - schema: str - value: - sql_idle_timeout: - schema: str - value: - sql_connection: - schema: str - value: - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-glance', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - db_user: - schema: str! - value: glance - db_password: - schema: str! - value: glance - db_name: - schema: str! - value: glance - db_host: - schema: str! - value: - db_port: - schema: str! - value: - - keystone_host: - schema: str! - value: - keystone_port: - schema: int! - value: - keystone_password: - schema: str! 
- value: - -tags: [resource/glance_registry_service, resources/glance_registry, resource/glance] diff --git a/resources/glance_registry_puppet/1.0.0/test.py b/resources/glance_registry_puppet/1.0.0/test.py deleted file mode 100644 index 3374e0e4..00000000 --- a/resources/glance_registry_puppet/1.0.0/test.py +++ /dev/null @@ -1,10 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing glance_registry_puppet') - requests.get( - 'http://%s:%s' % (resource.args['ip'], resource.args['bind_port']) - ) diff --git a/resources/glance_registry_service/1.0.0/actions/remove.yaml b/resources/glance_registry_service/1.0.0/actions/remove.yaml deleted file mode 100644 index b73c63a5..00000000 --- a/resources/glance_registry_service/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: glance registry container - docker: - name: {{ resource_name }} - image: {{ image }} - state: absent diff --git a/resources/glance_registry_service/1.0.0/actions/run.yaml b/resources/glance_registry_service/1.0.0/actions/run.yaml deleted file mode 100644 index c5a6801e..00000000 --- a/resources/glance_registry_service/1.0.0/actions/run.yaml +++ /dev/null @@ -1,32 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - docker: - #command: /bin/bash -c "glance-manage db_sync && /usr/bin/glance-registry" - command: /usr/bin/glance-registry - name: {{ resource_name }} - image: {{ image }} - state: running - net: host - {% if ports.value %} - ports: - {% for port in ports.value %} - {% for p in port['value'] %} - - {{ p['value'] }}:{{ p['value'] }} - {% endfor %} - {% endfor %} - {% endif %} - {% if host_binds.value %} - volumes: - # TODO: host_binds might need more work - # Currently it's not that trivial to pass custom src: dst here - # (when a config variable is passed here from other resource) - # so we mount it to the same directory as on host - {% for bind in host_binds.value %} - - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} - {% endfor %} - {% endif %} - - - name: wait for glance registry - wait_for: host={{ ip }} port=9191 timeout=20 diff --git a/resources/glance_registry_service/1.0.0/meta.yaml b/resources/glance_registry_service/1.0.0/meta.yaml deleted file mode 100644 index cbbcc4b7..00000000 --- a/resources/glance_registry_service/1.0.0/meta.yaml +++ /dev/null @@ -1,26 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - image: - schema: str! - value: cgenie/centos-rdo-glance-registry - ports: - schema: [{value: [{value: int}]}] - value: [] - host_binds: - schema: [{value: {src: str, dst: str, mode: str}}] - value: [] - volume_binds: - schema: [{src: str, dst: str, mode: str}] - value: [] - # ssh_user: - # schema: str! - # value: [] - # ssh_key: - # schema: str! - # value: [] - -tags: [resource/container] diff --git a/resources/haproxy_config/1.0.0/README.md b/resources/haproxy_config/1.0.0/README.md deleted file mode 100644 index c0ee178a..00000000 --- a/resources/haproxy_config/1.0.0/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# `haproxy_config` resource - -This resource represents configuration for the `haproxy_service` resource. -Each service represented by Haproxy is connected to this resource via -`haproxy_service_config` resource. This is because in Haproxy there is no -support for something like `/etc/haproxy/conf.d` directory where you put -each config in a separate file, but instead you must collect all configuration -in one file. 
- -So this resource renders this file from data provided by collecting individual -`haproxy_service_config` data. diff --git a/resources/haproxy_config/1.0.0/actions/remove.yaml b/resources/haproxy_config/1.0.0/actions/remove.yaml deleted file mode 100644 index 3e05d9c7..00000000 --- a/resources/haproxy_config/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# TODO -- hosts: [{{host}}] - sudo: yes - tasks: - - file: path={{ config_dir.value['src'] }} state=absent diff --git a/resources/haproxy_config/1.0.0/actions/run.yaml b/resources/haproxy_config/1.0.0/actions/run.yaml deleted file mode 100644 index e4aaf9b9..00000000 --- a/resources/haproxy_config/1.0.0/actions/run.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# TODO -- hosts: [{{host}}] - sudo: yes - vars: - config_dir: {src: {{ config_dir['src'] }}, dst: {{ config_dir['dst'] }}} - haproxy_ip: {{ ip }} - haproxy_services: - {% for single in config %} - - name: {{ single['name'] }} - listen_port: {{ single['listen_port'] }} - protocol: {{ single['protocol'] }} - servers: - {% for backend in single['backends'] %} - - name: {{ backend['server'] }}_{{ backend['port'] }} - ip: {{ backend['server'] }} - port: {{ backend['port'] }} - {% endfor %} - {% endfor %} - tasks: - - file: path={{ config_dir['src'] }}/ state=directory - - file: path={{ config_dir['src'] }}/haproxy.cfg state=touch - - template: src={{templates_dir}}/haproxy.cfg dest=/etc/haproxy/haproxy.cfg diff --git a/resources/haproxy_config/1.0.0/actions/update.yaml b/resources/haproxy_config/1.0.0/actions/update.yaml deleted file mode 100644 index e4aaf9b9..00000000 --- a/resources/haproxy_config/1.0.0/actions/update.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# TODO -- hosts: [{{host}}] - sudo: yes - vars: - config_dir: {src: {{ config_dir['src'] }}, dst: {{ config_dir['dst'] }}} - haproxy_ip: {{ ip }} - haproxy_services: - {% for single in config %} - - name: {{ single['name'] }} - listen_port: {{ single['listen_port'] }} - protocol: {{ single['protocol'] }} - servers: - {% for backend in single['backends'] %} - - name: {{ backend['server'] }}_{{ backend['port'] }} - ip: {{ backend['server'] }} - port: {{ backend['port'] }} - {% endfor %} - {% endfor %} - tasks: - - file: path={{ config_dir['src'] }}/ state=directory - - file: path={{ config_dir['src'] }}/haproxy.cfg state=touch - - template: src={{templates_dir}}/haproxy.cfg dest=/etc/haproxy/haproxy.cfg diff --git a/resources/haproxy_config/1.0.0/meta.yaml b/resources/haproxy_config/1.0.0/meta.yaml deleted file mode 100644 index 9af75372..00000000 --- a/resources/haproxy_config/1.0.0/meta.yaml +++ /dev/null @@ -1,20 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - config_dir: - schema: {src: str!, dst: str!} - value: {src: /etc/solar/haproxy, dst: /etc/haproxy} - config: - schema: [{backends: [{server: str!, port: int!}], listen_port: int!, protocol: str!, name: str!}] - value: [{}] - # ssh_user: - # schema: str! - # value: - # ssh_key: - # schema: str! 
- # value: - -tags: [resources=haproxy] diff --git a/resources/haproxy_config/1.0.0/templates/haproxy.cfg b/resources/haproxy_config/1.0.0/templates/haproxy.cfg deleted file mode 100644 index 02f0a857..00000000 --- a/resources/haproxy_config/1.0.0/templates/haproxy.cfg +++ /dev/null @@ -1,59 +0,0 @@ -global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice - maxconn 4096 -# tune.ssl.default-dh-param 2048 - pidfile /var/run/haproxy.pid - user haproxy - group haproxy - daemon - stats socket /var/run/haproxy.stats level admin -# ssl-default-bind-options no-sslv3 # ubuntu 14.04 have too old haproxy - -defaults - log global - mode http - option redispatch - option httplog - option dontlognull - option forwardfor - timeout connect 5000 - timeout client 50000 - timeout server 50000 - -#frontend default_frontend -# bind 0.0.0.0:80 -# default_backend default_service - -#backend default_service -# balance roundrobin - -{% for service in haproxy_services %} -listen {{ service['name'] }} 0.0.0.0:{{ service['listen_port'] }} -{% if service['protocol'] == 'http' %} - mode http - stats enable - stats uri /haproxy?stats - stats realm Strictly\ Private - stats auth A_Username:YourPassword - stats auth Another_User:passwd - balance roundrobin - option httpclose - option forwardfor - {% for server in service['servers'] %} - server {{ server['name'] }} {{ server['ip'] }}:{{ server['port'] }} check - {% endfor %} -{% elif service['protocol'] == 'tcp' %} -# tcp there - mode tcp - mode tcp - option tcpka - option srvtcpka - balance leastconn - {% for server in service['servers'] %} - server {{ server['name'] }} {{ server['ip'] }}:{{ server['port'] }} check - {% endfor %} -{% endif %} - - -{% endfor %} diff --git a/resources/haproxy_service/1.0.0/README.md b/resources/haproxy_service/1.0.0/README.md deleted file mode 100644 index 348cd1e4..00000000 --- a/resources/haproxy_service/1.0.0/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# `haproxy_service` resource - -This resource sets up a Docker container with Haproxy code. It requires -config to be provided by the `haproxy_config` resource (mounted under -`/etc/haproxy`). - -About container philosophy, see the `README.md` file in `keystone_service` -resource. 
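To make the flow described in the `haproxy_config` README above concrete, here is a minimal sketch (not part of the patch; hosts and ports are invented) of the `config` input that the resource aggregates from individual `haproxy_service_config` resources, following the schema in its meta.yaml and the loop in its run.yaml:

```yaml
# Hypothetical `config` value for haproxy_config; addresses are illustrative only.
# Each entry is rendered as one `listen` block by templates/haproxy.cfg.
config:
  - name: glance-registry
    listen_port: 9191
    protocol: http
    backends:
      - server: 10.0.0.3
        port: 9191
      - server: 10.0.0.4
        port: 9191
```

With this input the haproxy.cfg template above would emit a `listen glance-registry 0.0.0.0:9191` section containing one `server 10.0.0.3_9191 10.0.0.3:9191 check` line per backend.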
diff --git a/resources/haproxy_service/1.0.0/actions/apply_config.yaml b/resources/haproxy_service/1.0.0/actions/apply_config.yaml deleted file mode 100644 index 8dc29831..00000000 --- a/resources/haproxy_service/1.0.0/actions/apply_config.yaml +++ /dev/null @@ -1,7 +0,0 @@ - -- hosts: [{{ host }}] - sudo: yes - tasks: - - service: - name: haproxy - state: reloaded diff --git a/resources/haproxy_service/1.0.0/actions/install.yaml b/resources/haproxy_service/1.0.0/actions/install.yaml deleted file mode 100644 index 6817374e..00000000 --- a/resources/haproxy_service/1.0.0/actions/install.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - apt: - name: haproxy - state: present - - replace: - dest: '/etc/default/haproxy' - regexp: ENABLED=0 - replace: ENABLED=1 diff --git a/resources/haproxy_service/1.0.0/actions/remove.yaml b/resources/haproxy_service/1.0.0/actions/remove.yaml deleted file mode 100644 index ce6e8b81..00000000 --- a/resources/haproxy_service/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: haproxy container - docker: - name: {{ resource_name }} - image: {{ image }} - state: absent diff --git a/resources/haproxy_service/1.0.0/actions/run.yaml b/resources/haproxy_service/1.0.0/actions/run.yaml deleted file mode 100644 index 6817374e..00000000 --- a/resources/haproxy_service/1.0.0/actions/run.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - apt: - name: haproxy - state: present - - replace: - dest: '/etc/default/haproxy' - regexp: ENABLED=0 - replace: ENABLED=1 diff --git a/resources/haproxy_service/1.0.0/actions/update.yaml b/resources/haproxy_service/1.0.0/actions/update.yaml deleted file mode 100644 index 6f91682a..00000000 --- a/resources/haproxy_service/1.0.0/actions/update.yaml +++ /dev/null @@ -1,7 +0,0 @@ - -- hosts: [{{host}}] - sudo: yes - tasks: - - service: - name: haproxy - state: reloaded diff --git a/resources/haproxy_service/1.0.0/meta.yaml b/resources/haproxy_service/1.0.0/meta.yaml deleted file mode 100644 index 6228498c..00000000 --- a/resources/haproxy_service/1.0.0/meta.yaml +++ /dev/null @@ -1,14 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - # ssh_user: - # schema: str! - # value: - # ssh_key: - # schema: str! - # value: - -tags: [resources=haproxy] diff --git a/resources/haproxy_service/1.0.0/test.py b/resources/haproxy_service/1.0.0/test.py deleted file mode 100644 index 426e677c..00000000 --- a/resources/haproxy_service/1.0.0/test.py +++ /dev/null @@ -1,10 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing haproxy_service') - requests.get( - 'http://%s:%s' % (resource.args['ip'], resource.args['ports'][0][0]) - ) diff --git a/resources/haproxy_service_config/1.0.0/README.md b/resources/haproxy_service_config/1.0.0/README.md deleted file mode 100644 index fdbf60d4..00000000 --- a/resources/haproxy_service_config/1.0.0/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# `haproxy_service_config` resource - -This resource represents config for a single service handled by Haproxy. -It connects into `haproxy_config`. It collects all services which are to -be load-balanced by Haproxy. 
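The `haproxy_service_config` README above describes one such per-service resource. As a hedged example, the inputs a single instance might carry, using the defaults from the meta.yaml that follows plus invented backend addresses:

```yaml
# Inputs for one haproxy_service_config resource; defaults follow the meta.yaml
# below, backend addresses are hypothetical.
name: general_haproxy
listen_port: 9999
protocol: http
backends:
  - server: 10.0.0.3
    port: 9999
  - server: 10.0.0.4
    port: 9999
```

Several such resources connect into `haproxy_config`, which collects them into the `config` list shown earlier.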
diff --git a/resources/haproxy_service_config/1.0.0/meta.yaml b/resources/haproxy_service_config/1.0.0/meta.yaml deleted file mode 100644 index f551d885..00000000 --- a/resources/haproxy_service_config/1.0.0/meta.yaml +++ /dev/null @@ -1,22 +0,0 @@ -handler: none -version: 1.0.0 -input: - name: - schema: str! - value: general_haproxy - backends: - schema: [{server: str!, port: int!}] - listen_port: - schema: int! - value: 9999 - protocol: - schema: str! - value: http - # ports: - # schema: [int] - # value: [] - # servers: - # schema: [str] - # value: [] - -tags: [resources=haproxy] diff --git a/resources/hosts_file/1.0.0/actions/remove.yaml b/resources/hosts_file/1.0.0/actions/remove.yaml deleted file mode 100644 index d211b58a..00000000 --- a/resources/hosts_file/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: Remove hosts file - shell: echo '# flushed by ansible' > /etc/hosts diff --git a/resources/hosts_file/1.0.0/actions/run.yaml b/resources/hosts_file/1.0.0/actions/run.yaml deleted file mode 100644 index 8ad5f23f..00000000 --- a/resources/hosts_file/1.0.0/actions/run.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - {% for val in hosts %} - - name: Create hosts entries for {{val['name']}} => {{val['ip']}} - lineinfile: - dest: /etc/hosts - regexp: ".*{{val['name']}}$" - line: "{{val['ip']}} {{val['name']}}" - state: present - {% endfor %} diff --git a/resources/hosts_file/1.0.0/actions/update.yaml b/resources/hosts_file/1.0.0/actions/update.yaml deleted file mode 100644 index 8ad5f23f..00000000 --- a/resources/hosts_file/1.0.0/actions/update.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - {% for val in hosts %} - - name: Create hosts entries for {{val['name']}} => {{val['ip']}} - lineinfile: - dest: /etc/hosts - regexp: ".*{{val['name']}}$" - line: "{{val['ip']}} {{val['name']}}" - state: present - {% endfor %} diff --git a/resources/hosts_file/1.0.0/meta.yaml b/resources/hosts_file/1.0.0/meta.yaml deleted file mode 100644 index 879521d8..00000000 --- a/resources/hosts_file/1.0.0/meta.yaml +++ /dev/null @@ -1,9 +0,0 @@ -handler: ansible -version: 1.0.0 - -input: - hosts: - schema: [{name: str!, ip: str!}] - value: [] - -tags: [resource=hosts_file] diff --git a/resources/keystone_config/1.0.0/actions/remove.yaml b/resources/keystone_config/1.0.0/actions/remove.yaml deleted file mode 100644 index 48094d21..00000000 --- a/resources/keystone_config/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - file: path={{config_dir}} state=absent diff --git a/resources/keystone_config/1.0.0/actions/run.yaml b/resources/keystone_config/1.0.0/actions/run.yaml deleted file mode 100644 index 3265bbc0..00000000 --- a/resources/keystone_config/1.0.0/actions/run.yaml +++ /dev/null @@ -1,17 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - vars: - admin_token: {{admin_token}} - keystone_host: {{ ip }} - keystone_port: {{ port }} - db_user: {{db_user}} - db_password: {{db_password}} - db_host: {{db_host}} - db_name: {{db_name}} - tasks: - - file: path={{config_dir}} state=directory - - template: src={{templates_dir}}/keystone.conf dest={{config_dir}}/keystone.conf - - template: src={{templates_dir}}/default_catalog.templates dest={{config_dir}}/default_catalog.templates - - template: src={{templates_dir}}/logging.conf dest={{config_dir}}/logging.conf - - template: src={{templates_dir}}/policy.json dest={{config_dir}}/policy.json - - template: 
src={{templates_dir}}/exports dest={{ config_dir }}/keystone-exports diff --git a/resources/keystone_config/1.0.0/meta.yaml b/resources/keystone_config/1.0.0/meta.yaml deleted file mode 100644 index eb83904d..00000000 --- a/resources/keystone_config/1.0.0/meta.yaml +++ /dev/null @@ -1,36 +0,0 @@ -handler: ansible -version: 1.0.0 - -input: - config_dir: - schema: str! - value: /etc/solar/keystone - admin_token: - schema: str! - value: admin - db_password: - schema: str! - value: password - db_user: - schema: str! - value: keystone - db_host: - schema: str! - value: - db_port: - schema: int! - value: - db_name: - schema: str! - value: keystone - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/keystone_config, resources/keystone] diff --git a/resources/keystone_config/1.0.0/templates/default_catalog.templates b/resources/keystone_config/1.0.0/templates/default_catalog.templates deleted file mode 100644 index a69b7f06..00000000 --- a/resources/keystone_config/1.0.0/templates/default_catalog.templates +++ /dev/null @@ -1,27 +0,0 @@ -# config for templated.Catalog, using camelCase because I don't want to do -# translations for keystone compat -catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0 -catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0 -catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0 -catalog.RegionOne.identity.name = Identity Service - -# fake compute service for now to help novaclient tests work -catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s -catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s -catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s -catalog.RegionOne.compute.name = Compute Service - -catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s -catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s -catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s -catalog.RegionOne.volume.name = Volume Service - -catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud -catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin -catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud -catalog.RegionOne.ec2.name = EC2 Service - -catalog.RegionOne.image.publicURL = http://localhost:9292/v1 -catalog.RegionOne.image.adminURL = http://localhost:9292/v1 -catalog.RegionOne.image.internalURL = http://localhost:9292/v1 -catalog.RegionOne.image.name = Image Service diff --git a/resources/keystone_config/1.0.0/templates/exports b/resources/keystone_config/1.0.0/templates/exports deleted file mode 100644 index cc3a1c1c..00000000 --- a/resources/keystone_config/1.0.0/templates/exports +++ /dev/null @@ -1,2 +0,0 @@ -export OS_SERVICE_ENDPOINT=http://localhost:35357/v2.0/ -export OS_SERVICE_TOKEN={{ admin_token }} diff --git a/resources/keystone_config/1.0.0/templates/keystone.conf b/resources/keystone_config/1.0.0/templates/keystone.conf deleted file mode 100644 index e8bfb466..00000000 --- a/resources/keystone_config/1.0.0/templates/keystone.conf +++ /dev/null @@ -1,1589 +0,0 @@ -[DEFAULT] - -# -# Options defined in keystone -# - -# A "shared secret" that can be used to bootstrap Keystone. -# This "token" does not represent a user, and carries no -# explicit authorization. 
To disable in production (highly -# recommended), remove AdminTokenAuthMiddleware from your -# paste application pipelines (for example, in keystone- -# paste.ini). (string value) -admin_token={{admin_token}} - -# The IP address of the network interface for the public -# service to listen on. (string value) -# Deprecated group/name - [DEFAULT]/bind_host -#public_bind_host=0.0.0.0 - -# The IP address of the network interface for the admin -# service to listen on. (string value) -# Deprecated group/name - [DEFAULT]/bind_host -#admin_bind_host=0.0.0.0 - -# (Deprecated) The port which the OpenStack Compute service -# listens on. This option was only used for string replacement -# in the templated catalog backend. Templated catalogs should -# replace the "$(compute_port)s" substitution with the static -# port of the compute service. As of Juno, this option is -# deprecated and will be removed in the L release. (integer -# value) -#compute_port=8774 - -# The port number which the admin service listens on. (integer -# value) -admin_port=35357 - -# The port number which the public service listens on. -# (integer value) -public_port=5000 - -# The base public endpoint URL for Keystone that is advertised -# to clients (NOTE: this does NOT affect how Keystone listens -# for connections). Defaults to the base host URL of the -# request. E.g. a request to http://server:5000/v2.0/users -# will default to http://server:5000. You should only need to -# set this value if the base URL contains a path (e.g. -# /prefix/v2.0) or the endpoint should be found on a different -# server. (string value) -#public_endpoint= - -# The base admin endpoint URL for Keystone that is advertised -# to clients (NOTE: this does NOT affect how Keystone listens -# for connections). Defaults to the base host URL of the -# request. E.g. a request to http://server:35357/v2.0/users -# will default to http://server:35357. You should only need to -# set this value if the base URL contains a path (e.g. -# /prefix/v2.0) or the endpoint should be found on a different -# server. (string value) -#admin_endpoint= - -# The number of worker processes to serve the public WSGI -# application. Defaults to number of CPUs (minimum of 2). -# (integer value) -#public_workers= - -# The number of worker processes to serve the admin WSGI -# application. Defaults to number of CPUs (minimum of 2). -# (integer value) -#admin_workers= - -# Enforced by optional sizelimit middleware -# (keystone.middleware:RequestBodySizeLimiter). (integer -# value) -#max_request_body_size=114688 - -# Limit the sizes of user & project ID/names. (integer value) -#max_param_size=64 - -# Similar to max_param_size, but provides an exception for -# token values. (integer value) -#max_token_size=8192 - -# During a SQL upgrade member_role_id will be used to create a -# new role that will replace records in the assignment table -# with explicit role grants. After migration, the -# member_role_id will be used in the API add_user_to_project. -# (string value) -#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab - -# During a SQL upgrade member_role_name will be used to create -# a new role that will replace records in the assignment table -# with explicit role grants. After migration, member_role_name -# will be ignored. (string value) -#member_role_name=_member_ - -# The value passed as the keyword "rounds" to passlib's -# encrypt method. (integer value) -#crypt_strength=40000 - -# Set this to true if you want to enable TCP_KEEPALIVE on -# server sockets, i.e. 
sockets used by the Keystone wsgi -# server for client connections. (boolean value) -#tcp_keepalive=false - -# Sets the value of TCP_KEEPIDLE in seconds for each server -# socket. Only applies if tcp_keepalive is true. Not supported -# on OS X. (integer value) -#tcp_keepidle=600 - -# The maximum number of entities that will be returned in a -# collection, with no limit set by default. This global limit -# may be then overridden for a specific driver, by specifying -# a list_limit in the appropriate section (e.g. [assignment]). -# (integer value) -#list_limit= - -# Set this to false if you want to enable the ability for -# user, group and project entities to be moved between domains -# by updating their domain_id. Allowing such movement is not -# recommended if the scope of a domain admin is being -# restricted by use of an appropriate policy file (see -# policy.v3cloudsample as an example). (boolean value) -#domain_id_immutable=true - -# If set to true, strict password length checking is performed -# for password manipulation. If a password exceeds the maximum -# length, the operation will fail with an HTTP 403 Forbidden -# error. If set to false, passwords are automatically -# truncated to the maximum length. (boolean value) -#strict_password_check=false - - -# -# Options defined in oslo.messaging -# - -# Use durable queues in amqp. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in amqp. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. (string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The number of prefetched messages held by receiver. (integer -# value) -#qpid_receiver_capacity=1 - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some -# distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -#rabbit_host=localhost - -# The RabbitMQ broker port where a single node is used. 
-# (integer value) -#rabbit_port=5672 - -# RabbitMQ HA cluster host:port pairs. (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -#rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -#rabbit_userid=guest - -# The RabbitMQ password. (string value) -#rabbit_password=guest - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# If passed, use a fake RabbitMQ provider. (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=keystone - -# Seconds to wait before a cast expires (TTL). Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -#notification_driver= - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. (string value) -#rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -#control_exchange=keystone - - -# -# Options defined in keystone.notifications -# - -# Default publisher_id for outgoing notifications (string -# value) -#default_publisher_id= - - -# -# Options defined in keystone.openstack.common.eventlet_backdoor -# - -# Enable eventlet backdoor. 
Acceptable values are 0, , -# and :, where 0 results in listening on a random -# tcp port number; results in listening on the -# specified port number (and not enabling backdoor if that -# port is in use); and : results in listening on -# the smallest unused port number within the specified range -# of port numbers. The chosen port is displayed in the -# service's log file. (string value) -#backdoor_port= - - -# -# Options defined in keystone.openstack.common.log -# - -# Print debugging output (set logging level to DEBUG instead -# of default WARNING level). (boolean value) -#debug=false - -# Print more verbose output (set logging level to INFO instead -# of default WARNING level). (boolean value) -#verbose=false - -# Log output to standard error. (boolean value) -#use_stderr=false - -# Format string to use for log messages with context. (string -# value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context. -# (string value) -#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG. (string -# value) -#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. -# (string value) -#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# List of logger=LEVEL pairs. (list value) -#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN - -# Enables or disables publication of error events. (boolean -# value) -#publish_errors=false - -# Enables or disables fatal status of deprecations. (boolean -# value) -#fatal_deprecations=false - -# The format for an instance that is passed with the log -# message. (string value) -#instance_format="[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log -# message. (string value) -#instance_uuid_format="[instance: %(uuid)s] " - -# The name of a logging configuration file. This file is -# appended to any existing logging configuration files. For -# details about logging configuration files, see the Python -# logging module documentation. (string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append= - -# DEPRECATED. A logging.Formatter log message format string -# which may use any of the available logging.LogRecord -# attributes. This option is deprecated. Please use -# logging_context_format_string and -# logging_default_format_string instead. (string value) -#log_format= - -# Format string for %%(asctime)s in log records. Default: -# %(default)s . (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. (string value) -# If not set here, logging will go to /var/log/keystone/keystone.log, -# default from keystone-dist.conf. -# Deprecated group/name - [DEFAULT]/logfile -#log_file=/var/log/keystone/keystone.log - -# (Optional) The base directory used for relative --log-file -# paths. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir= - -# Use syslog for logging. Existing syslog format is DEPRECATED -# during I, and will change in J to honor RFC5424. 
(boolean -# value) -#use_syslog=false - -# (Optional) Enables or disables syslog rfc5424 format for -# logging. If enabled, prefixes the MSG part of the syslog -# message with APP-NAME (RFC5424). The format without the APP- -# NAME is deprecated in I, and will be removed in J. (boolean -# value) -#use_syslog_rfc_format=false - -# Syslog facility to receive log lines. (string value) -#syslog_log_facility=LOG_USER - - -# -# Options defined in keystone.openstack.common.policy -# - -# The JSON file that defines policies. (string value) -#policy_file=policy.json - -# Default rule. Enforced when a requested rule is not found. -# (string value) -#policy_default_rule=default - - -[assignment] - -# -# Options defined in keystone -# - -# Assignment backend driver. (string value) -#driver= - -# Toggle for assignment caching. This has no effect unless -# global caching is enabled. (boolean value) -#caching=true - -# TTL (in seconds) to cache assignment data. This has no -# effect unless global caching is enabled. (integer value) -#cache_time= - -# Maximum number of entities that will be returned in an -# assignment collection. (integer value) -#list_limit= - - -[auth] - -# -# Options defined in keystone -# - -# Default auth methods. (list value) -#methods=external,password,token - -# The password auth plugin module. (string value) -#password=keystone.auth.plugins.password.Password - -# The token auth plugin module. (string value) -#token=keystone.auth.plugins.token.Token - -# The external (REMOTE_USER) auth plugin module. (string -# value) -#external=keystone.auth.plugins.external.DefaultDomain - - -[cache] - -# -# Options defined in keystone -# - -# Prefix for building the configuration dictionary for the -# cache region. This should not need to be changed unless -# there is another dogpile.cache region with the same -# configuration name. (string value) -#config_prefix=cache.keystone - -# Default TTL, in seconds, for any cached item in the -# dogpile.cache region. This applies to any cached method that -# doesn't have an explicit cache expiration time defined for -# it. (integer value) -#expiration_time=600 - -# Dogpile.cache backend module. It is recommended that -# Memcache with pooling (keystone.cache.memcache_pool) or -# Redis (dogpile.cache.redis) be used in production -# deployments. Small workloads (single process) like devstack -# can use the dogpile.cache.memory backend. (string value) -#backend=keystone.common.cache.noop - -# Arguments supplied to the backend module. Specify this -# option once per argument to be passed to the dogpile.cache -# backend. Example format: ":". (multi valued) -#backend_argument= - -# Proxy classes to import that will affect the way the -# dogpile.cache backend functions. See the dogpile.cache -# documentation on changing-backend-behavior. (list value) -#proxies= - -# Global toggle for all caching using the should_cache_fn -# mechanism. (boolean value) -#enabled=false - -# Extra debugging from the cache backend (cache keys, -# get/set/delete/etc calls). This is only really useful if you -# need to see the specific cache-backend get/set/delete calls -# with the keys/values. Typically this should be left set to -# false. (boolean value) -#debug_cache_backend=false - -# Memcache servers in the format of "host:port". -# (dogpile.cache.memcache and keystone.cache.memcache_pool -# backends only) (list value) -#memcache_servers=localhost:11211 - -# Number of seconds memcached server is considered dead before -# it is tried again. 
(dogpile.cache.memcache and -# keystone.cache.memcache_pool backends only) (integer value) -#memcache_dead_retry=300 - -# Timeout in seconds for every call to a server. -# (dogpile.cache.memcache and keystone.cache.memcache_pool -# backends only) (integer value) -#memcache_socket_timeout=3 - -# Max total number of open connections to every memcached -# server. (keystone.cache.memcache_pool backend only) (integer -# value) -#memcache_pool_maxsize=10 - -# Number of seconds a connection to memcached is held unused -# in the pool before it is closed. -# (keystone.cache.memcache_pool backend only) (integer value) -#memcache_pool_unused_timeout=60 - -# Number of seconds that an operation will wait to get a -# memcache client connection. (integer value) -#memcache_pool_connection_get_timeout=10 - - -[catalog] - -# -# Options defined in keystone -# - -# Catalog template file name for use with the template catalog -# backend. (string value) -#template_file=/etc/keystone/default_catalog.templates - -# Catalog backend driver. (string value) -#driver=keystone.catalog.backends.sql.Catalog - -# Toggle for catalog caching. This has no effect unless global -# caching is enabled. (boolean value) -#caching=true - -# Time to cache catalog data (in seconds). This has no effect -# unless global and catalog caching are enabled. (integer -# value) -#cache_time= - -# Maximum number of entities that will be returned in a -# catalog collection. (integer value) -#list_limit= - -# (Deprecated) List of possible substitutions for use in -# formatting endpoints. Use caution when modifying this list. -# It will give users with permission to create endpoints the -# ability to see those values in your configuration file. This -# option will be removed in Juno. (list value) -#endpoint_substitution_whitelist=tenant_id,user_id,public_bind_host,admin_bind_host,compute_host,compute_port,admin_port,public_port,public_endpoint,admin_endpoint - - -[credential] - -# -# Options defined in keystone -# - -# Credential backend driver. (string value) -#driver=keystone.credential.backends.sql.Credential - - -[database] - -# -# Options defined in oslo.db -# - -# The file name to use with SQLite. (string value) -#sqlite_db=oslo.sqlite - -# If True, SQLite uses synchronous mode. (boolean value) -#sqlite_synchronous=true - -# The back end to use for the database. (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend=sqlalchemy - -# The SQLAlchemy connection string to use to connect to the -# database. (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection=mysql://keystone:keystone@localhost/keystone -connection=mysql://{{db_user}}:{{db_password}}@{{db_host}}/{{db_name}} - -# The SQLAlchemy connection string to use to connect to the -# slave database. (string value) -#slave_connection= - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode=TRADITIONAL - -# Timeout before idle SQL connections are reaped. (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool. 
-# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size= - -# Maximum db connection retries during startup. Set to -1 to -# specify an infinite retry count. (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries=10 - -# Interval between retries of opening a SQL connection. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval=10 - -# If set, use this value for max_overflow with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow= - -# Verbosity of SQL debugging information: 0=None, -# 100=Everything. (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug=0 - -# Add Python stack traces to SQL as comment strings. (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace=false - -# If set, use this value for pool_timeout with SQLAlchemy. -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout= - -# Enable the experimental use of database reconnect on -# connection lost. (boolean value) -#use_db_reconnect=false - -# Seconds between database connection retries. (integer value) -#db_retry_interval=1 - -# If True, increases the interval between database connection -# retries up to db_max_retry_interval. (boolean value) -#db_inc_retry_interval=true - -# If db_inc_retry_interval is set, the maximum seconds between -# database connection retries. (integer value) -#db_max_retry_interval=10 - -# Maximum database connection retries before error is raised. -# Set to -1 to specify an infinite retry count. (integer -# value) -#db_max_retries=20 - - -[ec2] - -# -# Options defined in keystone -# - -# EC2Credential backend driver. (string value) -#driver=keystone.contrib.ec2.backends.sql.Ec2 - - -[endpoint_filter] - -# -# Options defined in keystone -# - -# Endpoint Filter backend driver (string value) -#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter - -# Toggle to return all active endpoints if no filter exists. -# (boolean value) -#return_all_endpoints_if_no_filter=true - - -[endpoint_policy] - -# -# Options defined in keystone -# - -# Endpoint policy backend driver (string value) -#driver=keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy - - -[federation] - -# -# Options defined in keystone -# - -# Federation backend driver. (string value) -#driver=keystone.contrib.federation.backends.sql.Federation - -# Value to be used when filtering assertion parameters from -# the environment. (string value) -#assertion_prefix= - - -[identity] - -# -# Options defined in keystone -# - -# This references the domain to use for all Identity API v2 -# requests (which are not aware of domains). A domain with -# this ID will be created for you by keystone-manage db_sync -# in migration 008. The domain referenced by this ID cannot be -# deleted on the v3 API, to prevent accidentally breaking the -# v2 API. 
There is nothing special about this domain, other -# than the fact that it must exist to order to maintain -# support for your v2 clients. (string value) -#default_domain_id=default - -# A subset (or all) of domains can have their own identity -# driver, each with their own partial configuration file in a -# domain configuration directory. Only values specific to the -# domain need to be placed in the domain specific -# configuration file. This feature is disabled by default; set -# to true to enable. (boolean value) -#domain_specific_drivers_enabled=false - -# Path for Keystone to locate the domain specific identity -# configuration files if domain_specific_drivers_enabled is -# set to true. (string value) -#domain_config_dir=/etc/keystone/domains - -# Identity backend driver. (string value) -#driver=keystone.identity.backends.sql.Identity - -# Maximum supported length for user passwords; decrease to -# improve performance. (integer value) -#max_password_length=4096 - -# Maximum number of entities that will be returned in an -# identity collection. (integer value) -#list_limit= - - -[identity_mapping] - -# -# Options defined in keystone -# - -# Keystone Identity Mapping backend driver. (string value) -#driver=keystone.identity.mapping_backends.sql.Mapping - -# Public ID generator for user and group entities. The -# Keystone identity mapper only supports generators that -# produce no more than 64 characters. (string value) -#generator=keystone.identity.id_generators.sha256.Generator - -# The format of user and group IDs changed in Juno for -# backends that do not generate UUIDs (e.g. LDAP), with -# keystone providing a hash mapping to the underlying -# attribute in LDAP. By default this mapping is disabled, -# which ensures that existing IDs will not change. Even when -# the mapping is enabled by using domain specific drivers, any -# users and groups from the default domain being handled by -# LDAP will still not be mapped to ensure their IDs remain -# backward compatible. Setting this value to False will enable -# the mapping for even the default LDAP driver. It is only -# safe to do this if you do not already have assignments for -# users and groups from the default LDAP domain, and it is -# acceptable for Keystone to provide the different IDs to -# clients than it did previously. Typically this means that -# the only time you can set this value to False is when -# configuring a fresh installation. (boolean value) -#backward_compatible_ids=true - - -[kvs] - -# -# Options defined in keystone -# - -# Extra dogpile.cache backend modules to register with the -# dogpile.cache library. (list value) -#backends= - -# Prefix for building the configuration dictionary for the KVS -# region. This should not need to be changed unless there is -# another dogpile.cache region with the same configuration -# name. (string value) -#config_prefix=keystone.kvs - -# Toggle to disable using a key-mangling function to ensure -# fixed length keys. This is toggle-able for debugging -# purposes, it is highly recommended to always leave this set -# to true. (boolean value) -#enable_key_mangler=true - -# Default lock timeout for distributed locking. (integer -# value) -#default_lock_timeout=5 - - -[ldap] - -# -# Options defined in keystone -# - -# URL for connecting to the LDAP server. (string value) -#url=ldap://localhost - -# User BindDN to query the LDAP server. (string value) -#user= - -# Password for the BindDN to query the LDAP server. 
(string -# value) -#password= - -# LDAP server suffix (string value) -#suffix=cn=example,cn=com - -# If true, will add a dummy member to groups. This is required -# if the objectclass for groups requires the "member" -# attribute. (boolean value) -#use_dumb_member=false - -# DN of the "dummy member" to use when "use_dumb_member" is -# enabled. (string value) -#dumb_member=cn=dumb,dc=nonexistent - -# Delete subtrees using the subtree delete control. Only -# enable this option if your LDAP server supports subtree -# deletion. (boolean value) -#allow_subtree_delete=false - -# The LDAP scope for queries, this can be either "one" -# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). -# (string value) -#query_scope=one - -# Maximum results per page; a value of zero ("0") disables -# paging. (integer value) -#page_size=0 - -# The LDAP dereferencing option for queries. This can be -# either "never", "searching", "always", "finding" or -# "default". The "default" option falls back to using default -# dereferencing configured by your ldap.conf. (string value) -#alias_dereferencing=default - -# Sets the LDAP debugging level for LDAP calls. A value of 0 -# means that debugging is not enabled. This value is a -# bitmask, consult your LDAP documentation for possible -# values. (integer value) -#debug_level= - -# Override the system's default referral chasing behavior for -# queries. (boolean value) -#chase_referrals= - -# Search base for users. (string value) -#user_tree_dn= - -# LDAP search filter for users. (string value) -#user_filter= - -# LDAP objectclass for users. (string value) -#user_objectclass=inetOrgPerson - -# LDAP attribute mapped to user id. WARNING: must not be a -# multivalued attribute. (string value) -#user_id_attribute=cn - -# LDAP attribute mapped to user name. (string value) -#user_name_attribute=sn - -# LDAP attribute mapped to user email. (string value) -#user_mail_attribute=mail - -# LDAP attribute mapped to password. (string value) -#user_pass_attribute=userPassword - -# LDAP attribute mapped to user enabled flag. (string value) -#user_enabled_attribute=enabled - -# Invert the meaning of the boolean enabled values. Some LDAP -# servers use a boolean lock attribute where "true" means an -# account is disabled. Setting "user_enabled_invert = true" -# will allow these lock attributes to be used. This setting -# will have no effect if "user_enabled_mask" or -# "user_enabled_emulation" settings are in use. (boolean -# value) -#user_enabled_invert=false - -# Bitmask integer to indicate the bit that the enabled value -# is stored in if the LDAP server represents "enabled" as a -# bit on an integer rather than a boolean. A value of "0" -# indicates the mask is not used. If this is not set to "0" -# the typical value is "2". This is typically used when -# "user_enabled_attribute = userAccountControl". (integer -# value) -#user_enabled_mask=0 - -# Default value to enable users. This should match an -# appropriate int value if the LDAP server uses non-boolean -# (bitmask) values to indicate if a user is enabled or -# disabled. If this is not set to "True" the typical value is -# "512". This is typically used when "user_enabled_attribute = -# userAccountControl". (string value) -#user_enabled_default=True - -# List of attributes stripped off the user on update. (list -# value) -#user_attribute_ignore=default_project_id,tenants - -# LDAP attribute mapped to default_project_id for users. -# (string value) -#user_default_project_id_attribute= - -# Allow user creation in LDAP backend. 
(boolean value) -#user_allow_create=true - -# Allow user updates in LDAP backend. (boolean value) -#user_allow_update=true - -# Allow user deletion in LDAP backend. (boolean value) -#user_allow_delete=true - -# If true, Keystone uses an alternative method to determine if -# a user is enabled or not by checking if they are a member of -# the "user_enabled_emulation_dn" group. (boolean value) -#user_enabled_emulation=false - -# DN of the group entry to hold enabled users when using -# enabled emulation. (string value) -#user_enabled_emulation_dn= - -# List of additional LDAP attributes used for mapping -# additional attribute mappings for users. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#user_additional_attribute_mapping= - -# Search base for projects (string value) -# Deprecated group/name - [ldap]/tenant_tree_dn -#project_tree_dn= - -# LDAP search filter for projects. (string value) -# Deprecated group/name - [ldap]/tenant_filter -#project_filter= - -# LDAP objectclass for projects. (string value) -# Deprecated group/name - [ldap]/tenant_objectclass -#project_objectclass=groupOfNames - -# LDAP attribute mapped to project id. (string value) -# Deprecated group/name - [ldap]/tenant_id_attribute -#project_id_attribute=cn - -# LDAP attribute mapped to project membership for user. -# (string value) -# Deprecated group/name - [ldap]/tenant_member_attribute -#project_member_attribute=member - -# LDAP attribute mapped to project name. (string value) -# Deprecated group/name - [ldap]/tenant_name_attribute -#project_name_attribute=ou - -# LDAP attribute mapped to project description. (string value) -# Deprecated group/name - [ldap]/tenant_desc_attribute -#project_desc_attribute=description - -# LDAP attribute mapped to project enabled. (string value) -# Deprecated group/name - [ldap]/tenant_enabled_attribute -#project_enabled_attribute=enabled - -# LDAP attribute mapped to project domain_id. (string value) -# Deprecated group/name - [ldap]/tenant_domain_id_attribute -#project_domain_id_attribute=businessCategory - -# List of attributes stripped off the project on update. (list -# value) -# Deprecated group/name - [ldap]/tenant_attribute_ignore -#project_attribute_ignore= - -# Allow project creation in LDAP backend. (boolean value) -# Deprecated group/name - [ldap]/tenant_allow_create -#project_allow_create=true - -# Allow project update in LDAP backend. (boolean value) -# Deprecated group/name - [ldap]/tenant_allow_update -#project_allow_update=true - -# Allow project deletion in LDAP backend. (boolean value) -# Deprecated group/name - [ldap]/tenant_allow_delete -#project_allow_delete=true - -# If true, Keystone uses an alternative method to determine if -# a project is enabled or not by checking if they are a member -# of the "project_enabled_emulation_dn" group. (boolean value) -# Deprecated group/name - [ldap]/tenant_enabled_emulation -#project_enabled_emulation=false - -# DN of the group entry to hold enabled projects when using -# enabled emulation. (string value) -# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn -#project_enabled_emulation_dn= - -# Additional attribute mappings for projects. Attribute -# mapping format is :, where ldap_attr -# is the attribute in the LDAP entry and user_attr is the -# Identity API attribute. (list value) -# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping -#project_additional_attribute_mapping= - -# Search base for roles. 
(string value) -#role_tree_dn= - -# LDAP search filter for roles. (string value) -#role_filter= - -# LDAP objectclass for roles. (string value) -#role_objectclass=organizationalRole - -# LDAP attribute mapped to role id. (string value) -#role_id_attribute=cn - -# LDAP attribute mapped to role name. (string value) -#role_name_attribute=ou - -# LDAP attribute mapped to role membership. (string value) -#role_member_attribute=roleOccupant - -# List of attributes stripped off the role on update. (list -# value) -#role_attribute_ignore= - -# Allow role creation in LDAP backend. (boolean value) -#role_allow_create=true - -# Allow role update in LDAP backend. (boolean value) -#role_allow_update=true - -# Allow role deletion in LDAP backend. (boolean value) -#role_allow_delete=true - -# Additional attribute mappings for roles. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#role_additional_attribute_mapping= - -# Search base for groups. (string value) -#group_tree_dn= - -# LDAP search filter for groups. (string value) -#group_filter= - -# LDAP objectclass for groups. (string value) -#group_objectclass=groupOfNames - -# LDAP attribute mapped to group id. (string value) -#group_id_attribute=cn - -# LDAP attribute mapped to group name. (string value) -#group_name_attribute=ou - -# LDAP attribute mapped to show group membership. (string -# value) -#group_member_attribute=member - -# LDAP attribute mapped to group description. (string value) -#group_desc_attribute=description - -# List of attributes stripped off the group on update. (list -# value) -#group_attribute_ignore= - -# Allow group creation in LDAP backend. (boolean value) -#group_allow_create=true - -# Allow group update in LDAP backend. (boolean value) -#group_allow_update=true - -# Allow group deletion in LDAP backend. (boolean value) -#group_allow_delete=true - -# Additional attribute mappings for groups. Attribute mapping -# format is :, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#group_additional_attribute_mapping= - -# CA certificate file path for communicating with LDAP -# servers. (string value) -#tls_cacertfile= - -# CA certificate directory path for communicating with LDAP -# servers. (string value) -#tls_cacertdir= - -# Enable TLS for communicating with LDAP servers. (boolean -# value) -#use_tls=false - -# Valid options for tls_req_cert are demand, never, and allow. -# (string value) -#tls_req_cert=demand - -# Enable LDAP connection pooling. (boolean value) -#use_pool=false - -# Connection pool size. (integer value) -#pool_size=10 - -# Maximum count of reconnect trials. (integer value) -#pool_retry_max=3 - -# Time span in seconds to wait between two reconnect trials. -# (floating point value) -#pool_retry_delay=0.1 - -# Connector timeout in seconds. Value -1 indicates indefinite -# wait for response. (integer value) -#pool_connection_timeout=-1 - -# Connection lifetime in seconds. (integer value) -#pool_connection_lifetime=600 - -# Enable LDAP connection pooling for end user authentication. -# If use_pool is disabled, then this setting is meaningless -# and is not used at all. (boolean value) -#use_auth_pool=false - -# End user auth connection pool size. (integer value) -#auth_pool_size=100 - -# End user auth connection lifetime in seconds. 
(integer -# value) -#auth_pool_connection_lifetime=60 - - -[matchmaker_redis] - -# -# Options defined in oslo.messaging -# - -# Host to locate redis. (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). (string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[memcache] - -# -# Options defined in keystone -# - -# Memcache servers in the format of "host:port". (list value) -#servers=localhost:11211 - -# Number of seconds memcached server is considered dead before -# it is tried again. This is used by the key value store -# system (e.g. token pooled memcached persistence backend). -# (integer value) -#dead_retry=300 - -# Timeout in seconds for every call to a server. This is used -# by the key value store system (e.g. token pooled memcached -# persistence backend). (integer value) -#socket_timeout=3 - -# Max total number of open connections to every memcached -# server. This is used by the key value store system (e.g. -# token pooled memcached persistence backend). (integer value) -#pool_maxsize=10 - -# Number of seconds a connection to memcached is held unused -# in the pool before it is closed. This is used by the key -# value store system (e.g. token pooled memcached persistence -# backend). (integer value) -#pool_unused_timeout=60 - -# Number of seconds that an operation will wait to get a -# memcache client connection. This is used by the key value -# store system (e.g. token pooled memcached persistence -# backend). (integer value) -#pool_connection_get_timeout=10 - - -[oauth1] - -# -# Options defined in keystone -# - -# Credential backend driver. (string value) -#driver=keystone.contrib.oauth1.backends.sql.OAuth1 - -# Duration (in seconds) for the OAuth Request Token. (integer -# value) -#request_token_duration=28800 - -# Duration (in seconds) for the OAuth Access Token. (integer -# value) -#access_token_duration=86400 - - -[os_inherit] - -# -# Options defined in keystone -# - -# role-assignment inheritance to projects from owning domain -# can be optionally enabled. (boolean value) -#enabled=false - - -[paste_deploy] - -# -# Options defined in keystone -# - -# Name of the paste configuration file that defines the -# available pipelines. (string value) -#config_file=/usr/share/keystone/keystone-dist-paste.ini - - -[policy] - -# -# Options defined in keystone -# - -# Policy backend driver. (string value) -#driver=keystone.policy.backends.sql.Policy - -# Maximum number of entities that will be returned in a policy -# collection. (integer value) -#list_limit= - - -[revoke] - -# -# Options defined in keystone -# - -# An implementation of the backend for persisting revocation -# events. (string value) -#driver=keystone.contrib.revoke.backends.kvs.Revoke - -# This value (calculated in seconds) is added to token -# expiration before a revocation event may be removed from the -# backend. (integer value) -#expiration_buffer=1800 - -# Toggle for revocation event caching. This has no effect -# unless global caching is enabled. (boolean value) -#caching=true - - -[saml] - -# -# Options defined in keystone -# - -# Default TTL, in seconds, for any generated SAML assertion -# created by Keystone. (integer value) -#assertion_expiration_time=3600 - -# Binary to be called for XML signing. 
Install the appropriate -# package, specify absolute path or adjust your PATH -# environment variable if the binary cannot be found. (string -# value) -#xmlsec1_binary=xmlsec1 - -# Path of the certfile for SAML signing. For non-production -# environments, you may be interested in using `keystone- -# manage pki_setup` to generate self-signed certificates. -# Note, the path cannot contain a comma. (string value) -#certfile=/etc/keystone/ssl/certs/signing_cert.pem - -# Path of the keyfile for SAML signing. Note, the path cannot -# contain a comma. (string value) -#keyfile=/etc/keystone/ssl/private/signing_key.pem - -# Entity ID value for unique Identity Provider identification. -# Usually FQDN is set with a suffix. A value is required to -# generate IDP Metadata. For example: -# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp -# (string value) -#idp_entity_id= - -# Identity Provider Single-Sign-On service value, required in -# the Identity Provider's metadata. A value is required to -# generate IDP Metadata. For example: -# https://keystone.example.com/v3/OS-FEDERATION/saml2/sso -# (string value) -#idp_sso_endpoint= - -# Language used by the organization. (string value) -#idp_lang=en - -# Organization name the installation belongs to. (string -# value) -#idp_organization_name= - -# Organization name to be displayed. (string value) -#idp_organization_display_name= - -# URL of the organization. (string value) -#idp_organization_url= - -# Company of contact person. (string value) -#idp_contact_company= - -# Given name of contact person (string value) -#idp_contact_name= - -# Surname of contact person. (string value) -#idp_contact_surname= - -# Email address of contact person. (string value) -#idp_contact_email= - -# Telephone number of contact person. (string value) -#idp_contact_telephone= - -# Contact type. Allowed values are: technical, support, -# administrative billing, and other (string value) -#idp_contact_type=other - -# Path to the Identity Provider Metadata file. This file -# should be generated with the keystone-manage -# saml_idp_metadata command. (string value) -#idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml - - -[signing] - -# -# Options defined in keystone -# - -# Deprecated in favor of provider in the [token] section. -# (string value) -#token_format= - -# Path of the certfile for token signing. For non-production -# environments, you may be interested in using `keystone- -# manage pki_setup` to generate self-signed certificates. -# (string value) -#certfile=/etc/keystone/ssl/certs/signing_cert.pem - -# Path of the keyfile for token signing. (string value) -#keyfile=/etc/keystone/ssl/private/signing_key.pem - -# Path of the CA for token signing. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA key for token signing. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Key size (in bits) for token signing cert (auto generated -# certificate). (integer value) -#key_size=2048 - -# Days the token signing cert is valid for (auto generated -# certificate). (integer value) -#valid_days=3650 - -# Certificate subject (auto generated certificate) for token -# signing. (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com - - -[ssl] - -# -# Options defined in keystone -# - -# Toggle for SSL support on the Keystone eventlet servers. -# (boolean value) -#enable=false - -# Path of the certfile for SSL. 
For non-production -# environments, you may be interested in using `keystone- -# manage ssl_setup` to generate self-signed certificates. -# (string value) -#certfile=/etc/keystone/ssl/certs/keystone.pem - -# Path of the keyfile for SSL. (string value) -#keyfile=/etc/keystone/ssl/private/keystonekey.pem - -# Path of the ca cert file for SSL. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA key file for SSL. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Require client certificate. (boolean value) -#cert_required=false - -# SSL key length (in bits) (auto generated certificate). -# (integer value) -#key_size=1024 - -# Days the certificate is valid for once signed (auto -# generated certificate). (integer value) -#valid_days=3650 - -# SSL certificate subject (auto generated certificate). -# (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost - - -[stats] - -# -# Options defined in keystone -# - -# Stats backend driver. (string value) -#driver=keystone.contrib.stats.backends.kvs.Stats - - -[token] - -# -# Options defined in keystone -# - -# External auth mechanisms that should add bind information to -# token, e.g., kerberos,x509. (list value) -#bind= - -# Enforcement policy on tokens presented to Keystone with bind -# information. One of disabled, permissive, strict, required -# or a specifically required bind mode, e.g., kerberos or x509 -# to require binding to that authentication. (string value) -#enforce_token_bind=permissive - -# Amount of time a token should remain valid (in seconds). -# (integer value) -#expiration=3600 - -# Controls the token construction, validation, and revocation -# operations. Core providers are -# "keystone.token.providers.[pkiz|pki|uuid].Provider". The -# default provider is uuid. (string value) -#provider= - -# Token persistence backend driver. (string value) -#driver=keystone.token.persistence.backends.sql.Token - -# Toggle for token system caching. This has no effect unless -# global caching is enabled. (boolean value) -#caching=true - -# Time to cache the revocation list and the revocation events -# if revoke extension is enabled (in seconds). This has no -# effect unless global and token caching are enabled. (integer -# value) -#revocation_cache_time=3600 - -# Time to cache tokens (in seconds). This has no effect unless -# global and token caching are enabled. (integer value) -#cache_time= - -# Revoke token by token identifier. Setting revoke_by_id to -# true enables various forms of enumerating tokens, e.g. `list -# tokens for user`. These enumerations are processed to -# determine the list of tokens to revoke. Only disable if you -# are switching to using the Revoke extension with a backend -# other than KVS, which stores events in memory. (boolean -# value) -#revoke_by_id=true - -# The hash algorithm to use for PKI tokens. This can be set to -# any algorithm that hashlib supports. WARNING: Before -# changing this value, the auth_token middleware must be -# configured with the hash_algorithms, otherwise token -# revocation will not be processed correctly. (string value) -#hash_algorithm=md5 - - -[trust] - -# -# Options defined in keystone -# - -# Delegation and impersonation features can be optionally -# disabled. (boolean value) -#enabled=true - -# Trust backend driver. 
(string value) -#driver=keystone.trust.backends.sql.Trust - - diff --git a/resources/keystone_config/1.0.0/templates/logging.conf b/resources/keystone_config/1.0.0/templates/logging.conf deleted file mode 100644 index 6cb8c425..00000000 --- a/resources/keystone_config/1.0.0/templates/logging.conf +++ /dev/null @@ -1,65 +0,0 @@ -[loggers] -keys=root,access - -[handlers] -keys=production,file,access_file,devel - -[formatters] -keys=minimal,normal,debug - - -########### -# Loggers # -########### - -[logger_root] -level=WARNING -handlers=file - -[logger_access] -level=INFO -qualname=access -handlers=access_file - - -################ -# Log Handlers # -################ - -[handler_production] -class=handlers.SysLogHandler -level=ERROR -formatter=normal -args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) - -[handler_file] -class=handlers.WatchedFileHandler -level=WARNING -formatter=normal -args=('error.log',) - -[handler_access_file] -class=handlers.WatchedFileHandler -level=INFO -formatter=minimal -args=('access.log',) - -[handler_devel] -class=StreamHandler -level=NOTSET -formatter=debug -args=(sys.stdout,) - - -################## -# Log Formatters # -################## - -[formatter_minimal] -format=%(message)s - -[formatter_normal] -format=(%(name)s): %(asctime)s %(levelname)s %(message)s - -[formatter_debug] -format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/resources/keystone_config/1.0.0/templates/policy.json b/resources/keystone_config/1.0.0/templates/policy.json deleted file mode 100644 index af65205e..00000000 --- a/resources/keystone_config/1.0.0/templates/policy.json +++ /dev/null @@ -1,171 +0,0 @@ -{ - "admin_required": "role:admin or is_admin:1", - "service_role": "role:service", - "service_or_admin": "rule:admin_required or rule:service_role", - "owner" : "user_id:%(user_id)s", - "admin_or_owner": "rule:admin_required or rule:owner", - - "default": "rule:admin_required", - - "identity:get_region": "", - "identity:list_regions": "", - "identity:create_region": "rule:admin_required", - "identity:update_region": "rule:admin_required", - "identity:delete_region": "rule:admin_required", - - "identity:get_service": "rule:admin_required", - "identity:list_services": "rule:admin_required", - "identity:create_service": "rule:admin_required", - "identity:update_service": "rule:admin_required", - "identity:delete_service": "rule:admin_required", - - "identity:get_endpoint": "rule:admin_required", - "identity:list_endpoints": "rule:admin_required", - "identity:create_endpoint": "rule:admin_required", - "identity:update_endpoint": "rule:admin_required", - "identity:delete_endpoint": "rule:admin_required", - - "identity:get_domain": "rule:admin_required", - "identity:list_domains": "rule:admin_required", - "identity:create_domain": "rule:admin_required", - "identity:update_domain": "rule:admin_required", - "identity:delete_domain": "rule:admin_required", - - "identity:get_project": "rule:admin_required", - "identity:list_projects": "rule:admin_required", - "identity:list_user_projects": "rule:admin_or_owner", - "identity:create_project": "rule:admin_required", - "identity:update_project": "rule:admin_required", - "identity:delete_project": "rule:admin_required", - - "identity:get_user": "rule:admin_required", - "identity:list_users": "rule:admin_required", - "identity:create_user": "rule:admin_required", - "identity:update_user": "rule:admin_required", - "identity:delete_user": "rule:admin_required", - 
"identity:change_password": "rule:admin_or_owner", - - "identity:get_group": "rule:admin_required", - "identity:list_groups": "rule:admin_required", - "identity:list_groups_for_user": "rule:admin_or_owner", - "identity:create_group": "rule:admin_required", - "identity:update_group": "rule:admin_required", - "identity:delete_group": "rule:admin_required", - "identity:list_users_in_group": "rule:admin_required", - "identity:remove_user_from_group": "rule:admin_required", - "identity:check_user_in_group": "rule:admin_required", - "identity:add_user_to_group": "rule:admin_required", - - "identity:get_credential": "rule:admin_required", - "identity:list_credentials": "rule:admin_required", - "identity:create_credential": "rule:admin_required", - "identity:update_credential": "rule:admin_required", - "identity:delete_credential": "rule:admin_required", - - "identity:ec2_get_credential": "rule:admin_or_owner", - "identity:ec2_list_credentials": "rule:admin_or_owner", - "identity:ec2_create_credential": "rule:admin_or_owner", - "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", - - "identity:get_role": "rule:admin_required", - "identity:list_roles": "rule:admin_required", - "identity:create_role": "rule:admin_required", - "identity:update_role": "rule:admin_required", - "identity:delete_role": "rule:admin_required", - - "identity:check_grant": "rule:admin_required", - "identity:list_grants": "rule:admin_required", - "identity:create_grant": "rule:admin_required", - "identity:revoke_grant": "rule:admin_required", - - "identity:list_role_assignments": "rule:admin_required", - - "identity:get_policy": "rule:admin_required", - "identity:list_policies": "rule:admin_required", - "identity:create_policy": "rule:admin_required", - "identity:update_policy": "rule:admin_required", - "identity:delete_policy": "rule:admin_required", - - "identity:check_token": "rule:admin_required", - "identity:validate_token": "rule:service_or_admin", - "identity:validate_token_head": "rule:service_or_admin", - "identity:revocation_list": "rule:service_or_admin", - "identity:revoke_token": "rule:admin_or_owner", - - "identity:create_trust": "user_id:%(trust.trustor_user_id)s", - "identity:get_trust": "rule:admin_or_owner", - "identity:list_trusts": "", - "identity:list_roles_for_trust": "", - "identity:check_role_for_trust": "", - "identity:get_role_for_trust": "", - "identity:delete_trust": "", - - "identity:create_consumer": "rule:admin_required", - "identity:get_consumer": "rule:admin_required", - "identity:list_consumers": "rule:admin_required", - "identity:delete_consumer": "rule:admin_required", - "identity:update_consumer": "rule:admin_required", - - "identity:authorize_request_token": "rule:admin_required", - "identity:list_access_token_roles": "rule:admin_required", - "identity:get_access_token_role": "rule:admin_required", - "identity:list_access_tokens": "rule:admin_required", - "identity:get_access_token": "rule:admin_required", - "identity:delete_access_token": "rule:admin_required", - - "identity:list_projects_for_endpoint": "rule:admin_required", - "identity:add_endpoint_to_project": "rule:admin_required", - "identity:check_endpoint_in_project": "rule:admin_required", - "identity:list_endpoints_for_project": "rule:admin_required", - "identity:remove_endpoint_from_project": "rule:admin_required", - - "identity:create_endpoint_group": "rule:admin_required", - "identity:list_endpoint_groups": "rule:admin_required", - "identity:get_endpoint_group": 
"rule:admin_required", - "identity:update_endpoint_group": "rule:admin_required", - "identity:delete_endpoint_group": "rule:admin_required", - "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", - "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", - "identity:list_endpoint_groups_for_project": "rule:admin_required", - "identity:add_endpoint_group_to_project": "rule:admin_required", - "identity:remove_endpoint_group_from_project": "rule:admin_required", - - "identity:create_identity_provider": "rule:admin_required", - "identity:list_identity_providers": "rule:admin_required", - "identity:get_identity_providers": "rule:admin_required", - "identity:update_identity_provider": "rule:admin_required", - "identity:delete_identity_provider": "rule:admin_required", - - "identity:create_protocol": "rule:admin_required", - "identity:update_protocol": "rule:admin_required", - "identity:get_protocol": "rule:admin_required", - "identity:list_protocols": "rule:admin_required", - "identity:delete_protocol": "rule:admin_required", - - "identity:create_mapping": "rule:admin_required", - "identity:get_mapping": "rule:admin_required", - "identity:list_mappings": "rule:admin_required", - "identity:delete_mapping": "rule:admin_required", - "identity:update_mapping": "rule:admin_required", - - "identity:get_auth_catalog": "", - "identity:get_auth_projects": "", - "identity:get_auth_domains": "", - - "identity:list_projects_for_groups": "", - "identity:list_domains_for_groups": "", - - "identity:list_revoke_events": "", - - "identity:create_policy_association_for_endpoint": "rule:admin_required", - "identity:check_policy_association_for_endpoint": "rule:admin_required", - "identity:delete_policy_association_for_endpoint": "rule:admin_required", - "identity:create_policy_association_for_service": "rule:admin_required", - "identity:check_policy_association_for_service": "rule:admin_required", - "identity:delete_policy_association_for_service": "rule:admin_required", - "identity:create_policy_association_for_region_and_service": "rule:admin_required", - "identity:check_policy_association_for_region_and_service": "rule:admin_required", - "identity:delete_policy_association_for_region_and_service": "rule:admin_required", - "identity:get_policy_for_endpoint": "rule:admin_required", - "identity:list_endpoints_for_policy": "rule:admin_required" -} diff --git a/resources/keystone_puppet/1.0.0/README.md b/resources/keystone_puppet/1.0.0/README.md deleted file mode 100644 index 1db48d72..00000000 --- a/resources/keystone_puppet/1.0.0/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# `keystone_puppet` resource - -This resource implements inputs for the official OpenStack Keystone Puppet manifests -from https://github.com/openstack/puppet-keystone (`stable/juno` branch). - -Basic tests are present that test HTTP connectivity to the service. 
diff --git a/resources/keystone_puppet/1.0.0/actions/remove.pp b/resources/keystone_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 958de8c7..00000000 --- a/resources/keystone_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,4 +0,0 @@ -class {'keystone': - admin_token => '{{ admin_token }}', - package_ensure => 'absent' -} diff --git a/resources/keystone_puppet/1.0.0/actions/run.pp b/resources/keystone_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 73c818af..00000000 --- a/resources/keystone_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,28 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] -$admin_token = $resource['input']['admin_token'] -$db_user = $resource['input']['db_user'] -$db_host = $resource['input']['db_host'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_port = $resource['input']['db_port'] -$admin_port = $resource['input']['admin_port'] -$port = $resource['input']['port'] - -class {'keystone': - package_ensure => 'present', - verbose => true, - catalog_type => 'sql', - admin_token => $admin_token, - database_connection => "mysql://$db_user:$db_password@$db_host:$db_port/$db_name", - public_port => "$port", - admin_port => "$admin_port", - token_driver => 'keystone.token.persistence.backends.sql.Token' -} - -#file { '/etc/keystone/keystone-exports': -# owner => 'root', -# group => 'root', -# content => template('keystone/exports.erb') -#} diff --git a/resources/keystone_puppet/1.0.0/actions/update.pp b/resources/keystone_puppet/1.0.0/actions/update.pp deleted file mode 100644 index c295c3a7..00000000 --- a/resources/keystone_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,21 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] -$admin_token = $resource['input']['admin_token'] -$db_user = $resource['input']['db_user'] -$db_host = $resource['input']['db_host'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_port = $resource['input']['db_port'] -$admin_port = $resource['input']['admin_port'] -$port = $resource['input']['port'] - -class {'keystone': - package_ensure => 'present', - verbose => true, - catalog_type => 'sql', - admin_token => $admin_token, - database_connection => "mysql://$db_user:$db_password@$db_host:$db_port/$db_name", - public_port => "$port", - admin_port => "$admin_port", -} diff --git a/resources/keystone_puppet/1.0.0/meta.yaml b/resources/keystone_puppet/1.0.0/meta.yaml deleted file mode 100644 index e395c31e..00000000 --- a/resources/keystone_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,44 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - admin_token: - schema: str! - value: admin_token - db_user: - schema: str! - value: - db_password: - schema: str! - value: - db_name: - schema: str! - value: - db_host: - schema: str! - value: - db_port: - schema: int! - value: - - admin_port: - schema: int! - value: 35357 - port: - schema: int! - value: 5000 - - module: - schema: {name: str!, type: str, url: str, ref: str} - value: {name: 'keystone', type: 'git', url: 'https://github.com/openstack/puppet-keystone', ref: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/keystone_service, resources/keystone] diff --git a/resources/keystone_puppet/1.0.0/test.py b/resources/keystone_puppet/1.0.0/test.py deleted file mode 100644 index 67673116..00000000 --- a/resources/keystone_puppet/1.0.0/test.py +++ /dev/null @@ -1,10 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing keystone_puppet') - requests.get( - 'http://%s:%s' % (resource.args['ip'], resource.args['port']) - ) diff --git a/resources/keystone_role/1.0.0/actions/remove.yaml b/resources/keystone_role/1.0.0/actions/remove.yaml deleted file mode 100644 index 6b723faf..00000000 --- a/resources/keystone_role/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: keystone role - #TODO: not implemented in module - pause: seconds=1 diff --git a/resources/keystone_role/1.0.0/actions/run.yaml b/resources/keystone_role/1.0.0/actions/run.yaml deleted file mode 100644 index 983185d4..00000000 --- a/resources/keystone_role/1.0.0/actions/run.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: install python-keystoneclient - shell: apt-get install python-keystoneclient - - name: keystone role - keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} user={{user_name}} tenant={{tenant_name}} role={{role_name}} state=present diff --git a/resources/keystone_role/1.0.0/meta.yaml b/resources/keystone_role/1.0.0/meta.yaml deleted file mode 100644 index e7340fc6..00000000 --- a/resources/keystone_role/1.0.0/meta.yaml +++ /dev/null @@ -1,32 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - keystone_host: - schema: str! - value: - keystone_port: - schema: int! - value: - admin_token: - schema: str! - value: - user_name: - schema: str! - value: admin - tenant_name: - schema: str! - value: - role_name: - schema: str! - value: admin - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/keystone_role, resources/keystone] diff --git a/resources/keystone_service/1.0.0/README.md b/resources/keystone_service/1.0.0/README.md deleted file mode 100644 index aac0b259..00000000 --- a/resources/keystone_service/1.0.0/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# `keystone_service` resource - -This resource sets up a Docker container with Keystone code. It requires -config to be provided by the `keystone_config` resource (mounted under -`/etc/keystone`). - -Basically, the philosophy behind containers in Solar is to have stateless -containers with service code and mount stateful resources with config, -volumes, etc. to that container. Upgrade of code then would be just about -replacing the stateless container with new one and remounting state to that -new container. 
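
Concretely, the pattern described above boils down to running the stateless image with the Keystone config directory mounted in and the API ports published. A rough sketch, assuming the `docker` CLI is available and using the default image, config dir, and port values from the resource's `meta.yaml` below (the resource itself drives this through the Ansible `docker` module in `run.yaml`, not through subprocess):

```python
# Rough equivalent of the stateless-container pattern: code lives in the image,
# state (the config) is mounted in from the host. Values are the resource defaults.
import subprocess

IMAGE = 'kollaglue/centos-rdo-j-keystone'   # default `image` input
CONFIG_DIR = '/etc/solar/keystone'          # default `config_dir` input, produced by keystone_config

subprocess.check_call([
    'docker', 'run', '-d',
    '--name', 'keystone_service_1',            # hypothetical container name
    '-p', '5000:5000', '-p', '35357:35357',    # public and admin API ports
    '-v', '%s:/etc/keystone' % CONFIG_DIR,     # mount the stateful config into the container
    IMAGE,
    '/bin/bash', '-c', 'keystone-manage db_sync && /usr/bin/keystone-all',
])
```

An upgrade then only replaces the container started here; the mounted `CONFIG_DIR` on the host stays untouched.
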
diff --git a/resources/keystone_service/1.0.0/actions/remove.yaml b/resources/keystone_service/1.0.0/actions/remove.yaml deleted file mode 100644 index 73ef93a9..00000000 --- a/resources/keystone_service/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# TODO -- hosts: [{{host}}] - sudo: yes - tasks: - - name: keystone container - docker: - image: {{ image }} - name: {{ resource_name }} - state: absent diff --git a/resources/keystone_service/1.0.0/actions/run.yaml b/resources/keystone_service/1.0.0/actions/run.yaml deleted file mode 100644 index 49eb2e0b..00000000 --- a/resources/keystone_service/1.0.0/actions/run.yaml +++ /dev/null @@ -1,19 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: keystone container - docker: - command: /bin/bash -c "keystone-manage db_sync && /usr/bin/keystone-all" - name: {{ resource_name }} - image: {{ image }} - state: running - expose: - - 5000 - - 35357 - ports: - - {{ port }}:5000 - - {{ admin_port }}:35357 - volumes: - - {{ config_dir }}:/etc/keystone - - name: wait for keystone - wait_for: host={{ip}} port={{port}} timeout=20 diff --git a/resources/keystone_service/1.0.0/meta.yaml b/resources/keystone_service/1.0.0/meta.yaml deleted file mode 100644 index 8213e640..00000000 --- a/resources/keystone_service/1.0.0/meta.yaml +++ /dev/null @@ -1,26 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - image: - schema: str! - value: kollaglue/centos-rdo-j-keystone - config_dir: - schema: str! - value: /etc/solar/keystone - port: - schema: int! - value: 5000 - admin_port: - schema: int! - value: 35357 - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/keystone_service, resources/keystone] diff --git a/resources/keystone_service/1.0.0/test.py b/resources/keystone_service/1.0.0/test.py deleted file mode 100644 index 26268c7c..00000000 --- a/resources/keystone_service/1.0.0/test.py +++ /dev/null @@ -1,10 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing keystone_service') - requests.get( - 'http://%s:%s' % (resource.args['ip'], resource.args['port']) - ) diff --git a/resources/keystone_service_endpoint/1.0.0/actions/remove.yaml b/resources/keystone_service_endpoint/1.0.0/actions/remove.yaml deleted file mode 100644 index 8d366fa5..00000000 --- a/resources/keystone_service_endpoint/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,20 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - vars: - ip: {{ip}} - port: {{port}} - admin_port: {{admin_port}} - tasks: - - name: remove keystone service and endpoint - keystone_service: - token: {{admin_token}} - name: {{name}} - type: {{type}} - description: {{description}} - publicurl: {{publicurl}} - internalurl: {{internalurl}} - adminurl: {{adminurl}} - region: "RegionOne" - state: present - endpoint: http://{{keystone_host}}:{{keystone_admin_port}}/v2.0/ - diff --git a/resources/keystone_service_endpoint/1.0.0/actions/run.yaml b/resources/keystone_service_endpoint/1.0.0/actions/run.yaml deleted file mode 100644 index 2bc3143d..00000000 --- a/resources/keystone_service_endpoint/1.0.0/actions/run.yaml +++ /dev/null @@ -1,15 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - tasks: - - name: keystone service and endpoint - keystone_service: - token: {{admin_token}} - name: {{endpoint_name}} - type: {{type}} - description: {{description}} - publicurl: {{publicurl}} - internalurl: {{internalurl}} - adminurl: {{adminurl}} - region: "RegionOne" - state: present - endpoint: 
http://{{keystone_host}}:{{keystone_admin_port}}/v2.0/ diff --git a/resources/keystone_service_endpoint/1.0.0/meta.yaml b/resources/keystone_service_endpoint/1.0.0/meta.yaml deleted file mode 100644 index b543a85e..00000000 --- a/resources/keystone_service_endpoint/1.0.0/meta.yaml +++ /dev/null @@ -1,61 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - keystone_host: - schema: str! - value: - keystone_admin_port: - schema: int! - value: - admin_token: - schema: str! - value: - - endpoint_name: - schema: str! - value: - type: - schema: str! - value: - description: - schema: str! - value: - public_ip: - schema: str! - value: - public_port: - schema: int! - value: - publicurl: - schema: str! - value: http://{{public_ip}}:{{public_port}}/v2.0 - internal_ip: - schema: str! - value: - internal_port: - schema: int! - value: - internalurl: - schema: str! - value: http://{{internal_ip}}:{{internal_port}}/v2.0 - admin_ip: - schema: str! - value: - admin_port: - schema: int! - value: - adminurl: - schema: str! - value: http://{{admin_ip}}:{{admin_port}}/v2.0 - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/keystone_service_endpoint, resources/keystone] diff --git a/resources/keystone_service_endpoint/1.0.0/test.py b/resources/keystone_service_endpoint/1.0.0/test.py deleted file mode 100644 index cc2cc600..00000000 --- a/resources/keystone_service_endpoint/1.0.0/test.py +++ /dev/null @@ -1,48 +0,0 @@ -import jinja2 -import json -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing keystone_service_endpoint %s', resource.name) - - resp = requests.get( - 'http://%s:%s/v3/services' % (resource.args['ip'], resource.args['keystone_admin_port']), - headers={ - 'X-Auth-Token': resource.args['admin_token'], - } - ) - - resp_json = resp.json() - assert 'services' in resp_json - - service = [s for s in resp_json['services'] if s['name'] == resource.args['endpoint_name']][0] - service_id = service['id'] - - assert service['description'] == resource.args['description'] - - log.debug('%s service: %s', resource.name, json.dumps(service, indent=2)) - - resp = requests.get( - 'http://%s:%s/v3/endpoints' % (resource.args['ip'], resource.args['keystone_admin_port']), - headers={ - 'X-Auth-Token': resource.args['admin_token'], - } - ) - - resp_json = resp.json() - assert 'endpoints' in resp_json - - endpoints = {} - - for endpoint in resp_json['endpoints']: - if endpoint['service_id'] == service_id: - endpoints[endpoint['interface']] = endpoint - - assert jinja2.Template(resource.args['adminurl']).render(**resource.args) == endpoints['admin']['url'] - assert jinja2.Template(resource.args['internalurl']).render(**resource.args) == endpoints['internal']['url'] - assert jinja2.Template(resource.args['publicurl']).render(**resource.args) == endpoints['public']['url'] - - log.debug('%s endpoints: %s', resource.name, json.dumps(endpoints, indent=2)) diff --git a/resources/keystone_tenant/1.0.0/actions/remove.yaml b/resources/keystone_tenant/1.0.0/actions/remove.yaml deleted file mode 100644 index c41de2d4..00000000 --- a/resources/keystone_tenant/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: keystone tenant - keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} tenant={{tenant_name}} state=absent diff --git a/resources/keystone_tenant/1.0.0/actions/run.yaml 
b/resources/keystone_tenant/1.0.0/actions/run.yaml deleted file mode 100644 index 925522fd..00000000 --- a/resources/keystone_tenant/1.0.0/actions/run.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: install python-keystoneclient - shell: apt-get install python-keystoneclient - - name: keystone tenant - keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} tenant={{tenant_name}} state=present diff --git a/resources/keystone_tenant/1.0.0/meta.yaml b/resources/keystone_tenant/1.0.0/meta.yaml deleted file mode 100644 index e01a9fea..00000000 --- a/resources/keystone_tenant/1.0.0/meta.yaml +++ /dev/null @@ -1,26 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - keystone_host: - schema: str! - value: - keystone_port: - schema: int! - value: - admin_token: - schema: str! - value: - tenant_name: - schema: str! - value: admin - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/keystone_tenant, resources/keystone] diff --git a/resources/keystone_user/1.0.0/actions/remove.yaml b/resources/keystone_user/1.0.0/actions/remove.yaml deleted file mode 100644 index 8748d491..00000000 --- a/resources/keystone_user/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: keystone user - #TODO: not implemented in module - pause: seconds=1 diff --git a/resources/keystone_user/1.0.0/actions/run.yaml b/resources/keystone_user/1.0.0/actions/run.yaml deleted file mode 100644 index ecccca58..00000000 --- a/resources/keystone_user/1.0.0/actions/run.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: install python-keystoneclient - shell: apt-get install python-keystoneclient - - name: keystone user - keystone_user: endpoint=http://{{ keystone_host }}:{{ keystone_port }}/v2.0/ token={{ admin_token }} user={{ user_name }} password={{ user_password }} tenant={{ tenant_name }} state=present diff --git a/resources/keystone_user/1.0.0/meta.yaml b/resources/keystone_user/1.0.0/meta.yaml deleted file mode 100644 index 67237233..00000000 --- a/resources/keystone_user/1.0.0/meta.yaml +++ /dev/null @@ -1,32 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - keystone_host: - schema: str! - value: - keystone_port: - schema: int! - value: - admin_token: - schema: str! - value: - user_name: - schema: str! - value: admin - user_password: - schema: str! - value: admin - tenant_name: - schema: str! - value: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/keystone_user, resources/keystone] diff --git a/resources/keystone_user/1.0.0/test.py b/resources/keystone_user/1.0.0/test.py deleted file mode 100644 index 21054ddb..00000000 --- a/resources/keystone_user/1.0.0/test.py +++ /dev/null @@ -1,18 +0,0 @@ -import requests - -from solar.core.log import log -from solar.core import validation - - -def test(resource): - log.debug('Testing keystone_user %s', resource.args['user_name']) - - args = resource.args - - token, _ = validation.validate_token( - keystone_host=args['keystone_host'], - keystone_port=args['keystone_port'], - user=args['user_name'], - tenant=args['tenant_name'], - password=args['user_password'], - ) diff --git a/resources/librarian/0.0.1/actions/remove.yaml b/resources/librarian/0.0.1/actions/remove.yaml deleted file mode 100644 index c948ead8..00000000 --- a/resources/librarian/0.0.1/actions/remove.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - file: path={{modules_path}} state=absent - - file: path={{modules_path}}/../Puppetfile state=absent - - shell: gem uninstall librarian-puppet diff --git a/resources/librarian/0.0.1/actions/run.yaml b/resources/librarian/0.0.1/actions/run.yaml deleted file mode 100644 index 391e6f8a..00000000 --- a/resources/librarian/0.0.1/actions/run.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - # XXX: check apt package http://packages.ubuntu.com/trusty/ruby/librarian-puppet - - shell: gem install librarian-puppet --no-ri --no-rdoc - - file: path={{modules_path}} state=directory - - template: - src={{templates_dir}}/Puppetfile - dest={{modules_path}}/../Puppetfile - - shell: librarian-puppet install chdir={{modules_path}} diff --git a/resources/librarian/0.0.1/meta.yaml b/resources/librarian/0.0.1/meta.yaml deleted file mode 100644 index b9d8ad28..00000000 --- a/resources/librarian/0.0.1/meta.yaml +++ /dev/null @@ -1,16 +0,0 @@ -handler: ansible -version: 0.0.1 -actions: - run: run.yaml - update: run.yaml - remove: remove.yaml -input: - modules: - schema: [{}] - value: [] - modules_path: - schema: str! - value: /etc/puppet/modules - forge: - schema: str! 
- value: https://forgeapi.puppetlabs.com diff --git a/resources/librarian/0.0.1/templates/Puppetfile b/resources/librarian/0.0.1/templates/Puppetfile deleted file mode 100644 index 90ba1d92..00000000 --- a/resources/librarian/0.0.1/templates/Puppetfile +++ /dev/null @@ -1,24 +0,0 @@ -forge "{{forge}}" - -{%- for module in modules %} - - {% if 'type' not in module or module.type == 'forge' -%} -mod '{{module.name}}' - {%- if 'version' in module -%} -, '{{module.version}}' - {%- endif -%} - {%- endif -%} - - {%- if 'type' in module and module.type == 'git' -%} -mod '{{module.name}}' - {%- if 'url' in module -%} -, - :git => '{{module.url}}' - {%- endif -%} - {%- if 'ref' in module -%} -, - :ref => '{{module.ref}}' - {%- endif -%} - {%- endif -%} - -{%- endfor -%} diff --git a/resources/lxc_container/1.0.0/actions/run.yaml b/resources/lxc_container/1.0.0/actions/run.yaml deleted file mode 100644 index 9b8a4b6a..00000000 --- a/resources/lxc_container/1.0.0/actions/run.yaml +++ /dev/null @@ -1,25 +0,0 @@ -- hosts: '*' - sudo: yes - gather_facts: false - # this is default variables, they will be overwritten by resource one - vars: - ansible_ssh_host: 10.0.0.3 - physical_host: 10.0.0.3 - container_name: test3 - inventory_hostname: test3 - properties: - container_release: trusty - container_networks: - mgmt: - address: 172.18.10.6 - bridge: br-test0 - bridge_address: 172.18.10.252/24 - interface: eth1 - netmask: 255.255.255.0 - type: veth - pub_key: '' - pre_tasks: - - set_fact: - lxc_container_ssh_key: "{{ lookup('file', pub_key) }}" - roles: - - { role: "lxc_container_create", tags: [ "lxc-container-create" ] } diff --git a/resources/lxc_container/1.0.0/meta.yaml b/resources/lxc_container/1.0.0/meta.yaml deleted file mode 100644 index d2e66fbe..00000000 --- a/resources/lxc_container/1.0.0/meta.yaml +++ /dev/null @@ -1,54 +0,0 @@ -handler: ansible_playbook -version: 1.0.0 -actions: -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - ansible_ssh_host: - schema: str! - value: - user: - schema: str! - value: - user_key: - schema: str! - value: - mgmt_ip: - schema: str! - value: - physical_host: - schema: str! - value: - container_address: - schema: str! - value: - container_name: - schema: str! - value: - inventory_hostname: - schema: str! - value: - container_networks: - schema: {} - value: - properties: - schema: {} - value: - pub_key: - schema: str! - value: - requires: - schema: str - value: - roles: - schema: [{value: str}] - value: - - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/lxc_container_create - - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/lxc_container_destroy diff --git a/resources/lxc_host/1.0.0/actions/run.yaml b/resources/lxc_host/1.0.0/actions/run.yaml deleted file mode 100644 index 64805e29..00000000 --- a/resources/lxc_host/1.0.0/actions/run.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: '*' - sudo: yes - roles: - - { role: "lxc_hosts", tags: [ "lxc-host", "host-setup" ] } - post_tasks: - - shell: pip install git+https://github.com/lxc/python2-lxc.git#egg=lxc \ No newline at end of file diff --git a/resources/lxc_host/1.0.0/meta.yaml b/resources/lxc_host/1.0.0/meta.yaml deleted file mode 100644 index 2852cc36..00000000 --- a/resources/lxc_host/1.0.0/meta.yaml +++ /dev/null @@ -1,22 +0,0 @@ -handler: ansible_playbook -version: 1.0.0 -actions: -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - provides: - schema: str - value: infra - roles: - schema: [{value: str}] - value: - - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/lxc_hosts - - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/pip_install - - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/apt_package_pinning diff --git a/resources/managed_apt/1.0.0/actions/run.yaml b/resources/managed_apt/1.0.0/actions/run.yaml deleted file mode 100644 index 2776ef02..00000000 --- a/resources/managed_apt/1.0.0/actions/run.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: echo 'Managed by solar' > /etc/apt/sources.list - when: {{ensure_other_removed}} - - shell: apt-get update - when: {{ensure_other_removed}} diff --git a/resources/managed_apt/1.0.0/meta.yaml b/resources/managed_apt/1.0.0/meta.yaml deleted file mode 100644 index 7ac8f122..00000000 --- a/resources/managed_apt/1.0.0/meta.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# This resource will clean -handler: ansible -version: 1.0.0 -input: - ip: - schema: str! - value: - repos: - schema: [str!] - value: - names: - schema: [str!] - value: - ensure_other_removed: - schema: bool - value: true diff --git a/resources/mariadb_db/1.0.0/actions/remove.yaml b/resources/mariadb_db/1.0.0/actions/remove.yaml deleted file mode 100644 index ca5f9601..00000000 --- a/resources/mariadb_db/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,13 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: mariadb db - mysql_db: - name: {{db_name}} - state: absent - login_user: root - login_password: {{login_password}} - login_port: {{login_port}} - login_host: {{db_host}} - collation: {{collation}} - encoding: {{encoding}} diff --git a/resources/mariadb_db/1.0.0/actions/run.yaml b/resources/mariadb_db/1.0.0/actions/run.yaml deleted file mode 100644 index 11adc5ba..00000000 --- a/resources/mariadb_db/1.0.0/actions/run.yaml +++ /dev/null @@ -1,13 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: mariadb db - mysql_db: - name: {{ db_name }} - state: present - login_user: root - login_password: {{ login_password }} - login_port: {{ login_port }} - login_host: {{db_host}} - collation: {{collation}} - encoding: {{encoding}} diff --git a/resources/mariadb_db/1.0.0/meta.yaml b/resources/mariadb_db/1.0.0/meta.yaml deleted file mode 100644 index 130c67a8..00000000 --- a/resources/mariadb_db/1.0.0/meta.yaml +++ /dev/null @@ -1,39 +0,0 @@ -handler: ansible -version: 1.0.0 -actions: - run: run.yaml - remove: remove.yaml - update: run.yaml -input: - db_name: - schema: str! - value: - db_host: - schema: str! - value: - - login_user: - schema: str! - value: - login_password: - schema: str! - value: - login_port: - schema: int! - value: - collation: - schema: str - value: 'utf8_general_ci' - encoding: - schema: str - value: 'utf8' - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: diff --git a/resources/mariadb_service/1.0.0/actions/remove.yaml b/resources/mariadb_service/1.0.0/actions/remove.yaml deleted file mode 100644 index 0ec5e3ff..00000000 --- a/resources/mariadb_service/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: mariadb container - docker: - name: {{ resource_name }} - image: {{ image }} - state: absent - - file: path=/var/lib/docker/data/{{resource_name}} state=absent diff --git a/resources/mariadb_service/1.0.0/actions/run.yaml b/resources/mariadb_service/1.0.0/actions/run.yaml deleted file mode 100644 index 0719a98b..00000000 --- a/resources/mariadb_service/1.0.0/actions/run.yaml +++ /dev/null @@ -1,22 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - file: path=/var/lib/docker/data/{{resource_name}} state=directory - - name: mariadb container - docker: - name: {{ resource_name }} - image: {{ image }} - state: reloaded - ports: - - {{ port }}:3306 - env: - MYSQL_ROOT_PASSWORD: {{ root_password }} - volumes: - - /var/lib/docker/data/{{resource_name}}:/var/lib/mysql - - - shell: docker exec -t {{ resource_name }} mysql -p{{ root_password }} -uroot -e "SELECT 1" - register: result - until: result.rc == 0 - retries: 30 - delay: 0.5 - diff --git a/resources/mariadb_service/1.0.0/meta.yaml b/resources/mariadb_service/1.0.0/meta.yaml deleted file mode 100644 index a64c9361..00000000 --- a/resources/mariadb_service/1.0.0/meta.yaml +++ /dev/null @@ -1,29 +0,0 @@ -handler: ansible -version: 1.0.0 -actions: - run: run.yaml - update: run.yaml -input: - image: - schema: str! - value: mariadb - root_user: - schema: str! - value: root - root_password: - schema: str! - value: mariadb - port: - schema: int! - value: 3306 - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
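# With the defaults declared above (image: mariadb, port: 3306, root_password:
# mariadb) and a hypothetical resource name and host, run.yaml renders to a
# playbook that starts the container and polls it until mysql answers:
- hosts: [node1]
  sudo: yes
  tasks:
    - file: path=/var/lib/docker/data/mariadb1 state=directory
    - name: mariadb container
      docker:
        name: mariadb1
        image: mariadb
        state: reloaded
        ports:
          - 3306:3306
        env:
          MYSQL_ROOT_PASSWORD: mariadb
        volumes:
          - /var/lib/docker/data/mariadb1:/var/lib/mysql
    - shell: docker exec -t mariadb1 mysql -pmariadb -uroot -e "SELECT 1"
      register: result
      until: result.rc == 0
      retries: 30
      delay: 0.5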
- # value: - -tags: [resource/mariadb_service, resources/mariadb] diff --git a/resources/mariadb_user/1.0.0/actions/remove.yaml b/resources/mariadb_user/1.0.0/actions/remove.yaml deleted file mode 100644 index e07cd07d..00000000 --- a/resources/mariadb_user/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: mariadb user - mysql_user: - name: {{user_name}} - state: absent - login_user: root - login_password: {{login_password}} - login_port: {{login_port}} - login_host: {{db_host}} diff --git a/resources/mariadb_user/1.0.0/actions/run.yaml b/resources/mariadb_user/1.0.0/actions/run.yaml deleted file mode 100644 index 8ae46019..00000000 --- a/resources/mariadb_user/1.0.0/actions/run.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: mariadb user - mysql_user: - name: {{ user_name }} - password: {{ user_password }} - priv: {{ db_name }}.*:ALL - host: '%' - state: present - login_user: root - login_password: {{ login_password }} - login_port: {{ login_port }} - login_host: {{db_host}} diff --git a/resources/mariadb_user/1.0.0/actions/update.yaml b/resources/mariadb_user/1.0.0/actions/update.yaml deleted file mode 100644 index 3c0831f8..00000000 --- a/resources/mariadb_user/1.0.0/actions/update.yaml +++ /dev/null @@ -1,15 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - name: mariadb user - mysql_user: - name: {{ user_name }} - password: {{ user_password }} - priv: {{ db_name }}.*:ALL - host: '%' - state: present - update_password: always - login_user: root - login_password: {{ login_password }} - login_port: {{ login_port }} - login_host: {{db_host}} diff --git a/resources/mariadb_user/1.0.0/meta.yaml b/resources/mariadb_user/1.0.0/meta.yaml deleted file mode 100644 index 1a6774db..00000000 --- a/resources/mariadb_user/1.0.0/meta.yaml +++ /dev/null @@ -1,40 +0,0 @@ -handler: ansible -version: 1.0.0 -actions: - run: run.yaml - update: update.yaml - remove: remove.yaml -input: - user_password: - schema: str! - value: - user_name: - schema: str! - value: - - db_name: - schema: str! - value: - db_host: - schema: str! - value: - - login_password: - schema: str! - value: - login_port: - schema: int! - value: - login_user: - schema: str! - value: - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: diff --git a/resources/neutron_agents_dhcp_puppet/1.0.0/README.md b/resources/neutron_agents_dhcp_puppet/1.0.0/README.md deleted file mode 100644 index ea1fcedb..00000000 --- a/resources/neutron_agents_dhcp_puppet/1.0.0/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# Neutron DHCP agent puppet resource - -Installs and configures the Neutron DHCP service - -# Parameters - -https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/dhcp.pp - - ``package_ensure`` - (optional) Ensure state for package. Defaults to 'present'. - - ``debug`` - (optional) Show debugging output in log. Defaults to false. - - ``state_path`` - (optional) Where to store dnsmasq state files. This directory must be - writable by the user executing the agent. Defaults to '/var/lib/neutron'. - - ``resync_interval`` - (optional) The DHCP agent will resync its state with Neutron to recover - from any transient notification or rpc errors. The interval is number of - seconds between attempts. Defaults to 30. - - ``interface_driver`` - (optional) Defaults to 'neutron.agent.linux.interface.OVSInterfaceDriver'. 
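 The parameters documented here correspond one-to-one to the inputs declared in this resource's meta.yaml further below; overriding a couple of them keeps the same schema/value layout, for example (the values shown are illustrative, not the defaults):

    debug:
      schema: bool
      value: true
    resync_interval:
      schema: int
      value: 60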
- - ``dhcp_driver`` - (optional) Defaults to 'neutron.agent.linux.dhcp.Dnsmasq'. - - ``root_helper`` - (optional) Defaults to 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf'. - Addresses bug: https://bugs.launchpad.net/neutron/+bug/1182616 - Note: This can safely be removed once the module only targets the Havana release. - - ``use_namespaces`` - (optional) Allow overlapping IP (Must have kernel build with - CONFIG_NET_NS=y and iproute2 package that supports namespaces). - Defaults to true. - - ``dnsmasq_config_file`` - (optional) Override the default dnsmasq settings with this file. - Defaults to undef - - ``dhcp_delete_namespaces`` - (optional) Delete namespace after removing a dhcp server - Defaults to false. - - ``enable_isolated_metadata`` - (optional) enable metadata support on isolated networks. - Defaults to false. - - ``enable_metadata_network`` - (optional) Allows for serving metadata requests coming from a dedicated metadata - access network whose cidr is 169.254.169.254/16 (or larger prefix), and is - connected to a Neutron router from which the VMs send metadata request. - This option requires enable_isolated_metadata = True - Defaults to false. \ No newline at end of file diff --git a/resources/neutron_agents_dhcp_puppet/1.0.0/actions/remove.pp b/resources/neutron_agents_dhcp_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 72dc2b1a..00000000 --- a/resources/neutron_agents_dhcp_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,16 +0,0 @@ -class { 'neutron::agents::dhcp': - package_ensure => 'absent', - enabled => false, -} - -include neutron::params - -package { 'neutron': - ensure => 'absent', - name => $::neutron::params::package_name, -} - -# Remove external class dependency -Service <| title == 'neutron-dhcp-service' |> { - require => undef -} \ No newline at end of file diff --git a/resources/neutron_agents_dhcp_puppet/1.0.0/actions/run.pp b/resources/neutron_agents_dhcp_puppet/1.0.0/actions/run.pp deleted file mode 100644 index b33e087d..00000000 --- a/resources/neutron_agents_dhcp_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,45 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$package_ensure = $resource['input']['package_ensure'] -$debug = $resource['input']['debug'] -$state_path = $resource['input']['state_path'] -$resync_interval = $resource['input']['resync_interval'] -$interface_driver = $resource['input']['interface_driver'] -$dhcp_driver = $resource['input']['dhcp_driver'] -$root_helper = $resource['input']['root_helper'] -$use_namespaces = $resource['input']['use_namespaces'] -$dnsmasq_config_file = $resource['input']['dnsmasq_config_file'] -$dhcp_delete_namespaces = $resource['input']['dhcp_delete_namespaces'] -$enable_isolated_metadata = $resource['input']['enable_isolated_metadata'] -$enable_metadata_network = $resource['input']['enable_metadata_network'] - -class { 'neutron::agents::dhcp': - enabled => true, - manage_service => true, - package_ensure => $package_ensure, - debug => $debug, - state_path => $state_path, - resync_interval => $resync_interval, - interface_driver => $interface_driver, - dhcp_driver => $dhcp_driver, - root_helper => $root_helper, - use_namespaces => $use_namespaces, - dnsmasq_config_file => $dnsmasq_config_file, - dhcp_delete_namespaces => $dhcp_delete_namespaces, - enable_isolated_metadata => $enable_isolated_metadata, - enable_metadata_network => $enable_metadata_network, -} - -include neutron::params - -package { 'neutron': - ensure => $package_ensure, - name => 
$::neutron::params::package_name, -} - -# Remove external class dependency -Service <| title == 'neutron-dhcp-service' |> { - require => undef -} \ No newline at end of file diff --git a/resources/neutron_agents_dhcp_puppet/1.0.0/meta.yaml b/resources/neutron_agents_dhcp_puppet/1.0.0/meta.yaml deleted file mode 100644 index bce2698d..00000000 --- a/resources/neutron_agents_dhcp_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,55 +0,0 @@ -handler: puppet -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - package_ensure: - schema: str - value: present - debug: - schema: bool - value: false - state_path: - schema: str - value: '/var/lib/neutron' - resync_interval: - schema: int - value: 30 - interface_driver: - schema: str - value: 'neutron.agent.linux.interface.OVSInterfaceDriver' - dhcp_driver: - schema: str - value: 'neutron.agent.linux.dhcp.Dnsmasq' - root_helper: - schema: str - value: 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' - use_namespaces: - schema: bool - value: true - dnsmasq_config_file: - schema: str - value: - dhcp_delete_namespaces: - schema: bool - value: false - enable_isolated_metadata: - schema: bool - value: false - enable_metadata_network: - schema: bool - value: false - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} - -tags: [resource/neutron, resource/neutron_agents_dhcp] -version: 1.0.0 diff --git a/resources/neutron_agents_l3_puppet/1.0.0/README.md b/resources/neutron_agents_l3_puppet/1.0.0/README.md deleted file mode 100644 index 4a74b464..00000000 --- a/resources/neutron_agents_l3_puppet/1.0.0/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Neutron L3 agent puppet resource - -Installs and configures the Neutron L3 service -TODO: create ability to have multiple L3 services - -# Parameters - -https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/l3.pp - - ``package_ensure`` - (optional) The state of the package - Defaults to present - - ``debug`` - (optional) Print debug info in logs - Defaults to false - - ``external_network_bridge`` - (optional) The name of the external bridge - Defaults to br-ex - - ``use_namespaces`` - (optional) Enable overlapping IPs / network namespaces - Defaults to false - - ``interface_driver`` - (optional) Driver to interface with neutron - Defaults to OVSInterfaceDriver - - ``router_id`` - (optional) The ID of the external router in neutron - Defaults to blank - - ``gateway_external_network_id`` - (optional) The ID of the external network in neutron - Defaults to blank - - ``handle_internal_only_routers`` - (optional) L3 Agent will handle non-external routers - Defaults to true - - ``metadata_port`` - (optional) The port of the metadata server - Defaults to 9697 - - ``send_arp_for_ha`` - (optional) Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 - to disable this feature. 
- Defaults to 3 - - ``periodic_interval`` - (optional) seconds between re-sync routers' data if needed - Defaults to 40 - - ``periodic_fuzzy_delay`` - (optional) seconds to start to sync routers' data after starting agent - Defaults to 5 - - ``enable_metadata_proxy`` - (optional) can be set to False if the Nova metadata server is not available - Defaults to True - - ``network_device_mtu`` - (optional) The MTU size for the interfaces managed by the L3 agent - Defaults to undef - Should be deprecated in the next major release in favor of a global parameter - - ``router_delete_namespaces`` - (optional) namespaces can be deleted cleanly on the host running the L3 agent - Defaults to False - - ``ha_enabled`` - (optional) Enabled or not HA for L3 agent. - Defaults to false - - ``ha_vrrp_auth_type`` - (optional) VRRP authentication type. Can be AH or PASS. - Defaults to "PASS" - - ``ha_vrrp_auth_password`` - (optional) VRRP authentication password. Required if ha_enabled = true. - Defaults to undef - - ``ha_vrrp_advert_int`` - (optional) The advertisement interval in seconds. - Defaults to '2' - - ``agent_mode`` - (optional) The working mode for the agent. - 'legacy': default behavior (without DVR) - 'dvr': enable DVR for an L3 agent running on compute node (DVR in production) - 'dvr_snat': enable DVR with centralized SNAT support (DVR for single-host, for testing only) - Defaults to 'legacy' - - ``allow_automatic_l3agent_failover`` - (optional) Automatically reschedule routers from offline L3 agents to online - L3 agents. - This is another way to run virtual routers in highly available way but with slow - failover performances compared to Keepalived feature in Neutron L3 Agent. - Defaults to 'False' \ No newline at end of file diff --git a/resources/neutron_agents_l3_puppet/1.0.0/actions/remove.pp b/resources/neutron_agents_l3_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 11b04be2..00000000 --- a/resources/neutron_agents_l3_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,16 +0,0 @@ -class { 'neutron::agents::l3': - package_ensure => 'absent', - enabled => false, -} - -include neutron::params - -package { 'neutron': - ensure => 'absent', - name => $::neutron::params::package_name, -} - -# Remove external class dependency -Service <| title == 'neutron-l3' |> { - require => undef -} \ No newline at end of file diff --git a/resources/neutron_agents_l3_puppet/1.0.0/actions/run.pp b/resources/neutron_agents_l3_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 0871d88c..00000000 --- a/resources/neutron_agents_l3_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,63 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$package_ensure = $resource['input']['package_ensure'] -$debug = $resource['input']['debug'] -$external_network_bridge = $resource['input']['external_network_bridge'] -$use_namespaces = $resource['input']['use_namespaces'] -$interface_driver = $resource['input']['interface_driver'] -$router_id = $resource['input']['router_id'] -$gateway_external_network_id = $resource['input']['gateway_external_network_id'] -$handle_internal_only_routers = $resource['input']['handle_internal_only_routers'] -$metadata_port = $resource['input']['metadata_port'] -$send_arp_for_ha = $resource['input']['send_arp_for_ha'] -$periodic_interval = $resource['input']['periodic_interval'] -$periodic_fuzzy_delay = $resource['input']['periodic_fuzzy_delay'] -$enable_metadata_proxy = $resource['input']['enable_metadata_proxy'] -$network_device_mtu = 
$resource['input']['network_device_mtu'] -$router_delete_namespaces = $resource['input']['router_delete_namespaces'] -$ha_enabled = $resource['input']['ha_enabled'] -$ha_vrrp_auth_type = $resource['input']['ha_vrrp_auth_type'] -$ha_vrrp_auth_password = $resource['input']['ha_vrrp_auth_password'] -$ha_vrrp_advert_int = $resource['input']['ha_vrrp_advert_int'] -$agent_mode = $resource['input']['agent_mode'] -$allow_automatic_l3agent_failover = $resource['input']['allow_automatic_l3agent_failover'] - -class { 'neutron::agents::l3': - enabled => true, - manage_service => true, - package_ensure => $package_ensure, - debug => $debug, - external_network_bridge => $external_network_bridge, - use_namespaces => $use_namespaces, - interface_driver => $interface_driver, - router_id => $router_id, - gateway_external_network_id => $gateway_external_network_id, - handle_internal_only_routers => $handle_internal_only_routers, - metadata_port => $metadata_port, - send_arp_for_ha => $send_arp_for_ha, - periodic_interval => $periodic_interval, - periodic_fuzzy_delay => $periodic_fuzzy_delay, - enable_metadata_proxy => $enable_metadata_proxy, - network_device_mtu => $network_device_mtu, - router_delete_namespaces => $router_delete_namespaces, - ha_enabled => $ha_enabled, - ha_vrrp_auth_type => $ha_vrrp_auth_type, - ha_vrrp_auth_password => $ha_vrrp_auth_password, - ha_vrrp_advert_int => $ha_vrrp_advert_int, - agent_mode => $agent_mode, - allow_automatic_l3agent_failover => $allow_automatic_l3agent_failover, -} - -include neutron::params - -package { 'neutron': - ensure => $package_ensure, - name => $::neutron::params::package_name, -} - -# Remove external class dependency -Service <| title == 'neutron-l3' |> { - require => undef -} \ No newline at end of file diff --git a/resources/neutron_agents_l3_puppet/1.0.0/meta.yaml b/resources/neutron_agents_l3_puppet/1.0.0/meta.yaml deleted file mode 100644 index e4253811..00000000 --- a/resources/neutron_agents_l3_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,82 +0,0 @@ -handler: puppet -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
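# run.pp above fetches the resource with hiera($::resource_name) and reads all
# parameters from its 'input' hash, so the hiera data it expects is shaped
# roughly as below (the resource name is hypothetical; values follow the
# defaults documented in the README):
neutron_agents_l3_puppet_1:
  input:
    package_ensure: present
    debug: false
    external_network_bridge: br-ex
    metadata_port: 9697
    send_arp_for_ha: 3
    enable_metadata_proxy: true
    agent_mode: legacy
    ha_enabled: false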
- # value: - - package_ensure: - schema: str - value: 'present' - debug: - schema: bool - value: false - external_network_bridge: - schema: str - value: 'br-ex' - use_namespaces: - schema: bool - value: true - interface_driver: - schema: str - value: 'neutron.agent.linux.interface.OVSInterfaceDriver' - router_id: - schema: str - value: - gateway_external_network_id: - schema: str - value: - handle_internal_only_routers: - schema: bool - value: true - metadata_port: - schema: int - value: 9697 - send_arp_for_ha: - schema: int - value: 3 - periodic_interval: - schema: int - value: 40 - periodic_fuzzy_delay: - schema: int - value: 5 - enable_metadata_proxy: - schema: bool - value: true - network_device_mtu: - schema: str - value: - router_delete_namespaces: - schema: bool - value: false - ha_enabled: - schema: bool - value: false - ha_vrrp_auth_type: - schema: str - value: 'PASS' - ha_vrrp_auth_password: - schema: str - value: - ha_vrrp_advert_int: - schema: int - value: 3 - agent_mode: - schema: str - value: 'legacy' - allow_automatic_l3agent_failover: - schema: bool - value: false - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} - -tags: [resource/neutron, resource/neutron_agents_l3] -version: 1.0.0 diff --git a/resources/neutron_agents_metadata_puppet/1.0.0/README.md b/resources/neutron_agents_metadata_puppet/1.0.0/README.md deleted file mode 100644 index 94cba4c7..00000000 --- a/resources/neutron_agents_metadata_puppet/1.0.0/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Neutron DHCP agent puppet resource - -Setup and configure Neutron metadata agent - -# Parameters - -https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/metadata.pp - - ``auth_password`` - (required) The password for the administrative user. - - ``shared_secret`` - (required) Shared secret to validate proxies Neutron metadata requests. - - ``package_ensure`` - Ensure state of the package. Defaults to 'present'. - - ``debug`` - Debug. Defaults to false. - - ``auth_tenant`` - The administrative user's tenant name. Defaults to 'services'. - - ``auth_user`` - The administrative user name for OpenStack Networking. - Defaults to 'neutron'. - - ``auth_url`` - The URL used to validate tokens. Defaults to 'http://localhost:35357/v2.0'. - Note, for this resource it is decomposed to auth_host and auth_port - due to implementation restrictions - - ``auth_insecure`` - turn off verification of the certificate for ssl (Defaults to false) - - ``auth_ca_cert`` - CA cert to check against with for ssl keystone. (Defaults to undef) - - ``auth_region`` - The authentication region. Defaults to 'RegionOne'. - - ``metadata_ip`` - The IP address of the metadata service. Defaults to '127.0.0.1'. - - ``metadata_port`` - The TCP port of the metadata service. Defaults to 8775. - - ``metadata_workers`` - (optional) Number of separate worker processes to spawn. - The default, count of machine's processors, runs the worker thread in the - current process. - Greater than 0 launches that number of child processes as workers. - The parent process manages them. Having more workers will help to improve performances. - Defaults to: $::processorcount - - ``metadata_backlog`` - (optional) Number of backlog requests to configure the metadata server socket with. - Defaults to 4096 - - ``metadata_memory_cache_ttl`` - (optional) Specifies time in seconds a metadata cache entry is valid in - memory caching backend. 
- Set to 0 will cause cache entries to never expire. - Set to undef or false to disable cache. - Defaults to 5 \ No newline at end of file diff --git a/resources/neutron_agents_metadata_puppet/1.0.0/actions/remove.pp b/resources/neutron_agents_metadata_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 15000590..00000000 --- a/resources/neutron_agents_metadata_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,16 +0,0 @@ -class { 'neutron::agents::metadata': - package_ensure => 'absent', - enabled => false, -} - -include neutron::params - -package { 'neutron': - ensure => 'absent', - name => $::neutron::params::package_name, -} - -# Remove external class dependency -Service <| title == 'neutron-metadata' |> { - require => undef -} \ No newline at end of file diff --git a/resources/neutron_agents_metadata_puppet/1.0.0/actions/run.pp b/resources/neutron_agents_metadata_puppet/1.0.0/actions/run.pp deleted file mode 100644 index d96955e0..00000000 --- a/resources/neutron_agents_metadata_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,53 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$auth_host = $resource['input']['auth_host'] -$auth_port = $resource['input']['auth_port'] - -$auth_password = $resource['input']['auth_password'] -$shared_secret = $resource['input']['shared_secret'] -$package_ensure = $resource['input']['package_ensure'] -$debug = $resource['input']['debug'] -$auth_tenant = $resource['input']['auth_tenant'] -$auth_user = $resource['input']['auth_user'] -$auth_insecure = $resource['input']['auth_insecure'] -$auth_ca_cert = $resource['input']['auth_ca_cert'] -$auth_region = $resource['input']['auth_region'] -$metadata_ip = $resource['input']['metadata_ip'] -$metadata_port = $resource['input']['metadata_port'] -$metadata_workers = $resource['input']['metadata_workers'] -$metadata_backlog = $resource['input']['metadata_backlog'] -$metadata_memory_cache_ttl = $resource['input']['metadata_memory_cache_ttl'] - -class { 'neutron::agents::metadata': - enabled => true, - manage_service => true, - auth_password => $auth_password, - shared_secret => $shared_secret, - package_ensure => $package_ensure, - debug => $debug, - auth_tenant => $auth_tenant, - auth_user => $auth_user, - auth_url => "http://${auth_host}:${auth_port}/v2.0", - auth_insecure => $auth_insecure, - auth_ca_cert => $auth_ca_cert, - auth_region => $auth_region, - metadata_ip => $metadata_ip, - metadata_port => $metadata_port, - metadata_workers => $metadata_workers, - metadata_backlog => $metadata_backlog, - metadata_memory_cache_ttl => $metadata_memory_cache_ttl, -} - -include neutron::params - -package { 'neutron': - ensure => $package_ensure, - name => $::neutron::params::package_name, -} - -# Remove external class dependency -Service <| title == 'neutron-metadata' |> { - require => undef -} \ No newline at end of file diff --git a/resources/neutron_agents_metadata_puppet/1.0.0/meta.yaml b/resources/neutron_agents_metadata_puppet/1.0.0/meta.yaml deleted file mode 100644 index 500b5c4f..00000000 --- a/resources/neutron_agents_metadata_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,68 +0,0 @@ -handler: puppet -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - auth_password: - schema: str! - value: - shared_secret: - schema: str! 
- value: - package_ensure: - schema: str - value: 'present' - debug: - schema: bool - value: false - auth_tenant: - schema: str - value: 'services' - auth_user: - schema: str - value: 'neutron' - auth_insecure: - schema: bool - value: false - auth_ca_cert: - schema: str - value: - auth_region: - schema: str - value: 'RegionOne' - metadata_ip: - schema: str - value: '127.0.0.1' - metadata_port: - schema: int - value: 8775 - metadata_workers: - schema: int - value: 1 - metadata_backlog: - schema: int - value: 4096 - metadata_memory_cache_ttl: - schema: int - value: 5 - - auth_host: - schema: str - value: 'localhost' - auth_port: - schema: int - value: 35357 - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} - -tags: [resource/neutron, resource/neutron_agents_metadata] -version: 1.0.0 diff --git a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/README.md b/resources/neutron_agents_ml2_ovs_puppet/1.0.0/README.md deleted file mode 100644 index b9ea2e28..00000000 --- a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Neutron OVS agent with ML2 plugin puppet resource - -Setups OVS neutron agent when using ML2 plugin - -# === Parameters - -source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/agents/ml2/ovs.pp - - ``package_ensure`` - (optional) The state of the package - Defaults to 'present' - - ``enabled`` - (required) Whether or not to enable the OVS Agent - Defaults to true - - ``bridge_uplinks`` - (optional) List of interfaces to connect to the bridge when doing - bridge mapping. - Defaults to empty list - - ``bridge_mapping`` - (optional) List of : - Defaults to empty list - - ``integration_bridge`` - (optional) Integration bridge in OVS - Defaults to 'br-int' - - ``enable_tunneling`` - (optional) Enable or not tunneling - Defaults to false - - ``tunnel_types`` - (optional) List of types of tunnels to use when utilizing tunnels, - either 'gre' or 'vxlan'. - Defaults to false - - ``local_ip`` - (optional) Local IP address of GRE tunnel endpoints. - Required when enabling tunneling - Defaults to false - - ``tunnel_bridge`` - (optional) Bridge used to transport tunnels - Defaults to 'br-tun' - - ``vxlan_udp_port`` - (optional) The UDP port to use for VXLAN tunnels. - Defaults to '4789' - - ``polling_interval`` - (optional) The number of seconds the agent will wait between - polling for local device changes. - Defaults to '2" - - ``l2_population`` - (optional) Extension to use alongside ml2 plugin's l2population - mechanism driver. - Defaults to false - - ``arp_responder`` - (optional) Enable or not the ARP responder. - Recommanded when using l2 population mechanism driver. - Defaults to false - - ``firewall_driver`` - (optional) Firewall driver for realizing neutron security group function. - Defaults to 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver'. - - ``enable_distributed_routing`` - (optional) Set to True on L2 agents to enable support - for distributed virtual routing. 
- Defaults to false \ No newline at end of file diff --git a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/remove.pp b/resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 7a197915..00000000 --- a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,4 +0,0 @@ -class { 'neutron::agents::ml2::ovs': - package_ensure => 'absent', - enabled => false, -} \ No newline at end of file diff --git a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/run.pp b/resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 115e1ce8..00000000 --- a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,45 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$package_ensure = $resource['input']['package_ensure'] -$enabled = $resource['input']['enabled'] -$bridge_uplinks = $resource['input']['bridge_uplinks'] -$bridge_mappings = $resource['input']['bridge_mappings'] -$integration_bridge = $resource['input']['integration_bridge'] -$enable_tunneling = $resource['input']['enable_tunneling'] -$tunnel_types = $resource['input']['tunnel_types'] -$local_ip = $resource['input']['local_ip'] -$tunnel_bridge = $resource['input']['tunnel_bridge'] -$vxlan_udp_port = $resource['input']['vxlan_udp_port'] -$polling_interval = $resource['input']['polling_interval'] -$l2_population = $resource['input']['l2_population'] -$arp_responder = $resource['input']['arp_responder'] -$firewall_driver = $resource['input']['firewall_driver'] -$enable_distributed_routing = $resource['input']['enable_distributed_routing'] - -class { 'neutron::agents::ml2::ovs': - enabled => true, - package_ensure => $package_ensure, - bridge_uplinks => $bridge_uplinks, - bridge_mappings => $bridge_mappings, - integration_bridge => $integration_bridge, - enable_tunneling => $enable_tunneling, - tunnel_types => $tunnel_types, - local_ip => $local_ip, - tunnel_bridge => $tunnel_bridge, - vxlan_udp_port => $vxlan_udp_port, - polling_interval => $polling_interval, - l2_population => $l2_population, - arp_responder => $arp_responder, - firewall_driver => $firewall_driver, - enable_distributed_routing => $enable_distributed_routing, -} - -# Remove external class dependency and restore required ones -Service <| title == 'neutron-ovs-agent-service' |> { - require => undef -} -Neutron_plugin_ml2<||> ~> Service['neutron-ovs-agent-service'] -File <| title == '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini' |> ~> -Service<| title == 'neutron-ovs-agent-service' |> \ No newline at end of file diff --git a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/meta.yaml b/resources/neutron_agents_ml2_ovs_puppet/1.0.0/meta.yaml deleted file mode 100644 index 75c54d42..00000000 --- a/resources/neutron_agents_ml2_ovs_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,64 +0,0 @@ -handler: puppet -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
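# A sketch of what run.pp above would read from hiera for a VXLAN-tunnelling
# agent (the resource name and local_ip are placeholders; tunnel_bridge and
# vxlan_udp_port follow the defaults documented in the README):
neutron_agents_ml2_ovs_puppet_1:
  input:
    enable_tunneling: true
    tunnel_types: ['vxlan']
    local_ip: 10.0.0.3
    tunnel_bridge: br-tun
    vxlan_udp_port: 4789
    l2_population: false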
- # value: - - package_ensure: - schema: str - value: 'present' - enabled: - schema: bool - value: true - bridge_uplinks: - schema: [str] - value: [] - bridge_mappings: - schema: [str] - value: [] - integration_bridge: - schema: str - value: 'br-int' - enable_tunneling: - schema: bool - value: false - tunnel_types: - schema: [str] - value: [] - local_ip: - schema: str - value: '' - tunnel_bridge: - schema: str - value: 'br-tun' - vxlan_udp_port: - schema: int - value: 4789 - polling_interval: - schema: int - value: 2 - l2_population: - schema: bool - value: false - arp_responder: - schema: bool - value: false - firewall_driver: - schema: str - value: 'neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver' - enable_distributed_routing: - schema: bool - value: false - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} - -tags: [resource/neutron, resource/neutron_agents_ml2_ovs] -version: 1.0.0 diff --git a/resources/neutron_plugins_ml2_puppet/1.0.0/README.md b/resources/neutron_plugins_ml2_puppet/1.0.0/README.md deleted file mode 100644 index a81c5216..00000000 --- a/resources/neutron_plugins_ml2_puppet/1.0.0/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# Neutron ML2 plugin puppet resource - -# === Parameters - -source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/plugins/ml2.pp - - ``type_drivers`` - (optional) List of network type driver entrypoints to be loaded - from the neutron.ml2.type_drivers namespace. - Could be an array that can have these elements: - local, flat, vlan, gre, vxlan - Defaults to ['local', 'flat', 'vlan', 'gre', 'vxlan']. - - ``tenant_network_types`` - (optional) Ordered list of network_types to allocate as tenant networks. - The value 'local' is only useful for single-box testing - but provides no connectivity between hosts. - Should be an array that can have these elements: - local, flat, vlan, gre, vxlan - Defaults to ['local', 'flat', 'vlan', 'gre', 'vxlan']. - - ``mechanism_drivers`` - (optional) An ordered list of networking mechanism driver - entrypoints to be loaded from the neutron.ml2.mechanism_drivers namespace. - Should be an array that can have these elements: - logger, test, linuxbridge, openvswitch, hyperv, ncs, arista, cisco_nexus, - l2population, sriovnicswitch - Default to ['openvswitch', 'linuxbridge']. - - ``flat_networks`` - (optional) List of physical_network names with which flat networks - can be created. Use * to allow flat networks with arbitrary - physical_network names. - Should be an array. - Default to *. - - ``network_vlan_ranges`` - (optional) List of :: or - specifying physical_network names - usable for VLAN provider and tenant networks, as - well as ranges of VLAN tags on each available for - allocation to tenant networks. - Should be an array with vlan_min = 1 & vlan_max = 4094 (IEEE 802.1Q) - Default to empty. - - ``tunnel_id_ranges`` - (optional) Comma-separated list of : tuples - enumerating ranges of GRE tunnel IDs that are - available for tenant network allocation - Should be an array with tun_max +1 - tun_min > 1000000 - Default to empty. - - ``vxlan_group`` - (optional) Multicast group for VXLAN. - Multicast group for VXLAN. If unset, disables VXLAN enable sending allocate - broadcast traffic to this multicast group. When left unconfigured, will - disable multicast VXLAN mode - Should be an Multicast IP (v4 or v6) address. - Default to 'None'. 
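 Combined with the defaults declared in this resource's meta.yaml further below, a minimal VXLAN-oriented set of these inputs might look like the following sketch (the physnet name, VLAN range, VNI range and multicast group are the shipped defaults; the driver choices are illustrative):

    type_drivers: ['flat', 'vlan', 'vxlan']
    tenant_network_types: ['vxlan']
    mechanism_drivers: ['openvswitch']
    flat_networks: ['*']
    network_vlan_ranges: ['physnet1:1000:2999']
    vni_ranges: ['10:100']
    vxlan_group: '224.0.0.1'
    enable_security_group: true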
- - ``vni_ranges`` - (optional) Comma-separated list of : tuples - enumerating ranges of VXLAN VNI IDs that are - available for tenant network allocation. - Min value is 0 and Max value is 16777215. - Default to empty. - - ``enable_security_group`` - (optional) Controls if neutron security group is enabled or not. - It should be false when you use nova security group. - Defaults to true. - - ``supported_pci_vendor_devs`` - (optional) Supported PCI vendor devices, defined by - vendor_id:product_id according to the PCI ID - Repository. Should be an array of devices. - Defaults to ['15b3:1004', '8086:10ca'] (Intel & Mellanox SR-IOV capable NICs) - - ``sriov_agent_required`` - (optional) SRIOV neutron agent is required for port binding. - Only set to true if SRIOV network adapters support VF link state setting - and if admin state management is desired. - Defaults to false. diff --git a/resources/neutron_plugins_ml2_puppet/1.0.0/actions/remove.pp b/resources/neutron_plugins_ml2_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 1867d4cd..00000000 --- a/resources/neutron_plugins_ml2_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,3 +0,0 @@ -class { 'neutron::plugins::ml2': - package_ensure => 'absent', -} \ No newline at end of file diff --git a/resources/neutron_plugins_ml2_puppet/1.0.0/actions/run.pp b/resources/neutron_plugins_ml2_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 347c0623..00000000 --- a/resources/neutron_plugins_ml2_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,54 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$type_drivers = $resource['input']['type_drivers'] -$tenant_network_types = $resource['input']['tenant_network_types'] -$mechanism_drivers = $resource['input']['mechanism_drivers'] -$flat_networks = $resource['input']['flat_networks'] -$network_vlan_ranges = $resource['input']['network_vlan_ranges'] -$tunnel_id_ranges = $resource['input']['tunnel_id_ranges'] -$vxlan_group = $resource['input']['vxlan_group'] -$vni_ranges = $resource['input']['vni_ranges'] -$enable_security_group = $resource['input']['enable_security_group'] -$package_ensure = $resource['input']['package_ensure'] -$supported_pci_vendor_devs = $resource['input']['supported_pci_vendor_devs'] -$sriov_agent_required = $resource['input']['sriov_agent_required'] - -# LP1490438 -file {'/etc/default/neutron-server': - ensure => present, - owner => 'root', - group => 'root', - mode => 644 -} -> - -class { 'neutron::plugins::ml2': - type_drivers => $type_drivers, - tenant_network_types => $tenant_network_types, - mechanism_drivers => $mechanism_drivers, - flat_networks => $flat_networks, - network_vlan_ranges => $network_vlan_ranges, - tunnel_id_ranges => $tunnel_id_ranges, - vxlan_group => $vxlan_group, - vni_ranges => $vni_ranges, - enable_security_group => $enable_security_group, - package_ensure => $package_ensure, - supported_pci_vendor_devs => $supported_pci_vendor_devs, - sriov_agent_required => $sriov_agent_required, -} -> - -exec { 'neutron-db-sync': - provider => 'shell', - command => "${command} stamp head", - path => [ '/usr/bin', '/bin' ], - onlyif => "${command} current | grep -qE '^Current revision.*None$' " -} - -include neutron::params - -package { 'neutron': - ensure => $package_ensure, - name => $::neutron::params::package_name, - before => Exec['neutron-db-sync'] -} \ No newline at end of file diff --git a/resources/neutron_plugins_ml2_puppet/1.0.0/meta.yaml b/resources/neutron_plugins_ml2_puppet/1.0.0/meta.yaml deleted file mode 
100644 index 76d44f89..00000000 --- a/resources/neutron_plugins_ml2_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,55 +0,0 @@ -handler: puppet -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - type_drivers: - schema: [str] - value: ['local', 'flat', 'vlan', 'gre', 'vxlan'] - tenant_network_types: - schema: [str] - value: ['local', 'flat', 'vlan', 'gre', 'vxlan'] - mechanism_drivers: - schema: [str] - value: ['openvswitch', 'linuxbridge'] - flat_networks: - schema: [str] - value: ['*'] - network_vlan_ranges: - schema: [str] - value: ['physnet1:1000:2999'] - tunnel_id_ranges: - schema: [str] - value: ['20:100'] - vxlan_group: - schema: str - value: '224.0.0.1' - vni_ranges: - schema: [str] - value: ['10:100'] - enable_security_group: - schema: bool - value: true - package_ensure: - schema: str - value: 'present' - supported_pci_vendor_devs: - schema: [str] - value: ['15b3:1004', '8086:10ca'] - sriov_agent_required: - schema: bool - value: false - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} - -tags: [resource/neutron, resource/neutron_plugins_ml2] -version: 1.0.0 diff --git a/resources/neutron_puppet/1.0.0/README.md b/resources/neutron_puppet/1.0.0/README.md deleted file mode 100644 index 217cd25a..00000000 --- a/resources/neutron_puppet/1.0.0/README.md +++ /dev/null @@ -1,194 +0,0 @@ -# Neutron puppet resource - - Installs the neutron package and configures - /etc/neutron/neutron.conf for SSL, AMQP, logging, service plugins and other stuff. - Does not produce any services. - -# Parameters: - -source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/init.pp - - ``package_ensure`` - (optional) The state of the package - Defaults to 'present' - - ``verbose`` - (optional) Verbose logging - Defaults to False - - ``debug`` - (optional) Print debug messages in the logs - Defaults to False - - ``bind_host`` - (optional) The IP/interface to bind to - Defaults to 0.0.0.0 (all interfaces) - - ``bind_port`` - (optional) The port to use - Defaults to 9696 - - ``core_plugin`` - (optional) Neutron plugin provider - Defaults to openvswitch - Could be bigswitch, brocade, cisco, embrane, hyperv, linuxbridge, midonet, ml2, mlnx, nec, nicira, plumgrid, ryu - - ``service_plugins`` - (optional) Advanced service modules. - Could be an array that can have these elements: - router, firewall, lbaas, vpnaas, metering - Defaults to empty - - ``auth_strategy`` - (optional) How to authenticate - Defaults to 'keystone'. 'noauth' is the only other valid option - - ``base_mac`` - (optional) The MAC address pattern to use. - Defaults to fa:16:3e:00:00:00 - - ``mac_generation_retries`` - (optional) How many times to try to generate a unique mac - Defaults to 16 - - ``dhcp_lease_duration`` - (optional) DHCP lease - Defaults to 86400 seconds - - ``dhcp_agents_per_network`` - (optional) Number of DHCP agents scheduled to host a network. - This enables redundant DHCP agents for configured networks. - Defaults to 1 - - ``network_device_mtu`` - (optional) The MTU size for the interfaces managed by neutron - Defaults to undef - - ``dhcp_agent_notification`` - (optional) Allow sending resource operation notification to DHCP agent. 
- Defaults to true - - ``allow_bulk`` - (optional) Enable bulk crud operations - Defaults to true - - ``allow_pagination`` - (optional) Enable pagination - Defaults to false - - ``allow_sorting`` - (optional) Enable sorting - Defaults to false - - ``allow_overlapping_ips`` - (optional) Enables network namespaces - Defaults to false - - ``api_extensions_path`` - (optional) Specify additional paths for API extensions that the - module in use needs to load. - Defaults to undef - - ``report_interval`` - (optional) Seconds between nodes reporting state to server; should be less than - agent_down_time, best if it is half or less than agent_down_time. - agent_down_time is a config for neutron-server, set by class neutron::server - report_interval is a config for neutron agents, set by class neutron - Defaults to: 30 - - ``control_exchange`` - (optional) What RPC queue/exchange to use - Defaults to neutron - - ``rpc_backend`` - (optional) what rpc/queuing service to use - Defaults to impl_kombu (rabbitmq) - - ``rabbit_password`` - ``rabbit_host`` - ``rabbit_port`` - ``rabbit_user`` - (optional) Various rabbitmq settings - - ``rabbit_hosts`` - (optional) array of rabbitmq servers for HA. - A single IP address, such as a VIP, can be used for load-balancing - multiple RabbitMQ Brokers. - Defaults to false - - ``rabbit_use_ssl`` - (optional) Connect over SSL for RabbitMQ - Defaults to false - - ``kombu_ssl_ca_certs`` - (optional) SSL certification authority file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_certfile`` - (optional) SSL cert file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_keyfile`` - (optional) SSL key file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_version`` - (optional) SSL version to use (valid only if SSL enabled). - Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be - available on some distributions. - Defaults to 'TLSv1' - - ``kombu_reconnect_delay`` - (optional) The amount of time to wait before attempting to reconnect - to MQ provider. This is used in some cases where you may need to wait - for the provider to propery premote the master before attempting to - reconnect. 
See https://review.openstack.org/#/c/76686 - Defaults to '1.0' - - ``qpid_hostname`` - ``qpid_port`` - ``qpid_username`` - ``qpid_password`` - ``qpid_heartbeat`` - ``qpid_protocol`` - ``qpid_tcp_nodelay`` - ``qpid_reconnect`` - ``qpid_reconnect_timeout`` - ``qpid_reconnect_limit`` - ``qpid_reconnect_interval`` - ``qpid_reconnect_interval_min`` - ``qpid_reconnect_interval_max`` - (optional) various QPID options - - ``use_ssl`` - (optinal) Enable SSL on the API server - Defaults to false, not set - - ``cert_file`` - (optinal) certificate file to use when starting api server securely - defaults to false, not set - - ``key_file`` - (optional) Private key file to use when starting API server securely - Defaults to false, not set - - ``ca_file`` - (optional) CA certificate file to use to verify connecting clients - Defaults to false, not set - - ``use_syslog`` - (optional) Use syslog for logging - Defaults to false - - ``log_facility`` - (optional) Syslog facility to receive log lines - Defaults to LOG_USER - - ``log_file`` - (optional) Where to log - Defaults to false - - ``log_dir`` - (optional) Directory where logs should be stored - If set to boolean false, it will not log to any directory - Defaults to /var/log/neutron diff --git a/resources/neutron_puppet/1.0.0/actions/remove.pp b/resources/neutron_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 52cfc1a4..00000000 --- a/resources/neutron_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,5 +0,0 @@ -class { 'neutron': - enabled => false, - package_ensure => 'absent', - rabbit_password => 'not important as removed', -} \ No newline at end of file diff --git a/resources/neutron_puppet/1.0.0/actions/run.pp b/resources/neutron_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 84a009bc..00000000 --- a/resources/neutron_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,120 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$package_ensure = $resource['input']['package_ensure'] -$verbose = $resource['input']['verbose'] -$debug = $resource['input']['debug'] -$bind_host = $resource['input']['bind_host'] -$bind_port = $resource['input']['bind_port'] -$core_plugin = $resource['input']['core_plugin'] -$service_plugins = $resource['input']['service_plugins'] -$auth_strategy = $resource['input']['auth_strategy'] -$base_mac = $resource['input']['base_mac'] -$mac_generation_retries = $resource['input']['mac_generation_retries'] -$dhcp_lease_duration = $resource['input']['dhcp_lease_duration'] -$dhcp_agents_per_network = $resource['input']['dhcp_agents_per_network'] -$network_device_mtu = $resource['input']['network_device_mtu'] -$dhcp_agent_notification = $resource['input']['dhcp_agent_notification'] -$allow_bulk = $resource['input']['allow_bulk'] -$allow_pagination = $resource['input']['allow_pagination'] -$allow_sorting = $resource['input']['allow_sorting'] -$allow_overlapping_ips = $resource['input']['allow_overlapping_ips'] -$api_extensions_path = $resource['input']['api_extensions_path'] -$root_helper = $resource['input']['root_helper'] -$report_interval = $resource['input']['report_interval'] -$control_exchange = $resource['input']['control_exchange'] -$rpc_backend = $resource['input']['rpc_backend'] -$rabbit_password = $resource['input']['rabbit_password'] -$rabbit_host = $resource['input']['rabbit_host'] -$rabbit_hosts = $resource['input']['rabbit_hosts'] -$rabbit_port = $resource['input']['rabbit_port'] -$rabbit_user = $resource['input']['rabbit_user'] -$rabbit_virtual_host = 
$resource['input']['rabbit_virtual_host'] -$rabbit_use_ssl = $resource['input']['rabbit_use_ssl'] -$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs'] -$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile'] -$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile'] -$kombu_ssl_version = $resource['input']['kombu_ssl_version'] -$kombu_reconnect_delay = $resource['input']['kombu_reconnect_delay'] -$qpid_hostname = $resource['input']['qpid_hostname'] -$qpid_port = $resource['input']['qpid_port'] -$qpid_username = $resource['input']['qpid_username'] -$qpid_password = $resource['input']['qpid_password'] -$qpid_heartbeat = $resource['input']['qpid_heartbeat'] -$qpid_protocol = $resource['input']['qpid_protocol'] -$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay'] -$qpid_reconnect = $resource['input']['qpid_reconnect'] -$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout'] -$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit'] -$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min'] -$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max'] -$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval'] -$use_ssl = $resource['input']['use_ssl'] -$cert_file = $resource['input']['cert_file'] -$key_file = $resource['input']['key_file'] -$ca_file = $resource['input']['ca_file'] -$use_syslog = $resource['input']['use_syslog'] -$log_facility = $resource['input']['log_facility'] -$log_file = $resource['input']['log_file'] -$log_dir = $resource['input']['log_dir'] - -class { 'neutron': - enabled => true, - package_ensure => $package_ensure, - verbose => $verbose, - debug => $debug, - bind_host => $bind_host, - bind_port => $bind_port, - core_plugin => $core_plugin, - service_plugins => $service_plugins, - auth_strategy => $auth_strategy, - base_mac => $base_mac, - mac_generation_retries => $mac_generation_retries, - dhcp_lease_duration => $dhcp_lease_duration, - dhcp_agents_per_network => $dhcp_agents_per_network, - network_device_mtu => $network_device_mtu, - dhcp_agent_notification => $dhcp_agent_notification, - allow_bulk => $allow_bulk, - allow_pagination => $allow_pagination, - allow_sorting => $allow_sorting, - allow_overlapping_ips => $allow_overlapping_ips, - api_extensions_path => $api_extensions_path, - root_helper => $root_helper, - report_interval => $report_interval, - control_exchange => $control_exchange, - rpc_backend => $rpc_backend, - rabbit_password => $rabbit_password, - rabbit_host => $rabbit_host, - rabbit_hosts => $rabbit_hosts, - rabbit_port => $rabbit_port, - rabbit_user => $rabbit_user, - rabbit_virtual_host => $rabbit_virtual_host, - rabbit_use_ssl => $rabbit_use_ssl, - kombu_ssl_ca_certs => $kombu_ssl_ca_certs, - kombu_ssl_certfile => $kombu_ssl_certfile, - kombu_ssl_keyfile => $kombu_ssl_keyfile, - kombu_ssl_version => $kombu_ssl_version, - kombu_reconnect_delay => $kombu_reconnect_delay, - qpid_hostname => $qpid_hostname, - qpid_port => $qpid_port, - qpid_username => $qpid_username, - qpid_password => $qpid_password, - qpid_heartbeat => $qpid_heartbeat, - qpid_protocol => $qpid_protocol, - qpid_tcp_nodelay => $qpid_tcp_nodelay, - qpid_reconnect => $qpid_reconnect, - qpid_reconnect_timeout => $qpid_reconnect_timeout, - qpid_reconnect_limit => $qpid_reconnect_limit, - qpid_reconnect_interval_min => $qpid_reconnect_interval_min, - qpid_reconnect_interval_max => $qpid_reconnect_interval_max, - qpid_reconnect_interval => $qpid_reconnect_interval, - use_ssl => 
$use_ssl, - cert_file => $cert_file, - key_file => $key_file, - ca_file => $ca_file, - use_syslog => $use_syslog, - log_facility => $log_facility, - log_file => $log_file, - log_dir => $log_dir, -} diff --git a/resources/neutron_puppet/1.0.0/meta.yaml b/resources/neutron_puppet/1.0.0/meta.yaml deleted file mode 100644 index 969482fa..00000000 --- a/resources/neutron_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,187 +0,0 @@ -handler: puppet -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - package_ensure: - schema: str - value: 'present' - verbose: - schema: bool - value: false - debug: - schema: bool - value: false - bind_host: - schema: str - value: '0.0.0.0' - bind_port: - schema: int - value: 9696 - core_plugin: - schema: str - value: 'openvswitch' - service_plugins: - schema: str - value: - auth_strategy: - schema: str - value: 'keystone' - base_mac: - schema: str - value: 'fa:16:3e:00:00:00' - mac_generation_retries: - schema: int - value: 16 - dhcp_lease_duration: - schema: int - value: 86400 - dhcp_agents_per_network: - schema: int - value: 1 - network_device_mtu: - schema: str - value: - dhcp_agent_notification: - schema: bool - value: true - allow_bulk: - schema: bool - value: true - allow_pagination: - schema: bool - value: false - allow_sorting: - schema: bool - value: false - allow_overlapping_ips: - schema: bool - value: false - api_extensions_path: - schema: str - value: - root_helper: - schema: str - value: 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' - report_interval: - schema: int - value: 30 - control_exchange: - schema: str - value: 'neutron' - rpc_backend: - schema: str - value: 'neutron.openstack.common.rpc.impl_kombu' - rabbit_password: - schema: str! - value: - rabbit_host: - schema: str - value: 'localhost' - rabbit_hosts: - schema: bool - value: false - rabbit_port: - schema: int - value: 5672 - rabbit_user: - schema: str - value: 'guest' - rabbit_virtual_host: - schema: str - value: '/' - rabbit_use_ssl: - schema: bool - value: false - kombu_ssl_ca_certs: - schema: str - value: - kombu_ssl_certfile: - schema: str - value: - kombu_ssl_keyfile: - schema: str - value: - kombu_ssl_version: - schema: str - value: 'TLSv1' - kombu_reconnect_delay: - schema: str - value: '1.0' - qpid_hostname: - schema: str - value: 'localhost' - qpid_port: - schema: int - value: 5672 - qpid_username: - schema: str - value: 'guest' - qpid_password: - schema: str! 
- value: 'guest' - qpid_heartbeat: - schema: int - value: 60 - qpid_protocol: - schema: str - value: 'tcp' - qpid_tcp_nodelay: - schema: bool - value: true - qpid_reconnect: - schema: bool - value: true - qpid_reconnect_timeout: - schema: int - value: 0 - qpid_reconnect_limit: - schema: int - value: 0 - qpid_reconnect_interval_min: - schema: int - value: 0 - qpid_reconnect_interval_max: - schema: int - value: 0 - qpid_reconnect_interval: - schema: int - value: 0 - use_ssl: - schema: bool - value: false - cert_file: - schema: str - value: - key_file: - schema: str - value: - ca_file: - schema: str - value: - use_syslog: - schema: bool - value: false - log_facility: - schema: str - value: 'LOG_USER' - log_file: - schema: str - value: - log_dir: - schema: str - value: '/var/log/neutron' - - module: - schema: {name: str!, type: str, url: str, ref: str} - value: {name: neutron, type: 'git', url: 'https://github.com/openstack/puppet-neutron', ref: '5.1.0'} - -tags: [resource/neutron] -version: 1.0.0 diff --git a/resources/neutron_server_puppet/1.0.0/README.md b/resources/neutron_server_puppet/1.0.0/README.md deleted file mode 100644 index f0cc5537..00000000 --- a/resources/neutron_server_puppet/1.0.0/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# Neutron puppet resource - -Setup and configure the neutron API service and endpoint - -# Parameters: - -source https://github.com/openstack/puppet-neutron/blob/5.1.0/manifests/server.pp - - ``package_ensure`` - (optional) The state of the package - Defaults to present - - ``log_file`` - REMOVED: Use log_file of neutron class instead. - - ``log_dir`` - REMOVED: Use log_dir of neutron class instead. - - ``auth_password`` - (optional) The password to use for authentication (keystone) - Defaults to false. Set a value unless you are using noauth - - ``auth_type`` - (optional) What auth system to use - Defaults to 'keystone'. Can other be 'noauth' - - ``auth_host`` - (optional) The keystone host - Defaults to localhost - - ``auth_protocol`` - (optional) The protocol used to access the auth host - Defaults to http. - - ``auth_port`` - (optional) The keystone auth port - Defaults to 35357 - - ``auth_admin_prefix`` - (optional) The admin_prefix used to admin endpoint of the auth host - This allow admin auth URIs like http://auth_host:35357/keystone. - (where '/keystone' is the admin prefix) - Defaults to false for empty. If defined, should be a string with a leading '/' and no trailing '/'. - - ``auth_tenant`` - (optional) The tenant of the auth user - Defaults to services - - ``auth_user`` - (optional) The name of the auth user - Defaults to neutron - - ``auth_protocol`` - (optional) The protocol to connect to keystone - Defaults to http - - ``auth_uri`` - (optional) Complete public Identity API endpoint. - Defaults to: $auth_protocol://$auth_host:5000/ - - ``database_connection`` - (optional) Connection url for the neutron database. - (Defaults to 'sqlite:////var/lib/neutron/ovs.sqlite') - Note: for this resource it is decomposed to the - 'db_host', 'db_port', 'db_user', 'db_password' inputs - due to implementation limitations - - ``database_max_retries`` - (optional) Maximum database connection retries during startup. - (Defaults to 10) - - ``sql_max_retries`` - DEPRECATED: Use database_max_retries instead. - - ``max_retries`` - DEPRECATED: Use database_max_retries instead. - - ``database_idle_timeout`` - (optional) Timeout before idle database connections are reaped. 
- Deprecates sql_idle_timeout - (Defaults to 3600) - - ``sql_idle_timeout`` - DEPRECATED: Use database_idle_timeout instead. - - ``idle_timeout`` - DEPRECATED: Use database_idle_timeout instead. - - ``database_retry_interval`` - (optional) Interval between retries of opening a database connection. - (Defaults to 10) - - ``sql_reconnect_interval`` - DEPRECATED: Use database_retry_interval instead. - - ``retry_interval`` - DEPRECATED: Use database_retry_interval instead. - - ``database_min_pool_size`` - (optional) Minimum number of SQL connections to keep open in a pool. - Defaults to: 1 - - ``database_max_pool_size`` - (optional) Maximum number of SQL connections to keep open in a pool. - Defaults to: 10 - - ``database_max_overflow`` - (optional) If set, use this value for max_overflow with sqlalchemy. - Defaults to: 20 - - ``sync_db`` - (optional) Run neutron-db-manage on api nodes after installing the package. - Defaults to false - - ``api_workers`` - (optional) Number of separate worker processes to spawn. - The default, count of machine's processors, runs the worker thread in the - current process. - Greater than 0 launches that number of child processes as workers. - The parent process manages them. - Defaults to: $::processorcount - - ``rpc_workers`` - (optional) Number of separate RPC worker processes to spawn. - The default, count of machine's processors, runs the worker thread in the - current process. - Greater than 0 launches that number of child processes as workers. - The parent process manages them. - Defaults to: $::processorcount - - ``agent_down_time`` - (optional) Seconds to regard the agent as down; should be at least twice - report_interval, to be sure the agent is down for good. - agent_down_time is a config for neutron-server, set by class neutron::server - report_interval is a config for neutron agents, set by class neutron - Defaults to: 75 - - ``router_scheduler_driver`` - (optional) Driver to use for scheduling router to a default L3 agent. Could be: - neutron.scheduler.l3_agent_scheduler.ChanceScheduler to schedule a router in a random way - neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler to allocate on an L3 agent with the least number of routers bound. - Defaults to: neutron.scheduler.l3_agent_scheduler.ChanceScheduler - - ``mysql_module`` - (optional) Deprecated. Does nothing. - - ``router_distributed`` - (optional) Setting the "router_distributed" flag to "True" will default to the creation - of distributed tenant routers. - Also can be the type of the router on the create request (admin-only attribute). - Defaults to false - - ``l3_ha`` - (optional) Enable high availability for virtual routers. - Defaults to false - - ``max_l3_agents_per_router`` - (optional) Maximum number of l3 agents which a HA router will be scheduled on. If set to '0', a router will be scheduled on every agent. - Defaults to '3' - - ``min_l3_agents_per_router`` - (optional) Minimum number of l3 agents which a HA router will be scheduled on. - Defaults to '2' - - ``l3_ha_net_cidr`` - (optional) CIDR of the administrative network if HA mode is enabled. 
- Defaults to '169.254.192.0/18' \ No newline at end of file diff --git a/resources/neutron_server_puppet/1.0.0/actions/remove.pp b/resources/neutron_server_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index ecaed03a..00000000 --- a/resources/neutron_server_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,10 +0,0 @@ -class { 'neutron::server': - enabled => false, - package_ensure => 'absent', - auth_password => 'not important as removed', -} - -# Remove external class dependency -Service <| title == 'neutron-server' |> { - require => undef -} \ No newline at end of file diff --git a/resources/neutron_server_puppet/1.0.0/actions/run.pp b/resources/neutron_server_puppet/1.0.0/actions/run.pp deleted file mode 100644 index f47974da..00000000 --- a/resources/neutron_server_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,93 +0,0 @@ -$resource = hiera($::resource_name) - -$ip = $resource['input']['ip'] - -$db_user = $resource['input']['db_user'] -$db_host = $resource['input']['db_host'] -$db_port = $resource['input']['db_port'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] - -$package_ensure = $resource['input']['package_ensure'] -$auth_password = $resource['input']['auth_password'] -$auth_type = $resource['input']['auth_type'] -$auth_host = $resource['input']['auth_host'] -$auth_port = $resource['input']['auth_port'] -$auth_admin_prefix = $resource['input']['auth_admin_prefix'] -$auth_tenant = $resource['input']['auth_tenant'] -$auth_user = $resource['input']['auth_user'] -$auth_protocol = $resource['input']['auth_protocol'] -$auth_uri = $resource['input']['auth_uri'] -$database_max_retries = $resource['input']['database_max_retries'] -$database_idle_timeout = $resource['input']['database_idle_timeout'] -$database_retry_interval = $resource['input']['database_retry_interval'] -$database_min_pool_size = $resource['input']['database_min_pool_size'] -$database_max_pool_size = $resource['input']['database_max_pool_size'] -$database_max_overflow = $resource['input']['database_max_overflow'] -$sync_db = $resource['input']['sync_db'] -$api_workers = $resource['input']['api_workers'] -$rpc_workers = $resource['input']['rpc_workers'] -$agent_down_time = $resource['input']['agent_down_time'] -$router_scheduler_driver = $resource['input']['router_scheduler_driver'] -$router_distributed = $resource['input']['router_distributed'] -$l3_ha = $resource['input']['l3_ha'] -$max_l3_agents_per_router = $resource['input']['max_l3_agents_per_router'] -$min_l3_agents_per_router = $resource['input']['min_l3_agents_per_router'] -$l3_ha_net_cidr = $resource['input']['l3_ha_net_cidr'] -$mysql_module = $resource['input']['mysql_module'] -$sql_max_retries = $resource['input']['sql_max_retries'] -$max_retries = $resource['input']['max_retries'] -$sql_idle_timeout = $resource['input']['sql_idle_timeout'] -$idle_timeout = $resource['input']['idle_timeout'] -$sql_reconnect_interval = $resource['input']['sql_reconnect_interval'] -$retry_interval = $resource['input']['retry_interval'] -$log_dir = $resource['input']['log_dir'] -$log_file = $resource['input']['log_file'] -$report_interval = $resource['input']['report_interval'] - -class { 'neutron::server': - enabled => true, - manage_service => true, - database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}", - package_ensure => $package_ensure, - auth_password => $auth_password, - auth_type => $auth_type, - auth_host => $auth_host, - auth_port => $auth_port, - auth_admin_prefix => 
$auth_admin_prefix, - auth_tenant => $auth_tenant, - auth_user => $auth_user, - auth_protocol => $auth_protocol, - auth_uri => $auth_uri, - database_max_retries => $database_max_retries, - database_idle_timeout => $database_idle_timeout, - database_retry_interval => $database_retry_interval, - database_min_pool_size => $database_min_pool_size, - database_max_pool_size => $database_max_pool_size, - database_max_overflow => $database_max_overflow, - sync_db => $sync_db, - api_workers => $api_workers, - rpc_workers => $rpc_workers, - agent_down_time => $agent_down_time, - router_scheduler_driver => $router_scheduler_driver, - router_distributed => $router_distributed, - l3_ha => $l3_ha, - max_l3_agents_per_router => $max_l3_agents_per_router, - min_l3_agents_per_router => $min_l3_agents_per_router, - l3_ha_net_cidr => $l3_ha_net_cidr, - mysql_module => $mysql_module, - sql_max_retries => $sql_max_retries, - max_retries => $max_retries, - sql_idle_timeout => $sql_idle_timeout, - idle_timeout => $idle_timeout, - sql_reconnect_interval => $sql_reconnect_interval, - retry_interval => $retry_interval, - log_dir => $log_dir, - log_file => $log_file, - report_interval => $report_interval, -} - -# Remove external class dependency -Service <| title == 'neutron-server' |> { - require => undef -} diff --git a/resources/neutron_server_puppet/1.0.0/meta.yaml b/resources/neutron_server_puppet/1.0.0/meta.yaml deleted file mode 100644 index 976110b1..00000000 --- a/resources/neutron_server_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,146 +0,0 @@ -handler: puppet -actions: - run: run.pp - update: run.pp -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - db_user: - schema: str! - value: - db_password: - schema: str! - value: - db_name: - schema: str! - value: - db_host: - schema: str! - value: - db_port: - schema: int! - value: - - package_ensure: - schema: str - value: 'present' - auth_password: - schema: str! 
- value: - auth_type: - schema: str - value: 'keystone' - auth_host: - schema: str - value: 'localhost' - auth_port: - schema: int - value: 35357 - auth_admin_prefix: - schema: str - value: - auth_tenant: - schema: str - value: 'services' - auth_user: - schema: str - value: 'neutron' - auth_protocol: - schema: str - value: 'http' - auth_uri: - schema: str - value: - database_max_retries: - schema: int - value: 10 - database_idle_timeout: - schema: int - value: 3600 - database_retry_interval: - schema: int - value: 10 - database_min_pool_size: - schema: int - value: 1 - database_max_pool_size: - schema: int - value: 10 - database_max_overflow: - schema: int - value: 20 - sync_db: - schema: bool - value: false - api_workers: - schema: int - value: 1 - rpc_workers: - schema: int - value: 1 - agent_down_time: - schema: int - value: 75 - router_scheduler_driver: - schema: str - value: 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' - router_distributed: - schema: bool - value: false - l3_ha: - schema: bool - value: false - max_l3_agents_per_router: - schema: int - value: 3 - min_l3_agents_per_router: - schema: int - value: 2 - l3_ha_net_cidr: - schema: str - value: '169.254.192.0/18' - mysql_module: - schema: str - value: - sql_max_retries: - schema: str - value: - max_retries: - schema: str - value: - sql_idle_timeout: - schema: str - value: - idle_timeout: - schema: str - value: - sql_reconnect_interval: - schema: str - value: - retry_interval: - schema: str - value: - log_dir: - schema: str - value: - log_file: - schema: str - value: - report_interval: - schema: str - value: - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} - -tags: [resource/neutron, resource/neutron_service, resource/neutron_server, resource/neutron_api] -version: 1.0.0 diff --git a/resources/node_network_puppet/1.0.0/README.md b/resources/node_network_puppet/1.0.0/README.md deleted file mode 100644 index 0dd00cb1..00000000 --- a/resources/node_network_puppet/1.0.0/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Node network resource for puppet handler - -Setup and configure L23 networking for a node. -Leverages the powerful network_scheme structures to -create all required networking entities like interfaces, -bridges, bonds - both linux and ovs based. -Defaults are given for Debian OS family. 
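For illustration, a minimal `network_scheme` value of the shape declared in this resource's meta.yaml below (keys `version`, `provider`, `interfaces`, `transformations`, `endpoints`, `roles`) might look like the following sketch; the interface, bridge and address names are purely hypothetical:

```python
# Illustrative sketch only: the concrete names and addresses are made up;
# only the top-level keys come from the network_scheme schema in meta.yaml.
import yaml  # PyYAML, used here just to render the structure as YAML

network_scheme = {
    'version': '1.1',
    'provider': 'lnx',
    'interfaces': {'eth1': {}},
    'transformations': [
        {'action': 'add-br', 'name': 'br-mgmt'},
        {'action': 'add-port', 'name': 'eth1', 'bridge': 'br-mgmt'},
    ],
    'endpoints': {'br-mgmt': {'IP': ['10.0.0.3/24']}},
    'roles': {'management': 'br-mgmt'},
}

print(yaml.safe_dump({'network_scheme': network_scheme}, default_flow_style=False))
```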
- -source https://github.com/xenolog/l23network diff --git a/resources/node_network_puppet/1.0.0/actions/remove.pp b/resources/node_network_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 0a01f06d..00000000 --- a/resources/node_network_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,3 +0,0 @@ -class {'l23network': - ensure_package => 'absent', -} diff --git a/resources/node_network_puppet/1.0.0/actions/run.pp b/resources/node_network_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 800605c2..00000000 --- a/resources/node_network_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,41 +0,0 @@ -$resource = hiera($::resource_name) - -$ensure_package = $resource['input']['ensure_package'] -$use_lnx = $resource['input']['use_lnx'] -$use_ovs = $resource['input']['use_ovs'] -$install_ovs = $resource['input']['install_ovs'] -$install_brtool = $resource['input']['install_brtool'] -$install_ethtool = $resource['input']['install_ethtool'] -$install_bondtool = $resource['input']['install_bondtool'] -$install_vlantool = $resource['input']['install_vlantool'] -$ovs_modname = $resource['input']['ovs_modname'] -$ovs_datapath_package_name = $resource['input']['ovs_datapath_package_name'] -$ovs_common_package_name = $resource['input']['ovs_common_package_name'] -$network_scheme = $resource['input']['network_scheme'] - -class {'l23network': - ensure_package => $ensure_package, - use_lnx => $use_lnx, - use_ovs => $use_ovs, - install_ovs => $install_ovs, - install_brtool => $install_brtool, - install_ethtool => $install_ethtool, - install_bondtool => $install_bondtool, - install_vlantool => $install_vlantool, - ovs_modname => $ovs_modname, - ovs_datapath_package_name => $ovs_datapath_package_name, - ovs_common_package_name => $ovs_common_package_name, -} - -prepare_network_config($network_scheme) -$sdn = generate_network_config() -notify { $sdn: require => Class['l23network'], } - -# We need to wait at least 30 seconds for the bridges and other interfaces to -# come up after being created. This should allow for all interfaces to be up -# and ready for traffic before proceeding with further deploy steps. LP#1458954 -exec { 'wait-for-interfaces': - path => '/usr/bin:/bin', - command => 'sleep 32', - require => Notify[$sdn] -} \ No newline at end of file diff --git a/resources/node_network_puppet/1.0.0/meta.yaml b/resources/node_network_puppet/1.0.0/meta.yaml deleted file mode 100644 index ff97ab28..00000000 --- a/resources/node_network_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,58 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - package_ensure: - schema: str - value: 'present' - ensure_package: - schema: str - value: 'present' - use_lnx: - schema: bool - value: true - use_ovs: - schema: bool - value: false - install_ovs: - schema: bool - value: true - install_brtool: - schema: bool - value: true - install_ethtool: - schema: bool - value: true - install_bondtool: - schema: bool - value: true - install_vlantool: - schema: bool - value: true - ovs_modname: - schema: str - value: 'openvswitch' - ovs_datapath_package_name: - schema: str - value: 'openvswitch-datapath-dkms' - ovs_common_package_name: - schema: str - value: 'openvswitch-switch' - network_scheme: - schema: { - version: str!, - provider: str!, - interfaces: {}, - transformations: [{}], - endpoints: {}, - roles: {}, - } - - module: - schema: {repository: str!, branch: str!} - value: {name: 'l23network', type: 'git', url: 'https://github.com/xenolog/l23network', ref: '50098cfa1f0f8e8d58e6a6b77a22f1380aa5c426'} - - ip: - schema: str! 
- value: - -tags: [resources/node_network] diff --git a/resources/node_network_puppet/1.0.0/test.py b/resources/node_network_puppet/1.0.0/test.py deleted file mode 100644 index 56f5d05a..00000000 --- a/resources/node_network_puppet/1.0.0/test.py +++ /dev/null @@ -1,11 +0,0 @@ -import requests - -from solar.core.log import log - - -def test(resource): - log.debug('Testing node_network_puppet') -# requests.get( -# 'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value) -# TODO(bogdando) figure out how to test this -# ) diff --git a/resources/not_provisioned_node/1.0.0/actions/provision.sh b/resources/not_provisioned_node/1.0.0/actions/provision.sh deleted file mode 100644 index d41994bb..00000000 --- a/resources/not_provisioned_node/1.0.0/actions/provision.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -eux -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# TODO should be a way to render configs, in order to do this -# we should have scripts dir variable passed from above -sed -i "s||${DIR}|" "${DIR}"/templates/agent.config - -provision --log-file /tmp/fa_provision.log -d --data_driver nailgun_simple --input_data_file "${DIR}"/templates/provisioning.json --config-file "${DIR}"/templates/agent.config diff --git a/resources/not_provisioned_node/1.0.0/actions/reboot.sh b/resources/not_provisioned_node/1.0.0/actions/reboot.sh deleted file mode 100644 index fc028c2e..00000000 --- a/resources/not_provisioned_node/1.0.0/actions/reboot.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -set -eux - -reboot now - diff --git a/resources/not_provisioned_node/1.0.0/actions/run.sh b/resources/not_provisioned_node/1.0.0/actions/run.sh deleted file mode 100644 index 5a937ebb..00000000 --- a/resources/not_provisioned_node/1.0.0/actions/run.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -eux - -# Fake run action which is required in order to make -# dependency `run` -> `provision` - -exit 0 diff --git a/resources/not_provisioned_node/1.0.0/meta.yaml b/resources/not_provisioned_node/1.0.0/meta.yaml deleted file mode 100644 index f383d41e..00000000 --- a/resources/not_provisioned_node/1.0.0/meta.yaml +++ /dev/null @@ -1,33 +0,0 @@ -handler: shell -version: 1.0.0 - -actions: - provision: provision.sh - run: run.sh - reboot: reboot.sh - -input: - ip: - schema: str! - value: - master_key: - schema: str! - value: - admin_mac: - schema: str! - value: - repos: - schema: list! - value: [] - name: - schema: str - value: a node - location_id: - schema: str! - value: $uuid - reverse: True - partitioning: - schema: dict! 
- value: - -tags: [resources=node] diff --git a/resources/not_provisioned_node/1.0.0/templates/agent.config b/resources/not_provisioned_node/1.0.0/templates/agent.config deleted file mode 100644 index 7f807556..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/agent.config +++ /dev/null @@ -1,4 +0,0 @@ -[DEFAULT] -debug=true -nc_template_path=/templates/cloud-init-templates/ -log_file=/var/log/fuel-agent.log diff --git a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_centos.jinja2 b/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_centos.jinja2 deleted file mode 100644 index 1be4a587..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_centos.jinja2 +++ /dev/null @@ -1,55 +0,0 @@ -#cloud-boothook -#!/bin/bash - -cloud-init-per instance disable_selinux_on_the_fly setenforce 0 - -cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux - -# configure udev rules - -# udev persistent net -cloud-init-per instance udev_persistent_net1 service network stop - -ADMIN_MAC={{ common.admin_mac }} -ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1) -cloud-init-per instance configure_admin_interface /bin/sh -c "echo -e \"# FROM COBBLER SNIPPET\nDEVICE=$ADMIN_IF\nIPADDR={{ common.admin_ip }}\nNETMASK={{ common.admin_mask }}\nBOOTPROTO=none\nONBOOT=yes\nUSERCTL=no\n\" | tee /etc/sysconfig/network-scripts/ifcfg-$ADMIN_IF" - -cloud-init-per instance set_gateway /bin/sh -c 'echo GATEWAY="{{ common.gw }}" | tee -a /etc/sysconfig/network' - -cloud-init-per instance udev_persistent_net5 service network start - -# end of udev - -#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned, -# cloud-init will start to generate resolv.conf with non-actual data -cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf -cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf' -cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' -cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' -cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip }} | tee -a /etc/resolv.conf' - -# configure black module lists -# virt-what should be installed -if [ ! 
-f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then - ([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) - modprobe -r i2c_piix4 -fi - -cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/rc.modules' -cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/rc.modules' -cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/rc.modules' -cloud-init-per instance chmod_rc_modules chmod +x /etc/rc.modules -cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf' -cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf' - -cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4 -cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6 -cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre -cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576" -cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60" - -cloud-init-per instance mkdir_coredump mkdir -p /var/log/coredump -cloud-init-per instance set_coredump /bin/sh -c 'echo -e "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" | tee -a /etc/sysctl.conf' -cloud-init-per instance set_coredump_sysctl sysctl -w "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" -cloud-init-per instance set_chmod chmod 777 /var/log/coredump -cloud-init-per instance set_limits /bin/sh -c 'echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf' diff --git a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_ubuntu.jinja2 b/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_ubuntu.jinja2 deleted file mode 100644 index 753ef758..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/boothook_ubuntu.jinja2 +++ /dev/null @@ -1,55 +0,0 @@ -#cloud-boothook -#!/bin/bash - -# udev persistent net -cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop - -ADMIN_MAC={{ common.admin_mac }} -ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1) -# Check if we do not already have static config (or interface seems unconfigured) -if [ ! -d "/etc/network/interfaces.d" ]; then - mkdir -p /etc/network/interfaces.d - echo 'source /etc/network/interfaces.d/*' > /etc/network/interfaces -fi -if [ ! 
-e "/etc/network/interfaces.d/ifcfg-$ADMIN_IF" ]; then - echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress {{ common.admin_ip }}\n\tnetmask {{ common.admin_mask }}\n\tgateway {{ common.gw }}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF" -fi - -cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start - -# end of udev - -#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned, -# cloud-init will start to generate resolv.conf with non-actual data -cloud-init-per instance resolv_conf_mkdir mkdir -p /etc/resolvconf/resolv.conf.d -cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf -cloud-init-per instance resolv_conf_head_remove rm -f /etc/resolvconf/resolv.conf.d/head -cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf' -cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' -cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf' -cloud-init-per instance resolv_conf_head_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolvconf/resolv.conf.d/head' -cloud-init-per instance resolv_conf_head_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head' -cloud-init-per instance resolv_conf_head_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head' -cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolv.conf' -cloud-init-per instance resolv_conf_head_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolvconf/resolv.conf.d/head' - -# configure black module lists -# virt-what should be installed -if [ ! 
-f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then - ([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all - modprobe -r i2c_piix4 -fi - -cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/modules' -cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/modules' -cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/modules' -cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf' -cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf' - -cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4 -cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6 -cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre -cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576" -cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60" - -cloud-init-per instance dhclient /bin/sh -c 'echo "supersede routers 0;" | tee /etc/dhcp/dhclient.conf' diff --git a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_centos.jinja2 b/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_centos.jinja2 deleted file mode 100644 index a29701b5..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_centos.jinja2 +++ /dev/null @@ -1,26 +0,0 @@ -#cloud-config -resize_rootfs: false -growpart: - mode: false -disable_ec2_metadata: true -disable_root: false - -# password: RANDOM -# chpasswd: { expire: True } - -ssh_pwauth: false -ssh_authorized_keys: -{% for key in common.ssh_auth_keys %} - - {{ key }} -{% endfor %} - -# set the locale to a given locale -# default: en_US.UTF-8 -locale: en_US.UTF-8 - -timezone: {{ common.timezone }} - -hostname: {{ common.hostname }} -fqdn: {{ common.fqdn }} - -final_message: "YAY! The system is finally up, after $UPTIME seconds" diff --git a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 b/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 deleted file mode 100644 index 94e119fe..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/cloud_config_ubuntu.jinja2 +++ /dev/null @@ -1,25 +0,0 @@ -#cloud-config -resize_rootfs: false -growpart: - mode: false -disable_ec2_metadata: true -disable_root: false -user: root -password: r00tme -chpasswd: { expire: false } -ssh_pwauth: false -ssh_authorized_keys: -{% for key in common.ssh_auth_keys %} - - {{ key }} -{% endfor %} - -# set the locale to a given locale -# default: en_US.UTF-8 -locale: en_US.UTF-8 - -timezone: {{ common.timezone }} - -hostname: {{ common.hostname }} -fqdn: {{ common.fqdn }} - -final_message: "YAY! 
The system is finally up, after $UPTIME seconds" diff --git a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_centos.jinja2 b/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_centos.jinja2 deleted file mode 100644 index f63a89bd..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_centos.jinja2 +++ /dev/null @@ -1,11 +0,0 @@ -# instance-id will be autogenerated -# instance-id: iid-abcdefg -#network-interfaces: | -# auto {{ common.admin_iface_name|default("eth0") }} -# iface {{ common.admin_iface_name|default("eth0") }} inet static -# address {{ common.admin_ip }} -# # network 192.168.1.0 -# netmask {{ common.admin_mask }} -# # broadcast 192.168.1.255 -# # gateway 192.168.1.254 -hostname: {{ common.hostname }} diff --git a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_ubuntu.jinja2 b/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_ubuntu.jinja2 deleted file mode 100644 index f63a89bd..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/cloud-init-templates/meta-data_ubuntu.jinja2 +++ /dev/null @@ -1,11 +0,0 @@ -# instance-id will be autogenerated -# instance-id: iid-abcdefg -#network-interfaces: | -# auto {{ common.admin_iface_name|default("eth0") }} -# iface {{ common.admin_iface_name|default("eth0") }} inet static -# address {{ common.admin_ip }} -# # network 192.168.1.0 -# netmask {{ common.admin_mask }} -# # broadcast 192.168.1.255 -# # gateway 192.168.1.254 -hostname: {{ common.hostname }} diff --git a/resources/not_provisioned_node/1.0.0/templates/provisioning.json.jinja b/resources/not_provisioned_node/1.0.0/templates/provisioning.json.jinja deleted file mode 100644 index 74ef406c..00000000 --- a/resources/not_provisioned_node/1.0.0/templates/provisioning.json.jinja +++ /dev/null @@ -1,156 +0,0 @@ -{ - "partitioning": {{ partitioning | to_pretty_json }}, - "profile": "ubuntu_1404_x86_64", - "name_servers_search": "\"example.com\"", - "uid": "2", - "interfaces": { - "eth1": { - "static": "0", - "mac_address": "08:00:27:6e:6d:b4" - }, - "eth0": { - "ip_address": "{{ ip }}", - "dns_name": "{{ name }}.test.domain.local", - "netmask": "255.255.255.0", - "static": "0", - "mac_address": "08:00:27:ea:35:e7" - } - }, - "interfaces_extra": { - "eth1": { - "onboot": "no", - "peerdns": "no" - }, - "eth0": { - "onboot": "no", - "peerdns": "no" - } - }, - "power_type": "ssh", - "power_user": "root", - "kernel_options": { - "udevrules": "08:00:27:6e:6d:b4_eth1,08:00:27:ea:35:e7_eth0", - "netcfg/choose_interface": "08:00:27:ea:35:e7" - }, - "power_address": "10.20.0.1", - "name_servers": "\"127.0.0.1\"", - "ks_meta": { - "gw": "10.20.0.1", - "mco_enable": 1, - "mco_vhost": "mcollective", - "repo_setup": { - "installer_kernel": { - "local": "/var/www/nailgun/ubuntu/x86_64/images/linux", - "remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux" - }, - "repos": {{ repos | to_pretty_json }}, - "metadata": { - "always_editable": true, - "weight": 50, - "label": "Repositories" - }, - "installer_initrd": { - "local": "/var/www/nailgun/ubuntu/x86_64/images/initrd.gz", - "remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz" - } - }, - "authorized_keys": ["{{ master_key }}"], - "mlnx_iser_enabled": false, - "mco_pskey": "Gie6iega9ohngaenahthohngu8aebohxah9seidi", - "mco_user": "guest", - "puppet_enable": 0, - "fuel_version": "6.1", - 
"install_log_2_syslog": 1, - "image_data": { - "/boot": { - "container": "gzip", - "uri": "http://10.0.0.2:8001/tmp/targetimages/env_1_ubuntu_1404_amd64-boot.img.gz", - "format": "ext2" - }, - "/": { - "container": "gzip", - "uri": "http://10.0.0.2:8001/tmp/targetimages/env_1_ubuntu_1404_amd64.img.gz", - "format": "ext4" - } - }, - "timezone": "Etc/UTC", - "puppet_auto_setup": 1, - "puppet_master": "localhost", - "mco_auto_setup": 1, - "mco_password": "guest", - "auth_key": "\"\"", - "pm_data": { - "kernel_params": "console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset", - "ks_spaces": [ - { - "name": "sda", - "extra": [], - "free_space": 304617, - "volumes": [ - { - "type": "boot", - "size": 300 - }, - { - "mount": "/boot", - "type": "raid", - "file_system": "ext2", - "name": "Boot", - "size": 200 - }, - { - "type": "lvm_meta_pool", - "size": 0 - }, - { - "vg": "os", - "type": "pv", - "lvm_meta_size": 64, - "size": 20000, - "orig_size": 59456 - } - ], - "type": "disk", - "id": "sda", - "size": 42800, - "orig_size": 305245 - }, - { - "_allocate_size": "min", - "label": "Base System", - "min_size": 19936, - "orig_min_size": 59392, - "volumes": [ - { - "mount": "/", - "size": 11744, - "type": "lv", - "name": "root", - "file_system": "ext4" - }, - { - "mount": "swap", - "size": 8192, - "type": "lv", - "name": "swap", - "file_system": "swap" - } - ], - "type": "vg", - "id": "os" - } - ] - }, - "mlnx_plugin_mode": "disabled", - "master_ip": "127.0.0.1", - "mco_connector": "rabbitmq", - "mlnx_vf_num": "16", - "admin_net": "10.20.0.0/24", - "mco_host": "localhost" - }, - "name": "{{ name }}", - "hostname": "{{ name }}.example.com", - "slave_name": "{{ name }}", - "power_pass": "/root/.ssh/bootstrap.rsa", - "netboot_enabled": "1" -} diff --git a/resources/nova_api_puppet/1.0.0/README.md b/resources/nova_api_puppet/1.0.0/README.md deleted file mode 100644 index 9fc2bb88..00000000 --- a/resources/nova_api_puppet/1.0.0/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# Nova API resource for puppet handler - -Setup and configure the Nova API service - -# Parameters - -source https://github.com/openstack/puppet-nova_api/blob/5.1.0/manifests/api.pp - - ``admin_password`` - (required) The password to set for the nova admin user in keystone - - ``ensure_package`` - (optional) Whether the nova api package will be installed - Defaults to 'present' - - ``auth_strategy`` - (DEPRECATED) Does nothing and will be removed in Icehouse - Defaults to false - - ``auth_host`` - (optional) The IP of the server running keystone - Defaults to '127.0.0.1' - - ``auth_port`` - (optional) The port to use when authenticating against Keystone - Defaults to 35357 - - ``auth_protocol`` - (optional) The protocol to use when authenticating against Keystone - Defaults to 'http' - - ``auth_uri`` - (optional) The uri of a Keystone service to authenticate against - Defaults to false - - ``auth_admin_prefix`` - (optional) Prefix to prepend at the beginning of the keystone path - Defaults to false - - ``auth_version`` - (optional) API version of the admin Identity API endpoint - for example, use 'v3.0' for the keystone version 3.0 api - Defaults to false - - ``admin_tenant_name`` - (optional) The name of the tenant to create in keystone for use by the nova services - Defaults to 'services' - - ``admin_user`` - (optional) The name of the user to create in keystone for use by the nova services - Defaults to 'nova' - - ``api_bind_address`` - (optional) IP address for nova-api server to listen - Defaults to '0.0.0.0' 
- - ``metadata_listen`` - (optional) IP address for metadata server to listen - Defaults to '0.0.0.0' - - ``enabled_apis`` - (optional) A comma separated list of apis to enable - Defaults to 'ec2,osapi_compute,metadata' - - ``keystone_ec2_url`` - (optional) The keystone url where nova should send requests for ec2tokens - Defaults to false - - ``volume_api_class`` - (optional) The name of the class that nova will use to access volumes. Cinder is the only option. - Defaults to 'nova.volume.cinder.API' - - ``use_forwarded_for`` - (optional) Treat X-Forwarded-For as the canonical remote address. Only - enable this if you have a sanitizing proxy. - Defaults to false - - ``osapi_compute_workers`` - (optional) Number of workers for OpenStack API service - Defaults to $::processorcount - - ``ec2_workers`` - (optional) Number of workers for EC2 service - Defaults to $::processorcount - - ``metadata_workers`` - (optional) Number of workers for metadata service - Defaults to $::processorcount - - ``conductor_workers`` - (optional) DEPRECATED. Use workers parameter of nova::conductor - Class instead. - Defaults to undef - - ``sync_db`` - (optional) Run nova-manage db sync on api nodes after installing the package. - Defaults to true - - ``neutron_metadata_proxy_shared_secret`` - (optional) Shared secret to validate proxies Neutron metadata requests - Defaults to undef - - ``pci_alias`` - (optional) Pci passthrough for controller: - Defaults to undef - Example - "[ {'vendor_id':'1234', 'product_id':'5678', 'name':'default'}, {...} ]" - - ``ratelimits`` - (optional) A string that is a semicolon-separated list of 5-tuples. - See http://docs.openstack.org/trunk/config-reference/content/configuring-compute-API.html - Example: '(POST, "*", .*, 10, MINUTE);(POST, "*/servers", ^/servers, 50, DAY);(PUT, "*", .*, 10, MINUTE)' - Defaults to undef - - ``ratelimits_factory`` - (optional) The rate limiting factory to use - Defaults to 'nova.api.openstack.compute.limits:RateLimitingMiddleware.factory' - - ``osapi_v3`` - (optional) Enable or not Nova API v3 - Defaults to false - - ``validate`` - (optional) Whether to validate the service is working after any service refreshes - Defaults to false - - ``validation_options`` - (optional) Service validation options - Should be a hash of options defined in openstacklib::service_validation - If empty, defaults values are taken from openstacklib function. - Default command list nova flavors. - Require validate set at True. 
- Example: - nova::api::validation_options: - nova-api: - command: check_nova.py - path: /usr/bin:/bin:/usr/sbin:/sbin - provider: shell - tries: 5 - try_sleep: 10 - Defaults to {} diff --git a/resources/nova_api_puppet/1.0.0/actions/remove.pp b/resources/nova_api_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 4dfb8e20..00000000 --- a/resources/nova_api_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,17 +0,0 @@ -class { 'nova::api': - ensure_package => 'absent', - enabled => false, - admin_password => 'not important as removed' -} - -include nova::params - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', - refreshonly => true, -} - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => 'absent', -} diff --git a/resources/nova_api_puppet/1.0.0/actions/run.pp b/resources/nova_api_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 95885e81..00000000 --- a/resources/nova_api_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,78 +0,0 @@ -$resource = hiera($::resource_name) - -$ensure_package = $resource['input']['ensure_package'] -$auth_strategy = $resource['input']['auth_strategy'] -$auth_host = $resource['input']['auth_host'] -$auth_port = $resource['input']['auth_port'] -$auth_protocol = $resource['input']['auth_protocol'] -$auth_uri = $resource['input']['auth_uri'] -$auth_admin_prefix = $resource['input']['auth_admin_prefix'] -$auth_version = $resource['input']['auth_version'] -$admin_tenant_name = $resource['input']['admin_tenant_name'] -$admin_user = $resource['input']['admin_user'] -$admin_password = $resource['input']['admin_password'] -$api_bind_address = $resource['input']['api_bind_address'] -$metadata_listen = $resource['input']['metadata_listen'] -$enabled_apis = $resource['input']['enabled_apis'] -$keystone_ec2_url = $resource['input']['keystone_ec2_url'] -$volume_api_class = $resource['input']['volume_api_class'] -$use_forwarded_for = $resource['input']['use_forwarded_for'] -$osapi_compute_workers = $resource['input']['osapi_compute_workers'] -$ec2_workers = $resource['input']['ec2_workers'] -$metadata_workers = $resource['input']['metadata_workers'] -$sync_db = $resource['input']['sync_db'] -$neutron_metadata_proxy_shared_secret = $resource['input']['neutron_metadata_proxy_shared_secret'] -$osapi_v3 = $resource['input']['osapi_v3'] -$pci_alias = $resource['input']['pci_alias'] -$ratelimits = $resource['input']['ratelimits'] -$ratelimits_factory = $resource['input']['ratelimits_factory'] -$validate = $resource['input']['validate'] -$validation_options = $resource['input']['validation_options'] -$workers = $resource['input']['workers'] -$conductor_workers = $resource['input']['conductor_workers'] - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -class { 'nova::api': - enabled => true, - manage_service => true, - ensure_package => $ensure_package, - auth_strategy => $auth_strategy, - auth_host => $auth_host, - auth_port => $auth_port, - auth_protocol => $auth_protocol, - auth_uri => $auth_uri, - auth_admin_prefix => $auth_admin_prefix, - auth_version => $auth_version, - admin_tenant_name => $admin_tenant_name, - admin_user => $admin_user, - admin_password => $admin_password, - api_bind_address => $api_bind_address, - metadata_listen => $metadata_listen, - enabled_apis => $enabled_apis, - keystone_ec2_url => $keystone_ec2_url, - 
volume_api_class => $volume_api_class, - use_forwarded_for => $use_forwarded_for, - osapi_compute_workers => $osapi_compute_workers, - ec2_workers => $ec2_workers, - metadata_workers => $metadata_workers, - sync_db => $sync_db, - neutron_metadata_proxy_shared_secret => $neutron_metadata_proxy_shared_secret, - osapi_v3 => $osapi_v3, - pci_alias => $pci_alias, - ratelimits => $ratelimits, - ratelimits_factory => $ratelimits_factory, - validate => $validate, - validation_options => $validation_options, - workers => $workers, - conductor_workers => $conductor_workers, -} diff --git a/resources/nova_api_puppet/1.0.0/actions/update.pp b/resources/nova_api_puppet/1.0.0/actions/update.pp deleted file mode 100644 index 90e409e5..00000000 --- a/resources/nova_api_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,82 +0,0 @@ -$resource = hiera($::resource_name) - -$ensure_package = $resource['input']['ensure_package'] -$auth_strategy = $resource['input']['auth_strategy'] -$auth_host = $resource['input']['auth_host'] -$auth_port = $resource['input']['auth_port'] -$auth_protocol = $resource['input']['auth_protocol'] -$auth_uri = $resource['input']['auth_uri'] -$auth_admin_prefix = $resource['input']['auth_admin_prefix'] -$auth_version = $resource['input']['auth_version'] -$admin_tenant_name = $resource['input']['admin_tenant_name'] -$admin_user = $resource['input']['admin_user'] -$admin_password = $resource['input']['admin_password'] -$api_bind_address = $resource['input']['api_bind_address'] -$metadata_listen = $resource['input']['metadata_listen'] -$enabled_apis = $resource['input']['enabled_apis'] -$keystone_ec2_url = $resource['input']['keystone_ec2_url'] -$volume_api_class = $resource['input']['volume_api_class'] -$use_forwarded_for = $resource['input']['use_forwarded_for'] -$osapi_compute_workers = $resource['input']['osapi_compute_workers'] -$ec2_workers = $resource['input']['ec2_workers'] -$metadata_workers = $resource['input']['metadata_workers'] -$sync_db = $resource['input']['sync_db'] -$neutron_metadata_proxy_shared_secret = $resource['input']['neutron_metadata_proxy_shared_secret'] -$osapi_v3 = $resource['input']['osapi_v3'] -$pci_alias = $resource['input']['pci_alias'] -$ratelimits = $resource['input']['ratelimits'] -$ratelimits_factory = $resource['input']['ratelimits_factory'] -$validate = $resource['input']['validate'] -$validation_options = $resource['input']['validation_options'] -$workers = $resource['input']['workers'] -$conductor_workers = $resource['input']['conductor_workers'] - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -class { 'nova::api': - enabled => true, - manage_service => true, - ensure_package => $ensure_package, - auth_strategy => $auth_strategy, - auth_host => $auth_host, - auth_port => $auth_port, - auth_protocol => $auth_protocol, - auth_uri => $auth_uri, - auth_admin_prefix => $auth_admin_prefix, - auth_version => $auth_version, - admin_tenant_name => $admin_tenant_name, - admin_user => $admin_user, - admin_password => $admin_password, - api_bind_address => $api_bind_address, - metadata_listen => $metadata_listen, - enabled_apis => $enabled_apis, - keystone_ec2_url => $keystone_ec2_url, - volume_api_class => $volume_api_class, - use_forwarded_for => $use_forwarded_for, - osapi_compute_workers => $osapi_compute_workers, - ec2_workers => $ec2_workers, - metadata_workers => $metadata_workers, - 
sync_db => $sync_db, - neutron_metadata_proxy_shared_secret => $neutron_metadata_proxy_shared_secret, - osapi_v3 => $osapi_v3, - pci_alias => $pci_alias, - ratelimits => $ratelimits, - ratelimits_factory => $ratelimits_factory, - validate => $validate, - validation_options => $validation_options, - workers => $workers, - conductor_workers => $conductor_workers, -} - -notify { "restart nova api": - notify => Service["nova-api"], -} diff --git a/resources/nova_api_puppet/1.0.0/meta.yaml b/resources/nova_api_puppet/1.0.0/meta.yaml deleted file mode 100644 index bd880825..00000000 --- a/resources/nova_api_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,109 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - ensure_package: - schema: str - value: 'present' - auth_strategy: - schema: str - value: - auth_host: - schema: str - value: '127.0.0.1' - auth_port: - schema: int - value: 35357 - auth_protocol: - schema: str - value: 'http' - auth_uri: - schema: str - value: - auth_admin_prefix: - schema: str - value: - auth_version: - schema: str - value: - admin_tenant_name: - schema: str - value: 'services' - admin_user: - schema: str - value: 'nova' - admin_password: - schema: str - value: 'nova' - api_bind_address: - schema: str - value: '0.0.0.0' - metadata_listen: - schema: str - value: '0.0.0.0' - enabled_apis: - schema: str - value: 'ec2,osapi_compute,metadata' - keystone_ec2_url: - schema: str - value: - volume_api_class: - schema: str - value: 'nova.volume.cinder.API' - use_forwarded_for: - schema: bool - value: false - osapi_compute_workers: - schema: int - value: 1 - ec2_workers: - schema: int - value: 1 - metadata_workers: - schema: int - value: 1 - sync_db: - schema: bool - value: true - neutron_metadata_proxy_shared_secret: - schema: str - value: - osapi_v3: - schema: bool - value: false - pci_alias: - schema: str - value: - ratelimits: - schema: str - value: - ratelimits_factory: - schema: str - value: 'nova.api.openstack.compute.limits:RateLimitingMiddleware.factory' - validate: - schema: bool - value: false - validation_options: - schema: {} - value: {} - workers: - schema: str - value: - conductor_workers: - schema: str - value: - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/nova_api_service, resources/nova_api, resources/nova] diff --git a/resources/nova_api_puppet/1.0.0/test.py b/resources/nova_api_puppet/1.0.0/test.py deleted file mode 100644 index f089c3d5..00000000 --- a/resources/nova_api_puppet/1.0.0/test.py +++ /dev/null @@ -1,91 +0,0 @@ -import json -import requests - -from solar.core.log import log -from solar.core import validation - - -def test(resource): - log.debug('Testing nova api') - - args = resource.args - - token, token_data = validation.validate_token( - keystone_host=args['auth_host'], - keystone_port=args['auth_port'], - user=args['admin_user'], - tenant=args['admin_tenant_name'], - password=args['admin_password'], - ) - - endpoints = [ - e['endpoints'] for e in token_data['access']['serviceCatalog'] - if e['name'] == 'nova' - ][0] - public_url = endpoints[0]['publicURL'] - - log.debug('nova admin_url: %s', public_url) - - servers = requests.get( - '{public_url}/servers/detail'.format(public_url=public_url), - headers={ - 'X-Auth-Token': token, - 'Content-Type': 'application/json', - } - ) - - servers_json = servers.json() - - log.debug( - 'NOVA API SERVERS: %s', - json.dumps(servers_json, indent=2) - ) - - assert 'servers' in servers_json - assert isinstance(servers_json['servers'], list) - - flavors = requests.get( - '{public_url}/flavors'.format(public_url=public_url), - headers={ - 'X-Auth-Token': token, - 'Content-Type': 'application/json', - } - ) - - flavors_json = flavors.json() - - log.debug('NOVA API FLAVORS: %s', json.dumps(flavors_json, indent=2)) - - assert 'flavors' in flavors_json - assert isinstance(flavors_json['flavors'], list) - assert len(flavors_json['flavors']) > 0 - - for flavor_data in flavors_json['flavors']: - url = [link['href'] for link in flavor_data['links'] - if link['rel'] == 'self'][0] - - flavor = requests.get( - url, - headers={ - 'X-Auth-Token': token, - 'Content-Type': 'application/json', - } - ) - - flavor_json = flavor.json() - - log.debug( - 'NOVA API FLAVOR %s data: %s', - flavor_data['name'], - json.dumps(flavor_json, indent=2) - ) - - images = requests.get( - '{public_url}/images'.format(public_url=public_url), - headers={ - 'X-Auth-Token': token, - 'Content-Type': 'application/json', - } - ) - - log.debug('NOVA API IMAGES: %s', images.json()) diff --git a/resources/nova_compute_libvirt_puppet/1.0.0/README.md b/resources/nova_compute_libvirt_puppet/1.0.0/README.md deleted file mode 100644 index 12a5928e..00000000 --- a/resources/nova_compute_libvirt_puppet/1.0.0/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# Nova compute libvirt resource for puppet handler - -Install and manage nova-compute guests managed by libvirt. -Cannot be used separately from nova compute resource and -should share the same node. -Libvirt service name defaults are given for Debian OS family. - -# Parameters - -source https://github.com/openstack/puppet-nova_compute_libvirt/blob/5.1.0/manifests/compute/libvirt.pp - - ``libvirt_virt_type`` - (optional) Libvirt domain type. Options are: kvm, lxc, qemu, uml, xen - Replaces libvirt_type - Defaults to 'kvm' - - ``vncserver_listen`` - (optional) IP address on which instance vncservers should listen - Defaults to '127.0.0.1' - - ``migration_support`` - (optional) Whether to support virtual machine migration - Defaults to false - - ``libvirt_cpu_mode`` - (optional) The libvirt CPU mode to configure. Possible values - include custom, host-model, none, host-passthrough. 
- Defaults to 'host-model' if libvirt_virt_type is set to either - kvm or qemu, otherwise defaults to 'none'. - - ``libvirt_disk_cachemodes`` - (optional) A list of cachemodes for different disk types, e.g. - ["file=directsync", "block=none"] - If an empty list is specified, the disk_cachemodes directive - will be removed from nova.conf completely. - Defaults to an empty list - - ``libvirt_inject_password`` - (optional) Inject the admin password at boot time, without an agent. - Defaults to false - - ``libvirt_inject_key`` - (optional) Inject the ssh public key at boot time. - Defaults to false - - ``libvirt_inject_partition`` - (optional) The partition to inject to : -2 => disable, -1 => inspect - (libguestfs only), 0 => not partitioned, >0 => partition - number (integer value) - Defaults to -2 - - ``remove_unused_base_images`` - (optional) Should unused base images be removed? - If undef is specified, remove the line in nova.conf - otherwise, use a boolean to remove or not the base images. - Defaults to undef - - ``remove_unused_kernels`` - (optional) Should unused kernel images be removed? - This is only safe to enable if all compute nodes - have been updated to support this option. - If undef is specified, remove the line in nova.conf - otherwise, use a boolean to remove or not the kernels. - Defaults to undef - - ``remove_unused_resized_minimum_age_seconds`` - (optional) Unused resized base images younger - than this will not be removed - If undef is specified, remove the line in nova.conf - otherwise, use a integer or a string to define after - how many seconds it will be removed. - Defaults to undef - - ``remove_unused_original_minimum_age_seconds`` - (optional) Unused unresized base images younger - than this will not be removed - If undef is specified, remove the line in nova.conf - otherwise, use a integer or a string to define after - how many seconds it will be removed. - Defaults to undef - - ``libvirt_service_name`` - (optional) libvirt service name. 
- Defaults to $::nova::params::libvirt_service_name diff --git a/resources/nova_compute_libvirt_puppet/1.0.0/actions/remove.pp b/resources/nova_compute_libvirt_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index b575edb8..00000000 --- a/resources/nova_compute_libvirt_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1 +0,0 @@ -notify { 'Remove action is not supported upstream': } \ No newline at end of file diff --git a/resources/nova_compute_libvirt_puppet/1.0.0/actions/run.pp b/resources/nova_compute_libvirt_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 868c4532..00000000 --- a/resources/nova_compute_libvirt_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,56 +0,0 @@ -$resource = hiera($::resource_name) - -$libvirt_virt_type = $resource['input']['libvirt_virt_type'] -$vncserver_listen = $resource['input']['vncserver_listen'] -$migration_support = $resource['input']['migration_support'] -$libvirt_cpu_mode = $resource['input']['libvirt_cpu_mode'] -$libvirt_disk_cachemodes = $resource['input']['libvirt_disk_cachemodes'] -$libvirt_inject_password = $resource['input']['libvirt_inject_password'] -$libvirt_inject_key = $resource['input']['libvirt_inject_key'] -$libvirt_inject_partition = $resource['input']['libvirt_inject_partition'] -$remove_unused_base_images = $resource['input']['remove_unused_base_images'] -$remove_unused_kernels = $resource['input']['remove_unused_kernels'] -$remove_unused_resized_minimum_age_seconds = $resource['input']['remove_unused_resized_minimum_age_seconds'] -$remove_unused_original_minimum_age_seconds = $resource['input']['remove_unused_original_minimum_age_seconds'] -$libvirt_service_name = $resource['input']['libvirt_service_name'] -$libvirt_type = $resource['input']['libvirt_type'] - -class { 'nova::compute::libvirt': - libvirt_virt_type => $libvirt_virt_type, - vncserver_listen => $vncserver_listen, - migration_support => $migration_support, - libvirt_cpu_mode => $libvirt_cpu_mode, - libvirt_disk_cachemodes => $libvirt_disk_cachemodes, - libvirt_inject_password => $libvirt_inject_password, - libvirt_inject_key => $libvirt_inject_key, - libvirt_inject_partition => $libvirt_inject_partition, - remove_unused_base_images => $remove_unused_base_images, - remove_unused_kernels => $remove_unused_kernels, - remove_unused_resized_minimum_age_seconds => $remove_unused_resized_minimum_age_seconds, - remove_unused_original_minimum_age_seconds => $remove_unused_original_minimum_age_seconds, - libvirt_service_name => $libvirt_service_name, - libvirt_type => $libvirt_type, -} - -#exec { 'networking-refresh': -# command => '/sbin/ifdown -a ; /sbin/ifup -a', -#} - -#exec { 'post-nova_config': -# command => '/bin/echo "Nova config has changed"', -#} - -include nova::params - -service { 'nova-compute': - name => $::nova::params::compute_service_name, -} - -package { 'nova-compute': - name => $::nova::params::compute_package_name, -} - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} diff --git a/resources/nova_compute_libvirt_puppet/1.0.0/actions/update.pp b/resources/nova_compute_libvirt_puppet/1.0.0/actions/update.pp deleted file mode 100644 index 33dbcd88..00000000 --- a/resources/nova_compute_libvirt_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,60 +0,0 @@ -$resource = hiera($::resource_name) - -$libvirt_virt_type = $resource['input']['libvirt_virt_type'] -$vncserver_listen = $resource['input']['vncserver_listen'] -$migration_support = $resource['input']['migration_support'] -$libvirt_cpu_mode = 
$resource['input']['libvirt_cpu_mode'] -$libvirt_disk_cachemodes = $resource['input']['libvirt_disk_cachemodes'] -$libvirt_inject_password = $resource['input']['libvirt_inject_password'] -$libvirt_inject_key = $resource['input']['libvirt_inject_key'] -$libvirt_inject_partition = $resource['input']['libvirt_inject_partition'] -$remove_unused_base_images = $resource['input']['remove_unused_base_images'] -$remove_unused_kernels = $resource['input']['remove_unused_kernels'] -$remove_unused_resized_minimum_age_seconds = $resource['input']['remove_unused_resized_minimum_age_seconds'] -$remove_unused_original_minimum_age_seconds = $resource['input']['remove_unused_original_minimum_age_seconds'] -$libvirt_service_name = $resource['input']['libvirt_service_name'] -$libvirt_type = $resource['input']['libvirt_type'] - -class { 'nova::compute::libvirt': - libvirt_virt_type => $libvirt_virt_type, - vncserver_listen => $vncserver_listen, - migration_support => $migration_support, - libvirt_cpu_mode => $libvirt_cpu_mode, - libvirt_disk_cachemodes => $libvirt_disk_cachemodes, - libvirt_inject_password => $libvirt_inject_password, - libvirt_inject_key => $libvirt_inject_key, - libvirt_inject_partition => $libvirt_inject_partition, - remove_unused_base_images => $remove_unused_base_images, - remove_unused_kernels => $remove_unused_kernels, - remove_unused_resized_minimum_age_seconds => $remove_unused_resized_minimum_age_seconds, - remove_unused_original_minimum_age_seconds => $remove_unused_original_minimum_age_seconds, - libvirt_service_name => $libvirt_service_name, - libvirt_type => $libvirt_type, -} - -#exec { 'networking-refresh': -# command => '/sbin/ifdown -a ; /sbin/ifup -a', -#} - -#exec { 'post-nova_config': -# command => '/bin/echo "Nova config has changed"', -#} - -include nova::params - -service { 'nova-compute': - name => $::nova::params::compute_service_name, -} - -package { 'nova-compute': - name => $::nova::params::compute_package_name, -} - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -notify { "restart nova compute": - notify => Service["nova-compute"], -} diff --git a/resources/nova_compute_libvirt_puppet/1.0.0/meta.yaml b/resources/nova_compute_libvirt_puppet/1.0.0/meta.yaml deleted file mode 100644 index f9073035..00000000 --- a/resources/nova_compute_libvirt_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,61 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - libvirt_virt_type: - schema: str - value: 'kvm' - vncserver_listen: - schema: str - value: '127.0.0.1' - migration_support: - schema: bool - value: false - libvirt_cpu_mode: - schema: str - value: - libvirt_disk_cachemodes: - schema: [str] - value: [] - libvirt_inject_password: - schema: bool - value: false - libvirt_inject_key: - schema: bool - value: false - libvirt_inject_partition: - schema: str - value: '-2' - remove_unused_base_images: - schema: str - value: - remove_unused_kernels: - schema: str - value: - remove_unused_resized_minimum_age_seconds: - schema: str - value: - remove_unused_original_minimum_age_seconds: - schema: str - value: - libvirt_service_name: - schema: str - value: 'libvirt-bin' - libvirt_type: - schema: bool - value: false - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/nova_compute_libvirt_service, resources/nova_compute_libvirt, resources/nova_compute, resources/nova] diff --git a/resources/nova_compute_puppet/1.0.0/README.md b/resources/nova_compute_puppet/1.0.0/README.md deleted file mode 100644 index 8cecc3ec..00000000 --- a/resources/nova_compute_puppet/1.0.0/README.md +++ /dev/null @@ -1,105 +0,0 @@ -# Nova compute resource for puppet handler - -Setup and configure the Nova compute service. - -# Parameters - -source https://github.com/openstack/puppet-nova_compute/blob/5.1.0/manifests/compute.pp - - ``enabled`` - (optional) Whether to enable the nova-compute service - Defaults to false - - ``manage_service`` - (optional) Whether to start/stop the service - Defaults to true - - ``ensure_package`` - (optional) The state for the nova-compute package - Defaults to 'present' - - ``vnc_enabled`` - (optional) Whether to use a VNC proxy - Defaults to true - - ``vncserver_proxyclient_address`` - (optional) The IP address of the server running the VNC proxy client - Defaults to '127.0.0.1' - - ``vncproxy_host`` - (optional) The host of the VNC proxy server - Defaults to false - - ``vncproxy_protocol`` - (optional) The protocol to communicate with the VNC proxy server - Defaults to 'http' - - ``vncproxy_port`` - (optional) The port to communicate with the VNC proxy server - Defaults to '6080' - - ``vncproxy_path`` - (optional) The path at the end of the uri for communication with the VNC proxy server - Defaults to '/vnc_auto.html' - - ``vnc_keymap`` - (optional) The keymap to use with VNC (ls -alh /usr/share/qemu/keymaps to list available keymaps) - Defaults to 'en-us' - - ``force_config_drive`` - (optional) Whether to force the config drive to be attached to all VMs - Defaults to false - - ``virtio_nic`` - (optional) Whether to use virtio for the nic driver of VMs - Defaults to false - - ``neutron_enabled`` - (optional) Whether to use Neutron for networking of VMs - Defaults to true - - ``network_device_mtu`` - (optional) The MTU size for the interfaces managed by nova - Defaults to undef - - ``instance_usage_audit`` - (optional) Generate periodic compute.instance.exists notifications. - Defaults to false - - ``instance_usage_audit_period`` - (optional) Time period to generate instance usages for. - Time period must be hour, day, month or year - Defaults to 'month' - - ``force_raw_images`` - (optional) Force backing images to raw format. - Defaults to true - - ``reserved_host_memory`` - Reserved host memory - The amount of memory in MB reserved for the host. - Defaults to '512' - - ``compute_manager`` - Compute manager - The driver that will manage the running instances. - Defaults to nova.compute.manager.ComputeManager - - ``pci_passthrough_whitelist`` - (optional) Pci passthrough hash in format of: - Defaults to undef - Example - "[ { 'vendor_id':'1234','product_id':'5678' }, - { 'vendor_id':'4321','product_id':'8765','physical_network':'default' } ] " - - ``default_availability_zone`` - (optional) Default compute node availability zone. - Defaults to nova - - ``default_schedule_zone`` - (optional) Availability zone to use when user doesn't specify one. - Defaults to undef - - ``internal_service_availability_zone`` - (optional) The availability zone to show internal services under. 
- Defaults to internal diff --git a/resources/nova_compute_puppet/1.0.0/actions/remove.pp b/resources/nova_compute_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index a8a07fb0..00000000 --- a/resources/nova_compute_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,20 +0,0 @@ -class { 'nova::compute': - ensure_package => 'absent', - enabled => false, -} - -include nova::params - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', - refreshonly => true, -} - -exec { 'networking-refresh': - command => '/sbin/ifdown -a ; /sbin/ifup -a', -} - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => 'absent', -} \ No newline at end of file diff --git a/resources/nova_compute_puppet/1.0.0/actions/run.pp b/resources/nova_compute_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 2a8810be..00000000 --- a/resources/nova_compute_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,64 +0,0 @@ -$resource = hiera($::resource_name) - -$ensure_package = $resource['input']['ensure_package'] -$vnc_enabled = $resource['input']['vnc_enabled'] -$vncserver_proxyclient_address = $resource['input']['vncserver_proxyclient_address'] -$vncproxy_host = $resource['input']['vncproxy_host'] -$vncproxy_protocol = $resource['input']['vncproxy_protocol'] -$vncproxy_port = $resource['input']['vncproxy_port'] -$vncproxy_path = $resource['input']['vncproxy_path'] -$vnc_keymap = $resource['input']['vnc_keymap'] -$force_config_drive = $resource['input']['force_config_drive'] -$virtio_nic = $resource['input']['virtio_nic'] -$neutron_enabled = $resource['input']['neutron_enabled'] -$network_device_mtu = $resource['input']['network_device_mtu'] -$instance_usage_audit = $resource['input']['instance_usage_audit'] -$instance_usage_audit_period = $resource['input']['instance_usage_audit_period'] -$force_raw_images = $resource['input']['force_raw_images'] -$reserved_host_memory = $resource['input']['reserved_host_memory'] -$compute_manager = $resource['input']['compute_manager'] -$pci_passthrough = $resource['input']['pci_passthrough'] -$default_availability_zone = $resource['input']['default_availability_zone'] -$default_schedule_zone = $resource['input']['default_schedule_zone'] -$internal_service_availability_zone = $resource['input']['internal_service_availability_zone'] - -class { 'nova::compute': - enabled => true, - manage_service => true, - ensure_package => $ensure_package, - vnc_enabled => $vnc_enabled, - vncserver_proxyclient_address => $vncserver_proxyclient_address, - vncproxy_host => $vncproxy_host, - vncproxy_protocol => $vncproxy_protocol, - vncproxy_port => $vncproxy_port, - vncproxy_path => $vncproxy_path, - vnc_keymap => $vnc_keymap, - force_config_drive => $force_config_drive, - virtio_nic => $virtio_nic, - neutron_enabled => $neutron_enabled, - network_device_mtu => $network_device_mtu, - instance_usage_audit => $instance_usage_audit, - instance_usage_audit_period => $instance_usage_audit_period, - force_raw_images => $force_raw_images, - reserved_host_memory => $reserved_host_memory, - compute_manager => $compute_manager, - pci_passthrough => $pci_passthrough, - default_availability_zone => $default_availability_zone, - default_schedule_zone => $default_schedule_zone, - internal_service_availability_zone => $internal_service_availability_zone, -} - -exec { 'networking-refresh': - command => '/sbin/ifdown -a ; /sbin/ifup -a', -} - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package 
{ 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} \ No newline at end of file diff --git a/resources/nova_compute_puppet/1.0.0/actions/update.pp b/resources/nova_compute_puppet/1.0.0/actions/update.pp deleted file mode 100644 index ba31d806..00000000 --- a/resources/nova_compute_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,68 +0,0 @@ -$resource = hiera($::resource_name) - -$ensure_package = $resource['input']['ensure_package'] -$vnc_enabled = $resource['input']['vnc_enabled'] -$vncserver_proxyclient_address = $resource['input']['vncserver_proxyclient_address'] -$vncproxy_host = $resource['input']['vncproxy_host'] -$vncproxy_protocol = $resource['input']['vncproxy_protocol'] -$vncproxy_port = $resource['input']['vncproxy_port'] -$vncproxy_path = $resource['input']['vncproxy_path'] -$vnc_keymap = $resource['input']['vnc_keymap'] -$force_config_drive = $resource['input']['force_config_drive'] -$virtio_nic = $resource['input']['virtio_nic'] -$neutron_enabled = $resource['input']['neutron_enabled'] -$network_device_mtu = $resource['input']['network_device_mtu'] -$instance_usage_audit = $resource['input']['instance_usage_audit'] -$instance_usage_audit_period = $resource['input']['instance_usage_audit_period'] -$force_raw_images = $resource['input']['force_raw_images'] -$reserved_host_memory = $resource['input']['reserved_host_memory'] -$compute_manager = $resource['input']['compute_manager'] -$pci_passthrough = $resource['input']['pci_passthrough'] -$default_availability_zone = $resource['input']['default_availability_zone'] -$default_schedule_zone = $resource['input']['default_schedule_zone'] -$internal_service_availability_zone = $resource['input']['internal_service_availability_zone'] - -class { 'nova::compute': - enabled => true, - manage_service => true, - ensure_package => $ensure_package, - vnc_enabled => $vnc_enabled, - vncserver_proxyclient_address => $vncserver_proxyclient_address, - vncproxy_host => $vncproxy_host, - vncproxy_protocol => $vncproxy_protocol, - vncproxy_port => $vncproxy_port, - vncproxy_path => $vncproxy_path, - vnc_keymap => $vnc_keymap, - force_config_drive => $force_config_drive, - virtio_nic => $virtio_nic, - neutron_enabled => $neutron_enabled, - network_device_mtu => $network_device_mtu, - instance_usage_audit => $instance_usage_audit, - instance_usage_audit_period => $instance_usage_audit_period, - force_raw_images => $force_raw_images, - reserved_host_memory => $reserved_host_memory, - compute_manager => $compute_manager, - pci_passthrough => $pci_passthrough, - default_availability_zone => $default_availability_zone, - default_schedule_zone => $default_schedule_zone, - internal_service_availability_zone => $internal_service_availability_zone, -} - -exec { 'networking-refresh': - command => '/sbin/ifdown -a ; /sbin/ifup -a', -} - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -notify { "restart nova compute": - notify => Service["nova-compute"], -} diff --git a/resources/nova_compute_puppet/1.0.0/meta.yaml b/resources/nova_compute_puppet/1.0.0/meta.yaml deleted file mode 100644 index 3100d9b3..00000000 --- a/resources/nova_compute_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,82 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - ensure_package: - schema: str - value: 'present' - vnc_enabled: - schema: bool - value: true - 
vncserver_proxyclient_address: - schema: str - value: '127.0.0.1' - vncproxy_host: - schema: str - value: - vncproxy_protocol: - schema: str - value: 'http' - vncproxy_port: - schema: int - value: 6080 - vncproxy_path: - schema: str - value: '/vnc_auto.html' - vnc_keymap: - schema: str - value: 'en-us' - force_config_drive: - schema: bool - value: false - virtio_nic: - schema: bool - value: false - neutron_enabled: - schema: bool - value: true - network_device_mtu: - schema: str - value: - instance_usage_audit: - schema: bool - value: false - instance_usage_audit_period: - schema: str - value: 'month' - force_raw_images: - schema: bool - value: true - reserved_host_memory: - schema: int - value: 512 - compute_manager: - schema: str - value: 'nova.compute.manager.ComputeManager' - pci_passthrough: - schema: str - value: - default_availability_zone: - schema: str - value: 'nova' - default_schedule_zone: - schema: str - value: - internal_service_availability_zone: - schema: str - value: 'internal' - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/nova_compute_service, resources/nova_compute, resources/nova] diff --git a/resources/nova_conductor_puppet/1.0.0/README.md b/resources/nova_conductor_puppet/1.0.0/README.md deleted file mode 100644 index 26a94d35..00000000 --- a/resources/nova_conductor_puppet/1.0.0/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# Nova conductor resource for puppet handler - -Setup and configure the Nova conductor service. -Note, it [should not](http://docs.openstack.org/juno/config-reference/content/section_conductor.html) be deployed on compute nodes. - -# Parameters - -source https://github.com/openstack/puppet-nova_conductor/blob/5.1.0/manifests/conductor.pp - - ``ensure_package`` - (optional) The state of the nova conductor package - Defaults to 'present' - - ``workers`` - (optional) Number of workers for OpenStack Conductor service - Defaults to undef (i.e. 
parameter will not be present) \ No newline at end of file diff --git a/resources/nova_conductor_puppet/1.0.0/actions/remove.pp b/resources/nova_conductor_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 9c9fee76..00000000 --- a/resources/nova_conductor_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,11 +0,0 @@ -class { 'nova::conductor': - ensure_package => 'absent', - enabled => false, -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => 'absent', -} \ No newline at end of file diff --git a/resources/nova_conductor_puppet/1.0.0/actions/run.pp b/resources/nova_conductor_puppet/1.0.0/actions/run.pp deleted file mode 100644 index f0aa53e7..00000000 --- a/resources/nova_conductor_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,22 +0,0 @@ -$resource = hiera($::resource_name) - -$ensure_package = $resource['input']['ensure_package'] -$workers = $resource['input']['workers'] - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -class { 'nova::conductor': - enabled => true, - manage_service => true, - ensure_package => $ensure_package, - workers => $workers, -} diff --git a/resources/nova_conductor_puppet/1.0.0/actions/update.pp b/resources/nova_conductor_puppet/1.0.0/actions/update.pp deleted file mode 100644 index ed258675..00000000 --- a/resources/nova_conductor_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,26 +0,0 @@ -$resource = hiera($::resource_name) - -$ensure_package = $resource['input']['ensure_package'] -$workers = $resource['input']['workers'] - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -class { 'nova::conductor': - enabled => true, - manage_service => true, - ensure_package => $ensure_package, - workers => $workers, -} - -notify { "restart nova conductor": - notify => Service["nova-conductor"], -} diff --git a/resources/nova_conductor_puppet/1.0.0/meta.yaml b/resources/nova_conductor_puppet/1.0.0/meta.yaml deleted file mode 100644 index 3b9a6228..00000000 --- a/resources/nova_conductor_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - ensure_package: - schema: str - value: 'present' - workers: - schema: int - value: 1 - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/nova_conductor_service, resources/nova_conductor, resources/nova] diff --git a/resources/nova_generic_service_puppet/1.0.0/README.md b/resources/nova_generic_service_puppet/1.0.0/README.md deleted file mode 100644 index 900f686e..00000000 --- a/resources/nova_generic_service_puppet/1.0.0/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Nova generic service resource for puppet handler - -Setup and configure the Nova generic services. 
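
For orientation, a minimal sketch of how this resource's inputs are consumed by the puppet handler, mirroring the pattern in the actions/run.pp removed further below; the values named in the comments are illustrative only, not defaults:

```puppet
# Inputs declared in meta.yaml are exposed to the manifest through hiera,
# keyed by the solar resource name (same pattern as actions/run.pp below).
$resource       = hiera($::resource_name)
$service_title  = $resource['input']['title']          # e.g. 'scheduler' (illustrative)
$package_name   = $resource['input']['package_name']   # e.g. 'nova-scheduler' (illustrative)
$service_name   = $resource['input']['service_name']
$ensure_package = $resource['input']['ensure_package']

nova::generic_service { $service_title:
  enabled        => true,
  manage_service => true,
  package_name   => $package_name,
  service_name   => $service_name,
  ensure_package => $ensure_package,
}
```
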
- -source https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/generic_service.pp diff --git a/resources/nova_generic_service_puppet/1.0.0/actions/remove.pp b/resources/nova_generic_service_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index d848408a..00000000 --- a/resources/nova_generic_service_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,21 +0,0 @@ -$service_title = $resource['input']['title'] -$package_name = $resource['input']['package_name'] -$service_name = $resource['input']['service_name'] - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -nova::generic_service { $service_title: - ensure_package => 'absent', - enabled => false, - package_name => $package_name, - service_name => $service_name, -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => 'absent', -} \ No newline at end of file diff --git a/resources/nova_generic_service_puppet/1.0.0/actions/run.pp b/resources/nova_generic_service_puppet/1.0.0/actions/run.pp deleted file mode 100644 index ab47b8cc..00000000 --- a/resources/nova_generic_service_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,25 +0,0 @@ -$resource = hiera($::resource_name) - -$service_title = $resource['input']['title'] -$package_name = $resource['input']['package_name'] -$service_name = $resource['input']['service_name'] -$ensure_package = $resource['input']['ensure_package'] - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -nova::generic_service { $service_title: - enabled => true, - manage_service => true, - package_name => $package_name, - service_name => $service_name, - ensure_package => $ensure_package, -} \ No newline at end of file diff --git a/resources/nova_generic_service_puppet/1.0.0/actions/update.pp b/resources/nova_generic_service_puppet/1.0.0/actions/update.pp deleted file mode 100644 index 49301e58..00000000 --- a/resources/nova_generic_service_puppet/1.0.0/actions/update.pp +++ /dev/null @@ -1,29 +0,0 @@ -$resource = hiera($::resource_name) - -$service_title = $resource['input']['title'] -$package_name = $resource['input']['package_name'] -$service_name = $resource['input']['service_name'] -$ensure_package = $resource['input']['ensure_package'] - -exec { 'post-nova_config': - command => '/bin/echo "Nova config has changed"', -} - -include nova::params - -package { 'nova-common': - name => $nova::params::common_package_name, - ensure => $ensure_package, -} - -nova::generic_service { $service_title: - enabled => true, - manage_service => true, - package_name => $package_name, - service_name => $service_name, - ensure_package => $ensure_package, -} - -notify { "restart generic service": - notify => Service["nova-${service_title}"], -} diff --git a/resources/nova_generic_service_puppet/1.0.0/meta.yaml b/resources/nova_generic_service_puppet/1.0.0/meta.yaml deleted file mode 100644 index 0afe1e96..00000000 --- a/resources/nova_generic_service_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,31 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - title: - schema: str! - value: - package_name: - schema: str! - value: - service_name: - schema: str! - value: - ensure_package: - schema: str - value: 'present' - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} - - ip: - schema: str! 
- value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/nova_generic_service, resources/nova] diff --git a/resources/nova_neutron_puppet/1.0.0/README.md b/resources/nova_neutron_puppet/1.0.0/README.md deleted file mode 100644 index fd438068..00000000 --- a/resources/nova_neutron_puppet/1.0.0/README.md +++ /dev/null @@ -1,115 +0,0 @@ -# Nova neutron resource for puppet handler - -Setup and configure the Nova compute to use Neutron. -Note, it should only be deployed on compute nodes. -Also manage the network driver to use for compute guests -This will use virtio for VM guests and the specified driver for the VIF. - -# Parameters - -source https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/compute/neutron.pp - - ``libvirt_vif_driver`` - (optional) The libvirt VIF driver to configure the VIFs. - Defaults to 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'. - - ``force_snat_range`` - (optional) Force SNAT rule to specified network for nova-network - Default to 0.0.0.0/0 - Due to architecture constraints in nova_config, it's not possible to setup - more than one SNAT rule though initial parameter is MultiStrOpt - -source https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/network/neutron.pp - - ``neutron_admin_password`` - (required) Password for connecting to Neutron network services in - admin context through the OpenStack Identity service. - - ``neutron_auth_strategy`` - (optional) Should be kept as default 'keystone' for all production deployments. - Defaults to 'keystone' - - ``neutron_url`` - (optional) URL for connecting to the Neutron networking service. - Defaults to 'http://127.0.0.1:9696' - Note: for this resource it is decomposed to the - 'neutron_endpoint_host', 'neutron_endpoint_port', 'neutron_endpoint_protocol' inputs - due to implementation limitations - - ``neutron_url_timeout`` - (optional) Timeout value for connecting to neutron in seconds. - Defaults to '30' - - ``neutron_admin_tenant_name`` - (optional) Tenant name for connecting to Neutron network services in - admin context through the OpenStack Identity service. - Defaults to 'services' - - ``neutron_default_tenant_id`` - (optional) Default tenant id when creating neutron networks - Defaults to 'default' - - ``neutron_region_name`` - (optional) Region name for connecting to neutron in admin context - through the OpenStack Identity service. - Defaults to 'RegionOne' - - ``neutron_admin_username`` - (optional) Username for connecting to Neutron network services in admin context - through the OpenStack Identity service. - Defaults to 'neutron' - - ``neutron_ovs_bridge`` - (optional) Name of Integration Bridge used by Open vSwitch - Defaults to 'br-int' - - ``neutron_extension_sync_interval`` - (optional) Number of seconds before querying neutron for extensions - Defaults to '600' - - ``neutron_ca_certificates_file`` - (optional) Location of ca certicates file to use for neutronclient requests. - Defaults to 'None' - - ``neutron_admin_auth_url`` - (optional) Points to the OpenStack Identity server IP and port. - This is the Identity (keystone) admin API server IP and port value, - and not the Identity service API IP and port. - Defaults to 'http://127.0.0.1:35357/v2.0' - Note: for this resource it is decomposed to the - 'auth_host', 'auth_port', 'auth_protocol' inputs - due to implementation limitations - - ``network_api_class`` - (optional) The full class name of the network API class. - The default configures Nova to use Neutron for the network API. 
- Defaults to 'nova.network.neutronv2.api.API' - - ``security_group_api`` - (optional) The full class name of the security API class. - The default configures Nova to use Neutron for security groups. - Set to 'nova' to use standard Nova security groups. - Defaults to 'neutron' - - ``firewall_driver`` - (optional) Firewall driver. - This prevents nova from maintaining a firewall so it does not interfere - with Neutron's. Set to 'nova.virt.firewall.IptablesFirewallDriver' - to re-enable the Nova firewall. - Defaults to 'nova.virt.firewall.NoopFirewallDriver' - - ``vif_plugging_is_fatal`` - (optional) Fail to boot instance if vif plugging fails. - This prevents nova from booting an instance if vif plugging notification - is not received from neutron. - Defaults to 'True' - - ``vif_plugging_timeout`` - (optional) Number of seconds to wait for neutron vif plugging events. - Set to '0' and vif_plugging_is_fatal to 'False' if vif plugging - notification is not being used. - Defaults to '300' - - ``dhcp_domain`` - (optional) domain to use for building the hostnames - Defaults to 'novalocal' \ No newline at end of file diff --git a/resources/nova_neutron_puppet/1.0.0/actions/remove.pp b/resources/nova_neutron_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index cfb90903..00000000 --- a/resources/nova_neutron_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1 +0,0 @@ -notify { "Nothing to remove here": } \ No newline at end of file diff --git a/resources/nova_neutron_puppet/1.0.0/actions/run.pp b/resources/nova_neutron_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 5589032c..00000000 --- a/resources/nova_neutron_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,54 +0,0 @@ -$resource = hiera($::resource_name) - -$auth_host = $resource['input']['auth_host'] -$auth_port = $resource['input']['auth_port'] -$auth_protocol = $resource['input']['auth_protocol'] -$neutron_endpoint_host = $resource['input']['neutron_endpoint_host'] -$neutron_endpoint_port = $resource['input']['neutron_endpoint_port'] -$neutron_endpoint_protocol = $resource['input']['neutron_endpoint_protocol'] - -$libvirt_vif_driver = $resource['input']['libvirt_vif_driver'] -$force_snat_range = $resource['input']['force_snat_range'] -$neutron_admin_password = $resource['input']['neutron_admin_password'] -$neutron_auth_strategy = $resource['input']['neutron_auth_strategy'] -$neutron_url_timeout = $resource['input']['neutron_url_timeout'] -$neutron_admin_tenant_name = $resource['input']['neutron_admin_tenant_name'] -$neutron_default_tenant_id = $resource['input']['neutron_default_tenant_id'] -$neutron_region_name = $resource['input']['neutron_region_name'] -$neutron_admin_username = $resource['input']['neutron_admin_username'] -$neutron_ovs_bridge = $resource['input']['neutron_ovs_bridge'] -$neutron_extension_sync_interval = $resource['input']['neutron_extension_sync_interval'] -$neutron_ca_certificates_file = $resource['input']['neutron_ca_certificates_file'] -$network_api_class = $resource['input']['network_api_class'] -$security_group_api = $resource['input']['security_group_api'] -$firewall_driver = $resource['input']['firewall_driver'] -$vif_plugging_is_fatal = $resource['input']['vif_plugging_is_fatal'] -$vif_plugging_timeout = $resource['input']['vif_plugging_timeout'] -$dhcp_domain = $resource['input']['dhcp_domain'] - - -class { 'nova::compute::neutron': - libvirt_vif_driver => $libvirt_vif_driver, - force_snat_range => $force_snat_range, -} - -class { 'nova::network::neutron': - neutron_admin_password => 
$neutron_admin_password, - neutron_auth_strategy => $neutron_auth_strategy, - neutron_url => "${neutron_endpoint_protocol}://${neutron_endpoint_host}:${neutron_endpoint_port}", - neutron_url_timeout => $neutron_url_timeout, - neutron_admin_tenant_name => $neutron_admin_tenant_name, - neutron_default_tenant_id => $neutron_default_tenant_id, - neutron_region_name => $neutron_region_name, - neutron_admin_username => $neutron_admin_username, - neutron_admin_auth_url => "${auth_protocol}://${auth_host}:${auth_port}/v2.0", - neutron_ovs_bridge => $neutron_ovs_bridge, - neutron_extension_sync_interval => $neutron_extension_sync_interval, - neutron_ca_certificates_file => $neutron_ca_certificates_file, - network_api_class => $network_api_class, - security_group_api => $security_group_api, - firewall_driver => $firewall_driver, - vif_plugging_is_fatal => $vif_plugging_is_fatal, - vif_plugging_timeout => $vif_plugging_timeout, - dhcp_domain => $dhcp_domain, -} diff --git a/resources/nova_neutron_puppet/1.0.0/meta.yaml b/resources/nova_neutron_puppet/1.0.0/meta.yaml deleted file mode 100644 index dffa410b..00000000 --- a/resources/nova_neutron_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,92 +0,0 @@ -handler: puppet -version: 1.0.0 -input: - auth_host: - schema: str - value: 'localhost' - auth_port: - schema: int - value: 35357 - auth_protocol: - schema: str - value: 'http' - neutron_endpoint_host: - schema: str - value: 'localhost' - neutron_endpoint_port: - schema: int - value: 9696 - neutron_endpoint_protocol: - schema: str - value: 'http' - - libvirt_vif_driver: - schema: str - value: 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver' - force_snat_range: - schema: str - value: '0.0.0.0/0' - neutron_admin_password: - schema: str - value: 'neutron' - neutron_auth_strategy: - schema: str - value: 'keystone' - neutron_url_timeout: - schema: int - value: 30 - neutron_admin_tenant_name: - schema: str - value: 'services' - neutron_default_tenant_id: - schema: str - value: 'default' - neutron_region_name: - schema: str - value: 'RegionOne' - neutron_admin_username: - schema: str - value: 'neutron' - neutron_ovs_bridge: - schema: str - value: 'br-int' - neutron_extension_sync_interval: - schema: int - value: 600 - neutron_ca_certificates_file: - schema: str - value: - network_api_class: - schema: str - value: 'nova.network.neutronv2.api.API' - security_group_api: - schema: str - value: 'neutron' - firewall_driver: - schema: str - value: 'nova.virt.firewall.NoopFirewallDriver' - vif_plugging_is_fatal: - schema: bool - value: true - vif_plugging_timeout: - schema: int - value: 300 - dhcp_domain: - schema: str - value: 'novalocal' - - git: - schema: {repository: str!, branch: str!} - value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resource/nova_neutron_service, resources/nova_neutron, resources/nova_compute, resources/nova] diff --git a/resources/nova_puppet/1.0.0/README.md b/resources/nova_puppet/1.0.0/README.md deleted file mode 100644 index ad41226a..00000000 --- a/resources/nova_puppet/1.0.0/README.md +++ /dev/null @@ -1,275 +0,0 @@ -# Nova resource for puppet handler - -Controls a live cycle of the nova entities, -like the main puppet class, auth, DB, AMQP, packages, -keystone user, role and endpoint. 
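
All of the *_puppet resources removed in this patch follow the same handler convention: meta.yaml declares typed inputs, and the action manifest reads them from hiera under the resource name before declaring the upstream puppet-nova class. A condensed sketch of that flow, trimmed from the actions/run.pp removed below (only a few inputs are shown):

```puppet
# Condensed from actions/run.pp below; most inputs are omitted for brevity.
$resource        = hiera($::resource_name)

$db_user         = $resource['input']['db_user']
$db_password     = $resource['input']['db_password']
$db_host         = $resource['input']['db_host']
$db_port         = $resource['input']['db_port']
$db_name         = $resource['input']['db_name']
$rabbit_password = $resource['input']['rabbit_password']

class { 'nova':
  database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}?charset=utf8",
  rabbit_password     => $rabbit_password,
  # the remaining parameters are wired the same way in actions/run.pp
}
```
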
- -# Parameters - -source https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/init.pp - - ``ensure_package`` - (optional) The state of nova packages - Defaults to 'present' - - ``nova_cluster_id`` - (optional) Deprecated. This parameter does nothing and will be removed. - Defaults to 'localcluster' - - ``sql_connection`` - (optional) Deprecated. Use database_connection instead. - Defaults to false - - ``sql_idle_timeout`` - (optional) Deprecated. Use database_idle_timeout instead - Defaults to false - - ``database_connection`` - (optional) Connection url to connect to nova database. - Defaults to false - - ``slave_connection`` - (optional) Connection url to connect to nova slave database (read-only). - Defaults to false - - ``database_idle_timeout`` - (optional) Timeout before idle db connections are reaped. - Defaults to 3600 - - ``rpc_backend`` - (optional) The rpc backend implementation to use, can be: - rabbit (for rabbitmq) - qpid (for qpid) - zmq (for zeromq) - Defaults to 'rabbit' - - ``image_service`` - (optional) Service used to search for and retrieve images. - Defaults to 'nova.image.local.LocalImageService' - - ``glance_api_servers`` - (optional) List of addresses for api servers. - Defaults to 'localhost:9292' - - ``memcached_servers`` - (optional) Use memcached instead of in-process cache. Supply a list of memcached server IP's:Memcached Port. - Defaults to false - - ``rabbit_host`` - (optional) Location of rabbitmq installation. - Defaults to 'localhost' - - ``rabbit_hosts`` - (optional) List of clustered rabbit servers. - Defaults to false - - ``rabbit_port`` - (optional) Port for rabbitmq instance. - Defaults to '5672' - - ``rabbit_password`` - (optional) Password used to connect to rabbitmq. - Defaults to 'guest' - - ``rabbit_userid`` - (optional) User used to connect to rabbitmq. - Defaults to 'guest' - - ``rabbit_virtual_host`` - (optional) The RabbitMQ virtual host. - Defaults to '/' - - ``rabbit_use_ssl`` - (optional) Connect over SSL for RabbitMQ - Defaults to false - - ``kombu_ssl_ca_certs`` - (optional) SSL certification authority file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_certfile`` - (optional) SSL cert file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_keyfile`` - (optional) SSL key file (valid only if SSL enabled). - Defaults to undef - - ``kombu_ssl_version`` - (optional) SSL version to use (valid only if SSL enabled). - Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be - available on some distributions. - Defaults to 'TLSv1' - - ``amqp_durable_queues`` - (optional) Define queues as "durable" to rabbitmq. - Defaults to false - - ``qpid_hostname`` - (optional) Location of qpid server - Defaults to 'localhost' - - ``qpid_port`` - (optional) Port for qpid server - Defaults to '5672' - - ``qpid_username`` - (optional) Username to use when connecting to qpid - Defaults to 'guest' - - ``qpid_password`` - (optional) Password to use when connecting to qpid - Defaults to 'guest' - - ``qpid_heartbeat`` - (optional) Seconds between connection keepalive heartbeats - Defaults to 60 - - ``qpid_protocol`` - (optional) Transport to use, either 'tcp' or 'ssl'' - Defaults to 'tcp' - - ``qpid_sasl_mechanisms`` - (optional) Enable one or more SASL mechanisms - Defaults to false - - ``qpid_tcp_nodelay`` - (optional) Disable Nagle algorithm - Defaults to true - - ``service_down_time`` - (optional) Maximum time since last check-in for up service. - Defaults to 60 - - ``logdir`` - (optional) Deprecated. Use log_dir instead. 
- Defaults to false - - ``log_dir`` - (optional) Directory where logs should be stored. - If set to boolean false, it will not log to any directory. - Defaults to '/var/log/nova' - - ``state_path`` - (optional) Directory for storing state. - Defaults to '/var/lib/nova' - - ``lock_path`` - (optional) Directory for lock files. - On RHEL will be '/var/lib/nova/tmp' and on Debian '/var/lock/nova' - Defaults to $::nova::params::lock_path - - ``verbose`` - (optional) Set log output to verbose output. - Defaults to false - - ``periodic_interval`` - (optional) Seconds between running periodic tasks. - Defaults to '60' - - ``report_interval`` - (optional) Interval at which nodes report to data store. - Defaults to '10' - - ``monitoring_notifications`` - (optional) Whether or not to send system usage data notifications out on the message queue. Only valid for stable/essex. - Defaults to false - - ``use_syslog`` - (optional) Use syslog for logging - Defaults to false - - ``log_facility`` - (optional) Syslog facility to receive log lines. - Defaults to 'LOG_USER' - - ``use_ssl`` - (optional) Enable SSL on the API server - Defaults to false, not set - - ``enabled_ssl_apis`` - (optional) List of APIs to SSL enable - Defaults to [] - Possible values : 'ec2', 'osapi_compute', 'metadata' - - ``cert_file`` - (optinal) Certificate file to use when starting API server securely - Defaults to false, not set - - ``key_file`` - (optional) Private key file to use when starting API server securely - Defaults to false, not set - - ``ca_file`` - (optional) CA certificate file to use to verify connecting clients - Defaults to false, not set_ - - ``nova_user_id`` - (optional) Create the nova user with the specified gid. - Changing to a new uid after specifying a different uid previously, - or using this option after the nova account already exists will break - the ownership of all files/dirs owned by nova. It is strongly encouraged - not to use this option and instead create user before nova class or - for network shares create netgroup into which you'll put nova on all the - nodes. If undef no user will be created and user creation will standardly - happen in nova-common package. - Defaults to undef. - - ``nova_group_id`` - (optional) Create the nova user with the specified gid. - Changing to a new uid after specifying a different uid previously, - or using this option after the nova account already exists will break - the ownership of all files/dirs owned by nova. It is strongly encouraged - not to use this option and instead create group before nova class or for - network shares create netgroup into which you'll put nova on all the - nodes. If undef no user or group will be created and creation will - happen in nova-common package. - Defaults to undef. - - ``nova_public_key`` - (optional) Install public key in .ssh/authorized_keys for the 'nova' user. - Expects a hash of the form { type => 'key-type', key => 'key-data' } where - 'key-type' is one of (ssh-rsa, ssh-dsa, ssh-ecdsa) and 'key-data' is the - actual key data (e.g, 'AAAA...'). - - ``nova_private_key`` - (optional) Install private key into .ssh/id_rsa (or appropriate equivalent - for key type). Expects a hash of the form { type => 'key-type', key => - 'key-data' }, where 'key-type' is one of (ssh-rsa, ssh-dsa, ssh-ecdsa) and - 'key-data' is the contents of the private key file. - - ``nova_shell`` - (optional) Set shell for 'nova' user to the specified value. - Defaults to '/bin/false'. - - ``mysql_module`` - (optional) Deprecated. Does nothing. 
- - ``notification_driver`` - (optional) Driver or drivers to handle sending notifications. - Value can be a string or a list. - Defaults to [] - - ``notification_topics`` - (optional) AMQP topic used for OpenStack notifications - Defaults to 'notifications' - - ``notify_api_faults`` - (optional) If set, send api.fault notifications on caught - exceptions in the API service - Defaults to false - - ``notify_on_state_change`` - (optional) If set, send compute.instance.update notifications - on instance state changes. Valid values are None for no notifications, - "vm_state" for notifications on VM state changes, or "vm_and_task_state" - for notifications on VM and task state changes. - Defaults to undef - - ``os_region_name`` - (optional) Sets the os_region_name flag. For environments with - more than one endpoint per service, this is required to make - things such as cinder volume attach work. If you don't set this - and you have multiple endpoints, you will get AmbiguousEndpoint - exceptions in the nova API service. - Defaults to undef \ No newline at end of file diff --git a/resources/nova_puppet/1.0.0/actions/remove.pp b/resources/nova_puppet/1.0.0/actions/remove.pp deleted file mode 100644 index 764ff454..00000000 --- a/resources/nova_puppet/1.0.0/actions/remove.pp +++ /dev/null @@ -1,4 +0,0 @@ -class { 'nova': - ensure_package => 'absent', - rabbit_password => 'not important as removed', -} diff --git a/resources/nova_puppet/1.0.0/actions/run.pp b/resources/nova_puppet/1.0.0/actions/run.pp deleted file mode 100644 index 34003538..00000000 --- a/resources/nova_puppet/1.0.0/actions/run.pp +++ /dev/null @@ -1,138 +0,0 @@ -$resource = hiera($::resource_name) - -$db_user = $resource['input']['db_user'] -$db_password = $resource['input']['db_password'] -$db_name = $resource['input']['db_name'] -$db_host = $resource['input']['db_host'] -$db_port = $resource['input']['db_port'] -$glance_api_servers_host = $resource['input']['glance_api_servers_host'] -$glance_api_servers_port = $resource['input']['glance_api_servers_port'] - -$ensure_package = $resource['input']['ensure_package'] -$database_connection = $resource['input']['database_connection'] -$slave_connection = $resource['input']['slave_connection'] -$database_idle_timeout = $resource['input']['database_idle_timeout'] -$rpc_backend = $resource['input']['rpc_backend'] -$image_service = $resource['input']['image_service'] -$glance_api_servers = $resource['input']['glance_api_servers'] -$memcached_servers = $resource['input']['memcached_servers'] -$rabbit_host = $resource['input']['rabbit_host'] -$rabbit_hosts = $resource['input']['rabbit_hosts'] -$rabbit_password = $resource['input']['rabbit_password'] -$rabbit_port = $resource['input']['rabbit_port'] -$rabbit_userid = $resource['input']['rabbit_userid'] -$rabbit_virtual_host = $resource['input']['rabbit_virtual_host'] -$rabbit_use_ssl = $resource['input']['rabbit_use_ssl'] -$rabbit_ha_queues = $resource['input']['rabbit_ha_queues'] -$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs'] -$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile'] -$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile'] -$kombu_ssl_version = $resource['input']['kombu_ssl_version'] -$amqp_durable_queues = $resource['input']['amqp_durable_queues'] -$qpid_hostname = $resource['input']['qpid_hostname'] -$qpid_port = $resource['input']['qpid_port'] -$qpid_username = $resource['input']['qpid_username'] -$qpid_password = $resource['input']['qpid_password'] -$qpid_sasl_mechanisms = 
$resource['input']['qpid_sasl_mechanisms'] -$qpid_heartbeat = $resource['input']['qpid_heartbeat'] -$qpid_protocol = $resource['input']['qpid_protocol'] -$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay'] -$auth_strategy = $resource['input']['auth_strategy'] -$service_down_time = $resource['input']['service_down_time'] -$log_dir = $resource['input']['log_dir'] -$state_path = $resource['input']['state_path'] -$lock_path = $resource['input']['lock_path'] -$verbose = $resource['input']['verbose'] -$debug = $resource['input']['debug'] -$periodic_interval = $resource['input']['periodic_interval'] -$report_interval = $resource['input']['report_interval'] -$rootwrap_config = $resource['input']['rootwrap_config'] -$use_ssl = $resource['input']['use_ssl'] -$enabled_ssl_apis = $resource['input']['enabled_ssl_apis'] -$ca_file = $resource['input']['ca_file'] -$cert_file = $resource['input']['cert_file'] -$key_file = $resource['input']['key_file'] -$nova_user_id = $resource['input']['nova_user_id'] -$nova_group_id = $resource['input']['nova_group_id'] -$nova_public_key = $resource['input']['nova_public_key'] -$nova_private_key = $resource['input']['nova_private_key'] -$nova_shell = $resource['input']['nova_shell'] -$monitoring_notifications = $resource['input']['monitoring_notifications'] -$use_syslog = $resource['input']['use_syslog'] -$log_facility = $resource['input']['log_facility'] -$install_utilities = $resource['input']['install_utilities'] -$notification_driver = $resource['input']['notification_driver'] -$notification_topics = $resource['input']['notification_topics'] -$notify_api_faults = $resource['input']['notify_api_faults'] -$notify_on_state_change = $resource['input']['notify_on_state_change'] -$mysql_module = $resource['input']['mysql_module'] -$nova_cluster_id = $resource['input']['nova_cluster_id'] -$sql_connection = $resource['input']['sql_connection'] -$sql_idle_timeout = $resource['input']['sql_idle_timeout'] -$logdir = $resource['input']['logdir'] -$os_region_name = $resource['input']['os_region_name'] - -class { 'nova': - database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}?charset=utf8", - ensure_package => $ensure_package, - slave_connection => $slave_connection, - database_idle_timeout => $database_idle_timeout, - rpc_backend => $rpc_backend, - image_service => $image_service, - glance_api_servers => "${glance_api_servers_host}:${glance_api_servers_port}", - memcached_servers => $memcached_servers, - rabbit_host => $rabbit_host, - rabbit_hosts => $rabbit_hosts, - rabbit_password => $rabbit_password, - rabbit_port => $rabbit_port, - rabbit_userid => $rabbit_userid, - rabbit_virtual_host => $rabbit_virtual_host, - rabbit_use_ssl => $rabbit_use_ssl, - rabbit_ha_queues => $rabbit_ha_queues, - kombu_ssl_ca_certs => $kombu_ssl_ca_certs, - kombu_ssl_certfile => $kombu_ssl_certfile, - kombu_ssl_keyfile => $kombu_ssl_keyfile, - kombu_ssl_version => $kombu_ssl_version, - amqp_durable_queues => $amqp_durable_queues, - qpid_hostname => $qpid_hostname, - qpid_port => $qpid_port, - qpid_username => $qpid_username, - qpid_password => $qpid_password, - qpid_sasl_mechanisms => $qpid_sasl_mechanisms, - qpid_heartbeat => $qpid_heartbeat, - qpid_protocol => $qpid_protocol, - qpid_tcp_nodelay => $qpid_tcp_nodelay, - auth_strategy => $auth_strategy, - service_down_time => $service_down_time, - log_dir => $log_dir, - state_path => $state_path, - lock_path => $lock_path, - verbose => $verbose, - debug => $debug, - periodic_interval => $periodic_interval, 
- report_interval => $report_interval, - rootwrap_config => $rootwrap_config, - use_ssl => $use_ssl, - enabled_ssl_apis => $enabled_ssl_apis, - ca_file => $ca_file, - cert_file => $cert_file, - key_file => $key_file, - nova_user_id => $nova_user_id, - nova_group_id => $nova_group_id, - nova_public_key => $nova_public_key, - nova_private_key => $nova_private_key, - nova_shell => $nova_shell, - monitoring_notifications => $monitoring_notifications, - use_syslog => $use_syslog, - log_facility => $log_facility, - install_utilities => $install_utilities, - notification_driver => $notification_driver, - notification_topics => $notification_topics, - notify_api_faults => $notify_api_faults, - notify_on_state_change => $notify_on_state_change, - mysql_module => $mysql_module, - nova_cluster_id => $nova_cluster_id, - sql_idle_timeout => $sql_idle_timeout, - logdir => $logdir, - os_region_name => $os_region_name, -} diff --git a/resources/nova_puppet/1.0.0/meta.yaml b/resources/nova_puppet/1.0.0/meta.yaml deleted file mode 100644 index 0f16e898..00000000 --- a/resources/nova_puppet/1.0.0/meta.yaml +++ /dev/null @@ -1,254 +0,0 @@ -handler: puppet -version: 1.0.0 -actions: - run: run.pp - update: run.pp -input: - ensure_package: - schema: str - value: 'present' - database_connection: - schema: str - value: - slave_connection: - schema: str - value: - database_idle_timeout: - schema: int - value: 3600 - rpc_backend: - schema: str - value: 'rabbit' - image_service: - schema: str - value: 'nova.image.glance.GlanceImageService' - glance_api_servers: - schema: str - value: 'localhost:9292' - memcached_servers: - schema: str - value: - rabbit_host: - schema: str - value: 'localhost' - rabbit_hosts: - schema: str - value: - rabbit_password: - schema: str! - value: 'guest' - rabbit_port: - schema: int - value: 5672 - rabbit_userid: - schema: str - value: 'guest' - rabbit_virtual_host: - schema: str - value: '/' - rabbit_use_ssl: - schema: bool - value: false - rabbit_ha_queues: - schema: str - value: - kombu_ssl_ca_certs: - schema: str - value: - kombu_ssl_certfile: - schema: str - value: - kombu_ssl_keyfile: - schema: str - value: - kombu_ssl_version: - schema: str - value: 'TLSv1' - amqp_durable_queues: - schema: bool - value: false - qpid_hostname: - schema: str - value: 'localhost' - qpid_port: - schema: int - value: 5672 - qpid_username: - schema: str - value: 'guest' - qpid_password: - schema: str! 
- value: 'guest' - qpid_sasl_mechanisms: - schema: bool - value: false - qpid_heartbeat: - schema: int - value: 60 - qpid_protocol: - schema: str - value: 'tcp' - qpid_tcp_nodelay: - schema: bool - value: true - auth_strategy: - schema: str - value: 'keystone' - service_down_time: - schema: int - value: 60 - log_dir: - schema: str - value: '/var/log/nova' - state_path: - schema: str - value: '/var/lib/nova' - lock_path: - schema: str - value: '/var/lock/nova' - verbose: - schema: bool - value: false - debug: - schema: bool - value: false - periodic_interval: - schema: int - value: 60 - report_interval: - schema: int - value: 10 - rootwrap_config: - schema: str - value: '/etc/nova/rootwrap.conf' - use_ssl: - schema: bool - value: false - enabled_ssl_apis: - schema: [str] - value: ['ec2', 'metadata', 'osapi_compute'] - ca_file: - schema: str - value: - cert_file: - schema: str - value: - key_file: - schema: str - value: - nova_user_id: - schema: str - value: - nova_group_id: - schema: str - value: - nova_public_key: - schema: str - value: - nova_private_key: - schema: str - value: - nova_shell: - schema: str - value: '/bin/false' - monitoring_notifications: - schema: bool - value: false - use_syslog: - schema: bool - value: false - log_facility: - schema: str - value: 'LOG_USER' - install_utilities: - schema: bool - value: true - notification_driver: - schema: [str] - value: [] - notification_topics: - schema: str - value: 'notifications' - notify_api_faults: - schema: bool - value: false - notify_on_state_change: - schema: str - value: - mysql_module: - schema: str - value: - nova_cluster_id: - schema: str - value: - sql_connection: - schema: str - value: - sql_idle_timeout: - schema: str - value: - logdir: - schema: str - value: - os_region_name: - schema: str - value: - - db_user: - schema: str! - value: nova - db_password: - schema: str! - value: - db_name: - schema: str! - value: - db_host: - schema: str! - value: - db_port: - schema: int! - value: - - keystone_password: - schema: str! - value: - keystone_port: - schema: int! - value: - keystone_host: - schema: str! - value: - keystone_tenant: - schema: str! - value: - keystone_user: - schema: str! - value: - - glance_api_servers_port: - schema: int - value: 9292 - glance_api_servers_host: - schema: 'str' - value: 'localhost' - - port: - schema: int! - value: 8774 - - module: - schema: {repository: str!, branch: str!} - value: {name: 'nova', type: 'git', url: 'https://github.com/openstack/puppet-nova', ref: '5.1.0'} - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/nova_service, resources/nova] diff --git a/resources/openrc_file/1.0.0/actions/remove.yaml b/resources/openrc_file/1.0.0/actions/remove.yaml deleted file mode 100644 index de66259b..00000000 --- a/resources/openrc_file/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- hosts: [ {{ host }} ] - sudo: yes - tasks: - - file: path=/root/openrc state=absent - diff --git a/resources/openrc_file/1.0.0/actions/run.yaml b/resources/openrc_file/1.0.0/actions/run.yaml deleted file mode 100644 index 3a1b4959..00000000 --- a/resources/openrc_file/1.0.0/actions/run.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - vars: - tenant: {{tenant}} - user_name: {{user_name}} - password: {{password}} - keystone_host: {{keystone_host}} - keystone_port: {{keystone_port}} - tasks: - - template: src={{templates_dir}}/openrc.template dest=/root/openrc diff --git a/resources/openrc_file/1.0.0/meta.yaml b/resources/openrc_file/1.0.0/meta.yaml deleted file mode 100644 index fdcde8cc..00000000 --- a/resources/openrc_file/1.0.0/meta.yaml +++ /dev/null @@ -1,28 +0,0 @@ -handler: ansible -version: 1.0.0 - -input: - keystone_host: - schema: str! - value: - keystone_port: - schema: int! - value: - tenant: - schema: str! - value: - user_name: - schema: str! - value: - password: - schema: str! - value: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: diff --git a/resources/openrc_file/1.0.0/templates/openrc.template b/resources/openrc_file/1.0.0/templates/openrc.template deleted file mode 100644 index f13924e7..00000000 --- a/resources/openrc_file/1.0.0/templates/openrc.template +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh -export LC_ALL=C -export OS_NO_CACHE="true" -export OS_TENANT_NAME={{tenant}} -export OS_USERNAME={{user_name}} -export OS_PASSWORD={{password}} -export OS_AUTH_URL=http://{{keystone_host}}:{{keystone_port}}/v2.0 -export OS_AUTH_STRATEGY=keystone -export OS_REGION_NAME='RegionOne' -export OS_VOLUME_API_VERSION='2' \ No newline at end of file diff --git a/resources/rabbitmq_config/1.0.0/actions/remove.yaml b/resources/rabbitmq_config/1.0.0/actions/remove.yaml deleted file mode 100644 index 48094d21..00000000 --- a/resources/rabbitmq_config/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - file: path={{config_dir}} state=absent diff --git a/resources/rabbitmq_config/1.0.0/actions/run.yaml b/resources/rabbitmq_config/1.0.0/actions/run.yaml deleted file mode 100644 index 6976a441..00000000 --- a/resources/rabbitmq_config/1.0.0/actions/run.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - vars: - admin_user: {{admin_user}} - admin_password: {{admin_password}} - tasks: - - file: path={{config_dir}} state=directory - - template: src={{templates_dir}}/rabbitmq.conf dest={{config_dir}}/rabbitmq.conf diff --git a/resources/rabbitmq_config/1.0.0/meta.yaml b/resources/rabbitmq_config/1.0.0/meta.yaml deleted file mode 100644 index 6e2153af..00000000 --- a/resources/rabbitmq_config/1.0.0/meta.yaml +++ /dev/null @@ -1,21 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - config_dir: - schema: str! - value: - admin_name: - schema: str! - value: - admin_password: - schema: str! - value: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: diff --git a/resources/rabbitmq_config/1.0.0/templates/rabbitmq.conf b/resources/rabbitmq_config/1.0.0/templates/rabbitmq.conf deleted file mode 100644 index 85db812a..00000000 --- a/resources/rabbitmq_config/1.0.0/templates/rabbitmq.conf +++ /dev/null @@ -1,30 +0,0 @@ -[ - {rabbit, [ - {cluster_partition_handling, autoheal}, - {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, - {default_vhost, <<"/">>}, - {log_levels, [connection,info,error]}, - {tcp_listen_options, [ - binary, - {packet, raw}, - {reuseaddr, true}, - {backlog, 128}, - {nodelay, true}, - {exit_on_close, false}, - {keepalive, true} - ]}, - {default_user, <<"{{default_user}}">>}, - {default_pass, <<"{{default_password}}">>} - ]}, - {kernel, [ - {inet_default_connect_options, [{nodelay,true}]}, - {inet_dist_listen_max, 41055}, - {inet_dist_listen_min, 41055} - ]} -, - {rabbitmq_management, [ - {listener, [ - {port, 15672} - ]} - ]} -]. diff --git a/resources/rabbitmq_service/1.0.0/actions/remove.pp b/resources/rabbitmq_service/1.0.0/actions/remove.pp deleted file mode 100644 index f1121da5..00000000 --- a/resources/rabbitmq_service/1.0.0/actions/remove.pp +++ /dev/null @@ -1,9 +0,0 @@ -$resource = hiera($::resource_name) - -class { '::rabbitmq': - package_ensure => 'absent', - environment_variables => { - 'RABBITMQ_SERVICENAME' => 'RabbitMQ' - } -} - diff --git a/resources/rabbitmq_service/1.0.0/actions/run.pp b/resources/rabbitmq_service/1.0.0/actions/run.pp deleted file mode 100644 index 02ed9cff..00000000 --- a/resources/rabbitmq_service/1.0.0/actions/run.pp +++ /dev/null @@ -1,11 +0,0 @@ -$resource = hiera($::resource_name) - -$port = "${resource['input']['port']}" -$management_port = "${resource['input']['management_port']}" - -class { '::rabbitmq': - service_manage => true, - port => $port, - management_port => $management_port, - delete_guest_user => true, -} diff --git a/resources/rabbitmq_service/1.0.0/meta.yaml b/resources/rabbitmq_service/1.0.0/meta.yaml deleted file mode 100644 index 39108b6d..00000000 --- a/resources/rabbitmq_service/1.0.0/meta.yaml +++ /dev/null @@ -1,24 +0,0 @@ -handler: puppet -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - port: - schema: int! - value: 5672 - management_port: - schema: int! 
- value: 15672 - module: - schema: {name: str!, type: str, url: str, ref: str} - value: {name: 'rabbitmq', type: 'git', url: 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git', ref: '5.1.0'} - -tags: [] -version: 1.0.0 diff --git a/resources/rabbitmq_user/1.0.0/actions/remove.yaml b/resources/rabbitmq_user/1.0.0/actions/remove.yaml deleted file mode 100644 index f1f5fc21..00000000 --- a/resources/rabbitmq_user/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - rabbitmq_user: user={{user_name}} - vhost={{vhost_name}} - state=absent diff --git a/resources/rabbitmq_user/1.0.0/actions/run.yaml b/resources/rabbitmq_user/1.0.0/actions/run.yaml deleted file mode 100644 index 5b84d7ea..00000000 --- a/resources/rabbitmq_user/1.0.0/actions/run.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - rabbitmq_user: user={{user_name}} - password={{password}} - vhost={{vhost_name}} - configure_priv=.* - read_priv=.* - write_priv=.* - tags={{tags}} - state=present diff --git a/resources/rabbitmq_user/1.0.0/meta.yaml b/resources/rabbitmq_user/1.0.0/meta.yaml deleted file mode 100644 index 94536690..00000000 --- a/resources/rabbitmq_user/1.0.0/meta.yaml +++ /dev/null @@ -1,26 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - user_name: - schema: str! - value: openstack - password: - schema: str! - value: openstack_password - vhost_name: - schema: str! - value: - tags: - schema: str - value: 'management' - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resources/rabbitmq, resource/rabbitmq_user] diff --git a/resources/rabbitmq_vhost/1.0.0/actions/remove.yaml b/resources/rabbitmq_vhost/1.0.0/actions/remove.yaml deleted file mode 100644 index b9f51659..00000000 --- a/resources/rabbitmq_vhost/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - rabbitmq_vhost: name={{vhost_name}} - state=absent diff --git a/resources/rabbitmq_vhost/1.0.0/actions/run.yaml b/resources/rabbitmq_vhost/1.0.0/actions/run.yaml deleted file mode 100644 index f2a1be54..00000000 --- a/resources/rabbitmq_vhost/1.0.0/actions/run.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - rabbitmq_vhost: name={{vhost_name}} - state=present diff --git a/resources/rabbitmq_vhost/1.0.0/meta.yaml b/resources/rabbitmq_vhost/1.0.0/meta.yaml deleted file mode 100644 index a9ead414..00000000 --- a/resources/rabbitmq_vhost/1.0.0/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - vhost_name: - schema: str! - value: openstack - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - -tags: [resources/rabbitmq, resource/rabbitmq_vhost] diff --git a/resources/remote_file/1.0.0/actions/run.sh b/resources/remote_file/1.0.0/actions/run.sh deleted file mode 100644 index 212bdfb4..00000000 --- a/resources/remote_file/1.0.0/actions/run.sh +++ /dev/null @@ -1,10 +0,0 @@ -mkdir -p {{dest}} - -{% for transport in remote %} - {% if transport.name == 'ssh' %} -scp -i {{transport.key}} -r {{transport.user}}@{{remote_ip}}:/{{remote_path}} {{dest}} -exit 0 - {% endif %} -{% endfor %} -echo 'No suitable transport.' 
-exit 2 diff --git a/resources/remote_file/1.0.0/meta.yaml b/resources/remote_file/1.0.0/meta.yaml deleted file mode 100644 index 904debeb..00000000 --- a/resources/remote_file/1.0.0/meta.yaml +++ /dev/null @@ -1,19 +0,0 @@ -handler: shell -version: 1.0.0 -input: - ip: - schema: str! - value: - remote: - schema: {} - value: - remote_ip: - schema: str! - value: - remote_path: - schema: str! - value: - dest: - schema: str! - value: -tags: [] diff --git a/resources/riak_join_single/1.0.0/actions/join.yaml b/resources/riak_join_single/1.0.0/actions/join.yaml deleted file mode 100644 index c705d3fd..00000000 --- a/resources/riak_join_single/1.0.0/actions/join.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: riak-admin cluster join {{join_to}} diff --git a/resources/riak_join_single/1.0.0/meta.yaml b/resources/riak_join_single/1.0.0/meta.yaml deleted file mode 100644 index 46d1761a..00000000 --- a/resources/riak_join_single/1.0.0/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -handler: ansible -version: 1.0.0 -actions: - join: actions/join.yaml -input: - join_to: - schema: str! - value: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: diff --git a/resources/riak_node/1.0.0/actions/commit.yaml b/resources/riak_node/1.0.0/actions/commit.yaml deleted file mode 100644 index 52bc4712..00000000 --- a/resources/riak_node/1.0.0/actions/commit.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - # - shell: sleep 30 - - shell: riak-admin cluster plan - - shell: riak-admin cluster commit diff --git a/resources/riak_node/1.0.0/actions/join.yaml b/resources/riak_node/1.0.0/actions/join.yaml deleted file mode 100644 index 01d202f7..00000000 --- a/resources/riak_node/1.0.0/actions/join.yaml +++ /dev/null @@ -1,15 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - shell: riak-admin cluster join {{join_to}} - ignore_errors: true - register: join_output - # those below are hacky solution for "this node is already member of a cluster - # solar for now lacks logic that would allow to avoid it - - shell: /bin/true - when: join_output|failed and join_output.stdout.find("This node is already a member of a cluster") != -1 - - shell: /bin/false - when: join_output|failed and join_output.stdout.find("This node is already a member of a cluster") == -1 - - shell: /bin/true - when: join_output|success - diff --git a/resources/riak_node/1.0.0/actions/remove.yaml b/resources/riak_node/1.0.0/actions/remove.yaml deleted file mode 100644 index 9af84785..00000000 --- a/resources/riak_node/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - apt: - name: riak - state: absent diff --git a/resources/riak_node/1.0.0/actions/run.yaml b/resources/riak_node/1.0.0/actions/run.yaml deleted file mode 100644 index cb553d47..00000000 --- a/resources/riak_node/1.0.0/actions/run.yaml +++ /dev/null @@ -1,28 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - # those below are mostly for tests - - shell: killall -u riak - ignore_errors: yes - # remove above when non tests - - # we install ubuntu repo there, - # NOT recommended on production - - shell: curl -s https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | sudo bash - - - apt: - name: riak - state: present - - service: - name: riak - state: stopped - - file: path=/etc/riak/riak.conf state=touch - - template: - src: {{templates_dir}}/riak.conf - dest: /etc/riak/riak.conf - - shell: rm -fr 
/var/lib/riak/kv_vnode/* - - shell: rm -fr /var/lib/riak/ring/* - - - service: - name: riak - state: reloaded diff --git a/resources/riak_node/1.0.0/actions/update.yaml b/resources/riak_node/1.0.0/actions/update.yaml deleted file mode 100644 index a4a5af70..00000000 --- a/resources/riak_node/1.0.0/actions/update.yaml +++ /dev/null @@ -1,12 +0,0 @@ -- hosts: [{{host}}] - sudo: yes - tasks: - - service: - name: riak - state: stopped - - template: - src: {{templates_dir}}/riak.conf - dest: /etc/riak/riak.conf - - service: - name: riak - state: reloaded diff --git a/resources/riak_node/1.0.0/meta.yaml b/resources/riak_node/1.0.0/meta.yaml deleted file mode 100644 index bf698ce5..00000000 --- a/resources/riak_node/1.0.0/meta.yaml +++ /dev/null @@ -1,38 +0,0 @@ -handler: ansible -version: 1.0.0 -actions: - commit: commit.yaml - run: run.yaml - join: join.yaml -input: - ip: - schema: str! - value: - riak_self_name: - schema: str! - value: - riak_hostname: - schema: str! - value: - riak_name: - schema: str! - value: null - computable: - lang: jinja2 - type: full - func: "{{riak_self_name}}@{{riak_hostname}}" - riak_port_http: - schema: int! - value: 18098 - riak_port_pb: - schema: int! - value: 18087 - riak_port_solr: - schema: int! - value: 8985 - join_to: - schema: str - value: - storage_backend: - schema: str! - value: bitcask diff --git a/resources/riak_node/1.0.0/templates/riak.conf.jinja b/resources/riak_node/1.0.0/templates/riak.conf.jinja deleted file mode 100644 index a9f5d977..00000000 --- a/resources/riak_node/1.0.0/templates/riak.conf.jinja +++ /dev/null @@ -1,494 +0,0 @@ -## Where to emit the default log messages (typically at 'info' -## severity): -## off: disabled -## file: the file specified by log.console.file -## console: to standard output (seen when using `riak attach-direct`) -## both: log.console.file and standard out. -## -## Default: file -## -## Acceptable values: -## - one of: off, file, console, both -log.console = file - -## The severity level of the console log, default is 'info'. -## -## Default: info -## -## Acceptable values: -## - one of: debug, info, notice, warning, error, critical, alert, emergency, none -log.console.level = info - -## When 'log.console' is set to 'file' or 'both', the file where -## console messages will be logged. -## -## Default: $(platform_log_dir)/console.log -## -## Acceptable values: -## - the path to a file -log.console.file = $(platform_log_dir)/console.log - -## The file where error messages will be logged. -## -## Default: $(platform_log_dir)/error.log -## -## Acceptable values: -## - the path to a file -log.error.file = $(platform_log_dir)/error.log - -## When set to 'on', enables log output to syslog. -## -## Default: off -## -## Acceptable values: -## - on or off -log.syslog = off - -## Whether to enable the crash log. -## -## Default: on -## -## Acceptable values: -## - on or off -log.crash = on - -## If the crash log is enabled, the file where its messages will -## be written. -## -## Default: $(platform_log_dir)/crash.log -## -## Acceptable values: -## - the path to a file -log.crash.file = $(platform_log_dir)/crash.log - -## Maximum size in bytes of individual messages in the crash log -## -## Default: 64KB -## -## Acceptable values: -## - a byte size with units, e.g. 10GB -log.crash.maximum_message_size = 64KB - -## Maximum size of the crash log in bytes, before it is rotated -## -## Default: 10MB -## -## Acceptable values: -## - a byte size with units, e.g. 
10GB -log.crash.size = 10MB - -## The schedule on which to rotate the crash log. For more -## information see: -## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation -## -## Default: $D0 -## -## Acceptable values: -## - text -log.crash.rotation = $D0 - -## The number of rotated crash logs to keep. When set to -## 'current', only the current open log file is kept. -## -## Default: 5 -## -## Acceptable values: -## - an integer -## - the text "current" -log.crash.rotation.keep = 5 - -## Name of the Erlang node -## -## Default: riak@127.0.0.1 -## -## Acceptable values: -## - text -nodename = {{riak_name}} - -## Cookie for distributed node communication. All nodes in the -## same cluster should use the same cookie or they will not be able to -## communicate. -## -## Default: riak -## -## Acceptable values: -## - text -distributed_cookie = riak - -## Sets the number of threads in async thread pool, valid range -## is 0-1024. If thread support is available, the default is 64. -## More information at: http://erlang.org/doc/man/erl.html -## -## Default: 64 -## -## Acceptable values: -## - an integer -erlang.async_threads = 64 - -## The number of concurrent ports/sockets -## Valid range is 1024-134217727 -## -## Default: 65536 -## -## Acceptable values: -## - an integer -erlang.max_ports = 65536 - -## Set scheduler forced wakeup interval. All run queues will be -## scanned each Interval milliseconds. While there are sleeping -## schedulers in the system, one scheduler will be woken for each -## non-empty run queue found. An Interval of zero disables this -## feature, which also is the default. -## This feature is a workaround for lengthy executing native code, and -## native code that do not bump reductions properly. -## More information: http://www.erlang.org/doc/man/erl.html#+sfwi -## -## Default: 500 -## -## Acceptable values: -## - an integer -## erlang.schedulers.force_wakeup_interval = 500 - -## Enable or disable scheduler compaction of load. By default -## scheduler compaction of load is enabled. When enabled, load -## balancing will strive for a load distribution which causes as many -## scheduler threads as possible to be fully loaded (i.e., not run out -## of work). This is accomplished by migrating load (e.g. runnable -## processes) into a smaller set of schedulers when schedulers -## frequently run out of work. When disabled, the frequency with which -## schedulers run out of work will not be taken into account by the -## load balancing logic. -## More information: http://www.erlang.org/doc/man/erl.html#+scl -## -## Default: false -## -## Acceptable values: -## - one of: true, false -## erlang.schedulers.compaction_of_load = false - -## Enable or disable scheduler utilization balancing of load. By -## default scheduler utilization balancing is disabled and instead -## scheduler compaction of load is enabled which will strive for a -## load distribution which causes as many scheduler threads as -## possible to be fully loaded (i.e., not run out of work). When -## scheduler utilization balancing is enabled the system will instead -## try to balance scheduler utilization between schedulers. That is, -## strive for equal scheduler utilization on all schedulers. -## More information: http://www.erlang.org/doc/man/erl.html#+sub -## -## Acceptable values: -## - one of: true, false -## erlang.schedulers.utilization_balancing = true - -## Number of partitions in the cluster (only valid when first -## creating the cluster). 
Must be a power of 2, minimum 8 and maximum -## 1024. -## -## Default: 64 -## -## Acceptable values: -## - an integer -ring_size = 8 - -## Number of concurrent node-to-node transfers allowed. -## -## Default: 2 -## -## Acceptable values: -## - an integer -## transfer_limit = 2 - -## Default cert location for https can be overridden -## with the ssl config variable, for example: -## -## Acceptable values: -## - the path to a file -## ssl.certfile = $(platform_etc_dir)/cert.pem - -## Default key location for https can be overridden with the ssl -## config variable, for example: -## -## Acceptable values: -## - the path to a file -## ssl.keyfile = $(platform_etc_dir)/key.pem - -## Default signing authority location for https can be overridden -## with the ssl config variable, for example: -## -## Acceptable values: -## - the path to a file -## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem - -## DTrace support Do not enable 'dtrace' unless your Erlang/OTP -## runtime is compiled to support DTrace. DTrace is available in -## R15B01 (supported by the Erlang/OTP official source package) and in -## R14B04 via a custom source repository & branch. -## -## Default: off -## -## Acceptable values: -## - on or off -dtrace = off - -## Platform-specific installation paths (substituted by rebar) -## -## Default: ./bin -## -## Acceptable values: -## - the path to a directory -platform_bin_dir = ./bin - -## -## Default: ./data -## -## Acceptable values: -## - the path to a directory -platform_data_dir = ./data - -## -## Default: ./etc -## -## Acceptable values: -## - the path to a directory -platform_etc_dir = ./etc - -## -## Default: ./lib -## -## Acceptable values: -## - the path to a directory -platform_lib_dir = ./lib - -## -## Default: ./log -## -## Acceptable values: -## - the path to a directory -platform_log_dir = ./log - -## Enable consensus subsystem. Set to 'on' to enable the -## consensus subsystem used for strongly consistent Riak operations. -## -## Default: off -## -## Acceptable values: -## - on or off -## strong_consistency = on - -## listener.http. is an IP address and TCP port that the Riak -## HTTP interface will bind. -## -## Default: 127.0.0.1:8098 -## -## Acceptable values: -## - an IP/port pair, e.g. 127.0.0.1:10011 -listener.http.internal = 0.0.0.0:{{riak_port_http}} - -## listener.protobuf. is an IP address and TCP port that the Riak -## Protocol Buffers interface will bind. -## -## Default: 127.0.0.1:8087 -## -## Acceptable values: -## - an IP/port pair, e.g. 127.0.0.1:10011 -listener.protobuf.internal = 0.0.0.0:{{riak_port_pb}} - -## The maximum length to which the queue of pending connections -## may grow. If set, it must be an integer > 0. If you anticipate a -## huge number of connections being initialized *simultaneously*, set -## this number higher. -## -## Default: 128 -## -## Acceptable values: -## - an integer -## protobuf.backlog = 128 - -## listener.https. is an IP address and TCP port that the Riak -## HTTPS interface will bind. -## -## Acceptable values: -## - an IP/port pair, e.g. 127.0.0.1:10011 -## listener.https.internal = 127.0.0.1:8098 - -## How Riak will repair out-of-sync keys. Some features require -## this to be set to 'active', including search. 
-## * active: out-of-sync keys will be repaired in the background -## * passive: out-of-sync keys are only repaired on read -## * active-debug: like active, but outputs verbose debugging -## information -## -## Default: active -## -## Acceptable values: -## - one of: active, passive, active-debug -anti_entropy = active - -## Specifies the storage engine used for Riak's key-value data -## and secondary indexes (if supported). -## -## Default: bitcask -## -## Acceptable values: -## - one of: bitcask, leveldb, memory, multi -storage_backend = {{storage_backend}} - -## Controls which binary representation of a riak value is stored -## on disk. -## * 0: Original erlang:term_to_binary format. Higher space overhead. -## * 1: New format for more compact storage of small values. -## -## Default: 1 -## -## Acceptable values: -## - the integer 1 -## - the integer 0 -object.format = 1 - -## Reading or writing objects bigger than this size will write a -## warning in the logs. -## -## Default: 5MB -## -## Acceptable values: -## - a byte size with units, e.g. 10GB -object.size.warning_threshold = 5MB - -## Writing an object bigger than this will send a failure to the -## client. -## -## Default: 50MB -## -## Acceptable values: -## - a byte size with units, e.g. 10GB -object.size.maximum = 50MB - -## Writing an object with more than this number of siblings will -## generate a warning in the logs. -## -## Default: 25 -## -## Acceptable values: -## - an integer -object.siblings.warning_threshold = 25 - -## Writing an object with more than this number of siblings will -## send a failure to the client. -## -## Default: 100 -## -## Acceptable values: -## - an integer -object.siblings.maximum = 100 - -## A path under which bitcask data files will be stored. -## -## Default: $(platform_data_dir)/bitcask -## -## Acceptable values: -## - the path to a directory -bitcask.data_root = $(platform_data_dir)/bitcask - -## Configure how Bitcask writes data to disk. -## erlang: Erlang's built-in file API -## nif: Direct calls to the POSIX C API -## The NIF mode provides higher throughput for certain -## workloads, but has the potential to negatively impact -## the Erlang VM, leading to higher worst-case latencies -## and possible throughput collapse. -## -## Default: erlang -## -## Acceptable values: -## - one of: erlang, nif -bitcask.io_mode = erlang - -## Set to 'off' to disable the admin panel. -## -## Default: off -## -## Acceptable values: -## - on or off -riak_control = on - -## Authentication mode used for access to the admin panel. -## -## Default: off -## -## Acceptable values: -## - one of: off, userlist -riak_control.auth.mode = off - -## If riak control's authentication mode (riak_control.auth.mode) -## is set to 'userlist' then this is the list of usernames and -## passwords for access to the admin panel. -## To create users with given names, add entries of the format: -## riak_control.auth.user.USERNAME.password = PASSWORD -## replacing USERNAME with the desired username and PASSWORD with the -## desired password for that user. -## -## Acceptable values: -## - text -## riak_control.auth.user.admin.password = pass - -## This parameter defines the percentage of total server memory -## to assign to LevelDB. LevelDB will dynamically adjust its internal -## cache sizes to stay within this size. The memory size can -## alternately be assigned as a byte count via leveldb.maximum_memory -## instead. 
-## -## Default: 70 -## -## Acceptable values: -## - an integer -leveldb.maximum_memory.percent = 70 - -## To enable Search set this 'on'. -## -## Default: off -## -## Acceptable values: -## - on or off -search = off - -## How long Riak will wait for Solr to start. The start sequence -## will be tried twice. If both attempts timeout, then the Riak node -## will be shutdown. This may need to be increased as more data is -## indexed and Solr takes longer to start. Values lower than 1s will -## be rounded up to the minimum 1s. -## -## Default: 30s -## -## Acceptable values: -## - a time duration with units, e.g. '10s' for 10 seconds -search.solr.start_timeout = 30s - -## The port number which Solr binds to. -## NOTE: Binds on every interface. -## -## Default: 8093 -## -## Acceptable values: -## - an integer -search.solr.port = 8093 - -## The port number which Solr JMX binds to. -## NOTE: Binds on every interface. -## -## Default: 8985 -## -## Acceptable values: -## - an integer -search.solr.jmx_port = 8985 - -## The options to pass to the Solr JVM. Non-standard options, -## i.e. -XX, may not be portable across JVM implementations. -## E.g. -XX:+UseCompressedStrings -## -## Default: -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops -## -## Acceptable values: -## - text -search.solr.jvm_options = -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops - -# new diff --git a/resources/ro_node/1.0.0/meta.yaml b/resources/ro_node/1.0.0/meta.yaml deleted file mode 100644 index 7cbf9f5d..00000000 --- a/resources/ro_node/1.0.0/meta.yaml +++ /dev/null @@ -1,22 +0,0 @@ -handler: none -version: 1.0.0 -actions: -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - name: - schema: str - value: a node - location_id: - schema: str! - value: $uuid - reverse: True - -tags: [resources=node] diff --git a/resources/solar_bootstrap/1.0.0/actions/run.yaml b/resources/solar_bootstrap/1.0.0/actions/run.yaml deleted file mode 100644 index 89bcfb3d..00000000 --- a/resources/solar_bootstrap/1.0.0/actions/run.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -# TODO: this shouldn't be outside of the resource directory -- hosts: all - sudo: yes - tasks: - - script: /vagrant/bootstrap/playbooks/files/ubuntu-ansible.sh -#- include: celery.yaml tags=['master'] celery_dir=/var/run/celery -- include: /vagrant/bootstrap/playbooks/build-main.yaml -- include: /vagrant/bootstrap/playbooks/custom-configs.yaml master_ip={{ master_ip }} -- include: /vagrant/bootstrap/playbooks/celery.yaml tags=slave diff --git a/resources/solar_bootstrap/1.0.0/meta.yaml b/resources/solar_bootstrap/1.0.0/meta.yaml deleted file mode 100644 index 599c325b..00000000 --- a/resources/solar_bootstrap/1.0.0/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -handler: ansible -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - - master_ip: - schema: str! 
- value: -tags: [] -version: 1.0.0 diff --git a/resources/sources/1.0.0/meta.yaml b/resources/sources/1.0.0/meta.yaml deleted file mode 100644 index 88f9eee1..00000000 --- a/resources/sources/1.0.0/meta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -handler: naive_sync -version: 1.0.0 -input: - sources: - schema: [{'src': 'str!', 'dst': 'str!'}] - value: [] diff --git a/resources/ssh_key/1.0.0/actions/run.yaml b/resources/ssh_key/1.0.0/actions/run.yaml deleted file mode 100644 index 11423977..00000000 --- a/resources/ssh_key/1.0.0/actions/run.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- hosts: '*' - sudo: yes - gather_facts: false - # this is default variables, they will be overwritten by resource one - vars: - keys_dir: /vagrant/.ssh - private_key: /vagrant/.ssh/id_rsa - passphrase: '' - tasks: - - shell: mkdir -p {{keys_dir}} - - stat: path={{private_key}} - register: key - - shell: ssh-keygen -t rsa -f {{private_key}} -N "" - when: key.stat.exists == False diff --git a/resources/ssh_key/1.0.0/meta.yaml b/resources/ssh_key/1.0.0/meta.yaml deleted file mode 100644 index d927ae14..00000000 --- a/resources/ssh_key/1.0.0/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -handler: ansible_playbook -version: 1.0.0 -actions: -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - keys_dir: - schema: str! - value: - private_key: - schema: str! - value: - public_key: - schema: str! - value: - passphrase: - schema: str - value: diff --git a/resources/transport_rsync/1.0.0/meta.yaml b/resources/transport_rsync/1.0.0/meta.yaml deleted file mode 100644 index c9d70bf2..00000000 --- a/resources/transport_rsync/1.0.0/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -input: - key: - schema: str - value: - password: - schema: str - value: - user: - schema: str! - value: - name: - schema: str! 
- value: rsync - location_id: - schema: str - value: - reverse: True - is_own: False - transports_id: - schema: str - value: - is_emit: False - port: - schema: int - value: 3579 diff --git a/resources/transport_solar_agent/1.0.0/actions/run.yaml b/resources/transport_solar_agent/1.0.0/actions/run.yaml deleted file mode 100644 index ed118eca..00000000 --- a/resources/transport_solar_agent/1.0.0/actions/run.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - tasks: - - shell: pip install git+git://github.com/Mirantis/solar-agent.git - - shell: start-stop-daemon --stop --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" - ignore_errors: True - - shell: start-stop-daemon -b --start --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" diff --git a/resources/transport_solar_agent/1.0.0/actions/update.yaml b/resources/transport_solar_agent/1.0.0/actions/update.yaml deleted file mode 100644 index ed118eca..00000000 --- a/resources/transport_solar_agent/1.0.0/actions/update.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - tasks: - - shell: pip install git+git://github.com/Mirantis/solar-agent.git - - shell: start-stop-daemon --stop --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" - ignore_errors: True - - shell: start-stop-daemon -b --start --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" diff --git a/resources/transport_solar_agent/1.0.0/meta.yaml b/resources/transport_solar_agent/1.0.0/meta.yaml deleted file mode 100644 index 804025ba..00000000 --- a/resources/transport_solar_agent/1.0.0/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -handler: ansible -input: - solar_agent_user: - schema: str! - value: - solar_agent_password: - schema: str! - value: - # solar_agent_transport_class: - # schema: str! - # value: - solar_agent_port: - schema: int! - value: 5555 - name: - schema: str! - value: solar_agent - location_id: - schema: str - value: - reverse: True - is_own: False - transports_id: - schema: str - is_emit: False diff --git a/resources/transport_ssh/1.0.0/meta.yaml b/resources/transport_ssh/1.0.0/meta.yaml deleted file mode 100644 index 4273ed88..00000000 --- a/resources/transport_ssh/1.0.0/meta.yaml +++ /dev/null @@ -1,25 +0,0 @@ -input: - ssh_key: - schema: str - value: - ssh_password: - schema: str - value: - ssh_user: - schema: str! - value: - ssh_port: - schema: int! - value: 22 - name: - schema: str! 
- value: ssh - location_id: - schema: str - value: - reverse: True - is_own: False - transports_id: - schema: str - value: - is_emit: False diff --git a/resources/transport_torrent/1.0.0/actions/run.yaml b/resources/transport_torrent/1.0.0/actions/run.yaml deleted file mode 100644 index 76b00830..00000000 --- a/resources/transport_torrent/1.0.0/actions/run.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - tasks: - - apt: - name: python-libtorrent - state: present - - copy: - src: {{scripts_dir}}/solar_torrent.py - dest: /var/tmp/solar_torrent.py diff --git a/resources/transport_torrent/1.0.0/meta.yaml b/resources/transport_torrent/1.0.0/meta.yaml deleted file mode 100644 index 9100f94f..00000000 --- a/resources/transport_torrent/1.0.0/meta.yaml +++ /dev/null @@ -1,17 +0,0 @@ -handler: ansible -input: - trackers: - schema: [str!] - value: [] - name: - schema: str! - value: torrent - location_id: - schema: str - value: - reverse: True - is_own: False - transports_id: - schema: str - value: - is_emit: False diff --git a/resources/transport_torrent/1.0.0/scripts/solar_torrent.py b/resources/transport_torrent/1.0.0/scripts/solar_torrent.py deleted file mode 100644 index 602fb64f..00000000 --- a/resources/transport_torrent/1.0.0/scripts/solar_torrent.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# TODO: change to something less naive -# - -from __future__ import print_function - -import libtorrent as lt -from operator import attrgetter -import os -import sys -import time - -state_str = ['queued', 'checking', 'downloading metadata', 'downloading', - 'finished', 'seeding', 'allocating', 'checking fastresume'] - - -# we use port range from 6881 to 6981 - - -class MultiTorrent(object): - def __init__(self, torrents, ses): - self.torrents = torrents - self.ses = ses - - def force_reannounce(self): - for torrent in self.torrents: - torrent.force_reannounce() - - @property - def is_seeding(self): - for torrent in self.torrents: - status = torrent.status() - if state_str[status.state] != 'seeding': - return False - return True - - @property - def progress(self): - total_progress = map( - attrgetter('progress'), map(lambda x: x.status(), self.torrents)) - return sum(total_progress) / len(total_progress) - - def numbers(self): - seeding = 0 - downloading = 0 - for torrent in self.torrents: - if torrent.status().is_seeding: - seeding += 1 - else: - downloading += 1 - return seeding, downloading - - -def init_session(args, seed=False): - ses = lt.session() - all_torrents = [] - for save_path, magnet_or_path in args: - if os.path.exists(magnet_or_path): - e = lt.bdecode(open(magnet_or_path, 'rb').read()) - info = lt.torrent_info(e) - params = {'save_path': save_path, - 'storage_mode': lt.storage_mode_t.storage_mode_sparse, - 'ti': info, - 'seed_mode': seed} - h = ses.add_torrent(params) - else: - h = ses.add_torrent({ - 'save_path': save_path, - 'storage_mode': lt.storage_mode_t.storage_mode_sparse, - 'url': magnet_or_path, - 'seed_mode': seed - }) - all_torrents.append(h) - return ses, all_torrents - - -def _daemonize(): - # should be true daemonize - new_pid = os.fork() - if new_pid > 0: - # first - sys.exit(0) - os.setsid() - new_pid2 = os.fork() - if new_pid2 > 0: - sys.exit(0) - stdin = file(os.devnull, 'r') - stdout = file(os.devnull, 'a+') - stderr = file(os.devnull, 'a+', 0) - os.dup2(stdin.fileno(), sys.stdin.fileno()) - os.dup2(stdout.fileno(), sys.stdout.fileno()) - os.dup2(stderr.fileno(), sys.stderr.fileno()) - - -def _seeder(torrents, save_path='.', max_seed_ratio=5): - _daemonize() - no_peers = 120 - max_alive = 5 * 60 - ses, all_torrents = init_session(torrents, seed=True) - ses.listen_on(6881, 6981) - - mt = MultiTorrent(all_torrents, ses) - end = time.time() + max_alive - peers_0 = time.time() - i = 0 - while not time.time() > end: - now = time.time() - i += 1 - # if i % 10 == 0 and i != 0: - # mt.force_reannounce() - s = ses.status() - # if not mt.is_seeding: - # sys.exit("Was seeder mode but not seeding") - if peers_0 < now - no_peers: - sys.exit("No peers for %d seconds exiting" % no_peers) - if i % 5 == 0: - print("%.2f%% up=%.1f kB/s peers=%s total_upload_B=%.1f" % - (mt.progress * 100, s.upload_rate / 1000, s.num_peers, - s.total_upload)) - if s.num_peers != 0: - peers_0 = now - sys.stdout.flush() - time.sleep(1) - else: - print('Seed timeout exiting') - sys.exit(0) - - -def _getter(torrents, max_seed_ratio=3): - max_no_changes = 1 * 60 - ses, all_torrents = init_session(torrents) - ses.listen_on(6881, 6981) - - mt = MultiTorrent(all_torrents, ses) - - i = 0 - last_state = (time.time(), None) - while (not mt.is_seeding): - i += 1 - # if i % 10 == 0 and i != 0: - # mt.force_reannounce() - s = ses.status() - if i % 5 == 0: - print('%.2f%% complete (down: %.1f kb/s up: %.1f kB/s p: %d) %s' % - (mt.progress * 100, - s.download_rate / 1000, - s.upload_rate / 1000, - 
s.num_peers, - mt.numbers())) - now = time.time() - current_state = (now, mt.progress) - if current_state[-1] != last_state[-1]: - last_state = current_state - if last_state[0] < now - max_no_changes: - sys.exit("Failed to fetch torrents in %ds" % max_no_changes) - time.sleep(0.5) - if mt.progress == 1: - # ok - # torrent lib dislikes forks there - from subprocess import check_output - args = sys.argv[:] - args[-2] = 's' - args.insert(0, sys.executable) - print("Entering seeder mode") - check_output(args, shell=False) - else: - # err - sys.exit(1) - - -if __name__ == '__main__': - mode = sys.argv[1] - torrents = sys.argv[2] - torrents = [x.split('|') for x in torrents.split(';')] - print(repr(torrents)) - if mode == 'g': - _getter(torrents, *sys.argv[3:]) - elif mode == 's': - _seeder(torrents, *sys.argv[3:]) - else: - sys.exit("`s` or `g` needed") diff --git a/resources/transports/1.0.0/meta.yaml b/resources/transports/1.0.0/meta.yaml deleted file mode 100644 index fca3e5b8..00000000 --- a/resources/transports/1.0.0/meta.yaml +++ /dev/null @@ -1,12 +0,0 @@ -input: - transports: - schema: [{user: str, password: str, port: int!, key: str, name: str!, trackers: [str]}] - value: [] - transports_id: - schema: str! - value: $uuid - reverse: True - location_id: - schema: str - value: - reverse: True diff --git a/resources/volume_group/1.0.0/actions/remove.yaml b/resources/volume_group/1.0.0/actions/remove.yaml deleted file mode 100644 index d23e5958..00000000 --- a/resources/volume_group/1.0.0/actions/remove.yaml +++ /dev/null @@ -1,16 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - tasks: - - name: remove VG - lvg: vg={{name}} state=absent force=yes - - name: find loop device - shell: losetup -a|grep "{{path}}"|awk -F':' '{print $1}' - register: loop_device - - name: if loop device exists, delete it - command: sudo losetup -d {% raw %}{{item}}{% endraw %} - when: loop_device|success - with_items: loop_device.stdout_lines - - name: remove file - file: path={{path}} state=absent - - diff --git a/resources/volume_group/1.0.0/actions/run.yaml b/resources/volume_group/1.0.0/actions/run.yaml deleted file mode 100644 index 11ff2ad1..00000000 --- a/resources/volume_group/1.0.0/actions/run.yaml +++ /dev/null @@ -1,20 +0,0 @@ -- hosts: [{{ host }}] - sudo: yes - tasks: - - name: install dependencies - apt: name=lvm2 state=present - - name: preapara file - command: truncate -s 10G {{path}} creates={{path}} - - name: check if loop for file is already created - shell: losetup -a|grep {{path}} - register: loop_created - ignore_errors: True - - name: if loop is not created, create it - command: losetup -f {{path}} - when: loop_created|failed - - name: find loop device - shell: losetup -a|grep '{{path}}'|awk -F':' '{print $1}' - register: loop_device - - name: create Volume Group on loop device - lvg: vg={{volume_name}} pvs={% raw %}{{item}}{% endraw %} state=present - with_items: loop_device.stdout_lines diff --git a/resources/volume_group/1.0.0/meta.yaml b/resources/volume_group/1.0.0/meta.yaml deleted file mode 100644 index 4ba31874..00000000 --- a/resources/volume_group/1.0.0/meta.yaml +++ /dev/null @@ -1,26 +0,0 @@ -handler: ansible -version: 1.0.0 -input: - volume_name: - schema: str! - value: - path: - schema: str! - value: - - # not used, for now all VGs are file based - type: - schema: str! - value: 'file' - - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! 
- # value: - -tags: [resource/volume_group] diff --git a/resources/vxlan_mesh/1.0.0/actions/run.yaml b/resources/vxlan_mesh/1.0.0/actions/run.yaml deleted file mode 100644 index 48f5e8d3..00000000 --- a/resources/vxlan_mesh/1.0.0/actions/run.yaml +++ /dev/null @@ -1,16 +0,0 @@ -- hosts: '*' - sudo: yes - vars: - id: 42 - group: 239.1.10.2 - parent: eth1 - master: br-test0 - tasks: - - name: add vxlan mesh - shell: ip l add vxlan{{id}} type vxlan id {{id}} - group {{group}} dev {{parent}} - ignore_errors: true - - name: set vxlan master - shell: ip l set vxlan{{id}} master {{master}} - - name: set vxlan tunnel up - shell: ip l set vxlan{{id}} up diff --git a/resources/vxlan_mesh/1.0.0/meta.yaml b/resources/vxlan_mesh/1.0.0/meta.yaml deleted file mode 100644 index d2e74372..00000000 --- a/resources/vxlan_mesh/1.0.0/meta.yaml +++ /dev/null @@ -1,22 +0,0 @@ -handler: ansible_playbook -version: 1.0.0 -actions: -input: - ip: - schema: str! - value: - # ssh_key: - # schema: str! - # value: - # ssh_user: - # schema: str! - # value: - parent: - schema: str! - value: - master: - schema: str! - value: - id: - schema: int! - value: diff --git a/templates/controller/1.0.0/controller.yaml b/templates/controller/1.0.0/controller.yaml deleted file mode 100644 index 20e34c62..00000000 --- a/templates/controller/1.0.0/controller.yaml +++ /dev/null @@ -1,76 +0,0 @@ -id: primary_controller - -resources: - - id: mariadb_service - from: resources/mariadb_service - input: - port: 3306 - root_password: mariadb - image: mariadb - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: rabbitmq_service - from: resources/rabbitmq_service - input: - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: keystone_base - from: templates/keystone_base - input: - login_user: root - login_password: 'mariadb_service::root_password' - login_port: 'mariadb_service::port' - db_name: 'keystone' - db_host: 'mariadb_service::ip' - user_password: 'keystone' - user_name: 'keystone' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: keystone_api_1 - from: templates/keystone_api - input: - idx: 1 - db_password: 'keystone_db_user::user_password' - db_user: 'keystone_db_user::user_name' - db_port: 'keystone_db_user::login_port' - db_name: 'keystone_db_user::db_name' - db_host: 'mariadb_service::ip' - admin_token: 132fdsfwqee - admin_port: 35357 - port: 5000 - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: openstack_base - from: templates/openstack_base - input: - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - keystone_ip: 'keystone_service_1::ip' - keystone_admin_port: 'keystone_service_1::admin_port' - keystone_port: 'keystone_service_1::port' - admin_token: 'keystone_service_1::admin_token' - - - id: openrc_file - from: resources/openrc_file - input: - keystone_host: 'keystone_service_1::ip' - keystone_port: 'keystone_service_1::admin_port' - tenant: 'admin_user::tenant_name' - user_name: 'admin_user::user_name' - password: 'admin_user::user_password' - - ip: '#{ip}#' - ssh_key: '#{ssh_key}#' - ssh_user: '#{ssh_user}#' - -tags: ['resources/controller', 'resource/primary_controller'] - diff --git a/templates/glance/1.0.0/glance.yaml b/templates/glance/1.0.0/glance.yaml deleted file mode 100644 index 021db28f..00000000 --- a/templates/glance/1.0.0/glance.yaml +++ /dev/null @@ -1,23 +0,0 @@ -id: glance_#{idx}# - -resources: - - id: glance_base_#{ idx }# - from: templates/glance_db - input: - idx: '#{ idx }#' - - db_name: '#{ 
db_name }#' - db_user: '#{ db_user }#' - db_password: '#{ db_password }#' - db_host: '#{ db_host }#' - db_login_port: '#{ db_port }#' - db_login_user: '#{ db_login_user }#' - db_login_password: '#{ db_login_password }#' - - ip: '#{ ip }#' - ssh_user: '#{ ssh_user }#' - ssh_key: '#{ ssh_key }#' - - -tags: ['resources/glance', 'resource/glance_api', 'resource/glance_registry'] - diff --git a/templates/glance_base/1.0.0/glance_base.yaml b/templates/glance_base/1.0.0/glance_base.yaml deleted file mode 100644 index 1cc4639d..00000000 --- a/templates/glance_base/1.0.0/glance_base.yaml +++ /dev/null @@ -1,26 +0,0 @@ -id: glance_base - -resources: - - id: glance_db - from: resources/mariadb_db - input: - db_name: #{db_name}# - login_user: '#{login_user}#' - login_password: '#{login_password}#' - login_port: '#{login_port}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: glance_db_user - from: resources/mariadb_user - input: - user_password: '#{user_password}#' - user_name: '#{user_name}#' - db_name: 'keystone_db::db_name' - login_user: 'keystone_db::login_user' - login_password: 'keystone_db::login_password' - login_port: 'keystone_db::login_port' - ip: 'keystone_db::ip' - ssh_user: 'keystone_db::ssh_user' - ssh_key: 'keystone_db::ssh_key' diff --git a/templates/glance_db/1.0.0/glance_db.yaml b/templates/glance_db/1.0.0/glance_db.yaml deleted file mode 100644 index b64f14cc..00000000 --- a/templates/glance_db/1.0.0/glance_db.yaml +++ /dev/null @@ -1,30 +0,0 @@ -id: glance_db_#{ idx }# - -resources: - - id: glance_db_db_#{ idx }# - from: resources/mariadb_db - input: - db_name: '#{ db_name }#' - login_user: '#{ db_login_user }#' - login_password: '#{ db_login_password }#' - login_port: '#{ db_login_port }#' - - ip: '#{ ip }#' - ssh_user: '#{ ssh_user }#' - ssh_key: '#{ ssh_key }#' - - - id: glance_db_user - from: resources/mariadb_user - input: - user_password: '#{ db_password }#' - user_name: '#{ db_user }#' - - db_name: '#{ db_name }#' - - login_user: '#{ db_login_user }#' - login_password: '#{ db_login_password }#' - login_port: '#{ db_login_port }#' - - ip: '#{ ip }#' - ssh_user: '#{ ssh_user }#' - ssh_key: '#{ ssh_key }#' diff --git a/templates/glance_registry/1.0.0/glance_registry.yaml b/templates/glance_registry/1.0.0/glance_registry.yaml deleted file mode 100644 index 5d30cb04..00000000 --- a/templates/glance_registry/1.0.0/glance_registry.yaml +++ /dev/null @@ -1,26 +0,0 @@ -id: glance_register_#{idx}# - -resources: - - id: glance_config_#{idx}# - from: resources/glance_config - input: - keystone_admin_port: '#{keystone_admin_port}#' - keystone_ip: '#{keystone_ip}#' - mysql_password: '#{mysql_password}#' - mysql_user: '#{mysql_user}#' - mysql_db: '#{mysql_db}#' - mysql_ip: '#{mysql_ip}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - - id: glance_registry_#{idx}# - from: resources/glance_registry_service - input: - ip: 'keystone_config_#{idx}#::ip' - ssh_user: 'keystone_config_#{idx}#::ssh_user' - ssh_key: 'keystone_config_#{idx}#::ssh_key' - -tags: ['resources/keystone', 'resource/keystone_api'] - diff --git a/templates/haproxy/1.0.0/haproxy.yaml b/templates/haproxy/1.0.0/haproxy.yaml deleted file mode 100644 index 77920fc7..00000000 --- a/templates/haproxy/1.0.0/haproxy.yaml +++ /dev/null @@ -1,46 +0,0 @@ -id: haproxy - -resources: - - id: haproxy_config#{index}# - from: resources/haproxy_config - location: #{node}# - input: - ip: '#{node}#::ip' - config:protocol: - #% for config in service_configs %# - - #{config}#::protocol - #% 
endfor %# - config:listen_port: - #% for config in service_configs %# - - #{config}#::listen_port - #% endfor %# - config:name: - #% for config in service_configs %# - - #{config}#::name - #% endfor %# - config:backends: - #% for config in service_configs %# - - #{config}#::backends - #% endfor %# - - - id: haproxy_service#{index}# - location: #{node}# - from: resources/haproxy_service - input: - ip: '#{node}#::ip' - -events: - - type: depends_on - parent_action: 'haproxy_service#{index}#.run' - state: 'success' - child_action: 'haproxy_config#{index}#.run' - - - type: react_on - parent_action: 'haproxy_config#{index}#.run' - state: 'success' - child_action: 'haproxy_service#{index}#.apply_config' - - - type: react_on - parent_action: 'haproxy_config#{index}#.update' - state: 'success' - child_action: 'haproxy_service#{index}#.apply_config' diff --git a/templates/keystone_api/1.0.0/keystone_api.yaml b/templates/keystone_api/1.0.0/keystone_api.yaml deleted file mode 100644 index f831bdc1..00000000 --- a/templates/keystone_api/1.0.0/keystone_api.yaml +++ /dev/null @@ -1,20 +0,0 @@ -id: keystone_api_#{idx}# - -resources: - - id: keystone_service_#{idx}# - from: resources/keystone_puppet - input: - admin_token: '#{admin_token}#' - db_host: '#{db_host}#' - db_name: '#{db_name}#' - db_user: '#{db_user}#' - db_password: '#{db_password}#' - - admin_port: #{admin_port}# - port: #{port}# - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - -tags: ['resources/keystone', 'resource/keystone_api'] diff --git a/templates/keystone_base/1.0.0/keystone_base.yaml b/templates/keystone_base/1.0.0/keystone_base.yaml deleted file mode 100644 index a7bee922..00000000 --- a/templates/keystone_base/1.0.0/keystone_base.yaml +++ /dev/null @@ -1,28 +0,0 @@ -id: keystone_base - -resources: - - id: keystone_db - from: resources/mariadb_db - input: - db_name: '#{db_name}#' - db_host: '#{db_host}#' - login_user: '#{login_user}#' - login_password: '#{login_password}#' - login_port: '#{login_port}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: keystone_db_user - from: resources/mariadb_user - input: - user_password: '#{user_password}#' - user_name: '#{user_name}#' - db_name: 'keystone_db::db_name' - db_host: '#{db_host}#' - login_user: 'keystone_db::login_user' - login_password: 'keystone_db::login_password' - login_port: 'keystone_db::login_port' - ip: 'keystone_db::ip' - ssh_user: 'keystone_db::ssh_user' - ssh_key: 'keystone_db::ssh_key' diff --git a/templates/mos_repos/1.0.0/mos_repos.yaml b/templates/mos_repos/1.0.0/mos_repos.yaml deleted file mode 100644 index 1326a418..00000000 --- a/templates/mos_repos/1.0.0/mos_repos.yaml +++ /dev/null @@ -1,43 +0,0 @@ -id: mos_repos -resources: - - id: mos_holdback_#{index}# - from: resources/apt_repo - location: #{node}# - input: - name: mos-holdback - package: '*' - repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/8.0/ mos8.0-holdback main restricted - pin: release o=Mirantis,n=mos8.0,a=mos8.0-holdback,l=mos8.0 - pin_priority: 1100 - - id: mos_security_#{index}# - from: resources/apt_repo - location: #{node}# - input: - name: mos - package: '*' - repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/8.0/ mos8.0-security main restricted - pin: release o=Mirantis,n=mos8.0,a=mos8.0-security,l=mos8.0 - pin_priority: 1050 - - id: mos_updates_#{index}# - from: resources/apt_repo - location: #{node}# - input: - name: mos_update - package: '*' - repo: deb http://mirror.fuel-infra.org/mos-repos/ubuntu/8.0/ mos8.0-updates main 
restricted - pin: release o=Mirantis,a=mos8.0-updates,l=mos8.0,n=mos8.0 - pin_priority: 1050 - - id: managed_apt_#{index}# - from: resources/managed_apt - location: #{node}# - input: - names: - - mos_holdback_#{index}#::name - - mos_security_#{index}#::name - - mos_updates_#{index}#::name - repos: - - mos_holdback_#{index}#::repo - - mos_security_#{index}#::repo - - mos_updates_#{index}#::repo - ensure_other_removed: false - diff --git a/templates/nodes/1.0.0/nodes.yaml b/templates/nodes/1.0.0/nodes.yaml deleted file mode 100644 index dd8beef2..00000000 --- a/templates/nodes/1.0.0/nodes.yaml +++ /dev/null @@ -1,37 +0,0 @@ -id: simple_riak_with_transports -resources: -#% for i in range(count|int) %# - #% set j = i +1 %# - - id: ssh_transport#{j}# - from: resources/transport_ssh - input: - ssh_user: 'vagrant' - ssh_key: '/vagrant/.vagrant/machines/solar-dev#{j}#/virtualbox/private_key' - - id: rsync#{j}# - from: resources/transport_rsync - input: - user: vagrant - key: /vagrant/.vagrant/machines/solar-dev#{j}#/virtualbox/private_key - - id: transports#{j}# - from: resources/transports - input: - transports: - - key: ssh_transport#{j}#::ssh_key - user: ssh_transport#{j}#::ssh_user - port: ssh_transport#{j}#::ssh_port - name: ssh_transport#{j}#::name - - key: rsync#{j}#::key - name: rsync#{j}#::name - user: rsync#{j}#::user - port: rsync#{j}#::port - - id: node#{j}# - from: resources/ro_node - input: - name: node#{j}# - ip: '10.0.0.#{i + 3}#' - transports_id: transports#{j}#::transports_id - - id: hosts_file#{j}# - from: resources/hosts_file - location: node#{j}# - tags: ['location=node#{j}#'] -#% endfor %# diff --git a/templates/nodes_network/1.0.0/nodes_network.yaml b/templates/nodes_network/1.0.0/nodes_network.yaml deleted file mode 100644 index 8be7ee76..00000000 --- a/templates/nodes_network/1.0.0/nodes_network.yaml +++ /dev/null @@ -1,71 +0,0 @@ -id: simple_multinode_gre -# eth2 - private 10.1.0.0/24 with JUMBO frames, -# eth1 - mgmt 10.0.0.0/24, -# eth3 - ext 10.2.0.0/24 -resources: -#% for i in range(count|int) %# - - id: node#{i}#_sdn - from: resources/node_network_puppet - input: - use_ovs: true - network_scheme: - version: '1.1' - provider: lnx - interfaces: - eth3: - mtu: 1500 - eth1: - mtu: 1500 - eth2: - mtu: 9000 - transformations: - - action: add-br - name: br-mgmt - - action: add-br - name: br-ex - - action: add-br - name: br-floating - provider: ovs - - action: add-patch - bridges: - - br-floating - - br-ex - provider: ovs - mtu: 65000 - - action: add-br - name: br-mesh - - action: add-port - bridge: br-ex - name: eth3 - - action: add-port - bridge: br-mgmt - name: eth1 - - action: add-port - bridge: br-mesh - name: eth2 - endpoints: - br-mesh: - IP: - - 10.1.0.#{3 + i}#/24 - br-floating: - IP: none - br-mgmt: - IP: - - 10.0.0.#{3 + i}#/24 - vendor_specific: - phy_interfaces: - - eth1 - br-ex: - IP: - - 10.2.0.#{3 + i}#/24 - vendor_specific: - phy_interfaces: - - eth3 - #gateway: 10.2.0.1 - roles: - management: br-mgmt - neutron/mesh: br-mesh - ex: br-ex - neutron/floating: br-floating - fw-admin: br-fw-admin -#% endfor %# diff --git a/templates/nodes_with_transports/1.0.0/nodes_with_transports.yaml b/templates/nodes_with_transports/1.0.0/nodes_with_transports.yaml deleted file mode 100644 index a7b0fddc..00000000 --- a/templates/nodes_with_transports/1.0.0/nodes_with_transports.yaml +++ /dev/null @@ -1,32 +0,0 @@ -id: simple_multinode_with_transports -resources: -#% for i in range(count|int) %# - - id: ssh_transport#{i}# - from: resources/transport_ssh - input: - ssh_user: 
'vagrant' - ssh_key: '/vagrant/.vagrant/machines/solar-dev#{i + 1}#/virtualbox/private_key' - - id: rsync#{i}# - from: resources/transport_rsync - input: - user: vagrant - key: /vagrant/.vagrant/machines/solar-dev#{i + 1}#/virtualbox/private_key - - id: transports#{i}# - from: resources/transports - input: - transports: - - key: ssh_transport#{i}#::ssh_key - user: ssh_transport#{i}#::ssh_user - port: ssh_transport#{i}#::ssh_port - name: ssh_transport#{i}#::name - - key: rsync#{i}#::key - name: rsync#{i}#::name - user: rsync#{i}#::user - port: rsync#{i}#::port - - id: node#{i}# - from: resources/ro_node - input: - ip: '10.0.0.#{i + 3}#' - transports_id: transports#{i}#::transports_id - name: node#{i}# -#% endfor %# diff --git a/templates/not_provisioned_nodes/1.0.0/not_provisioned_nodes.yaml b/templates/not_provisioned_nodes/1.0.0/not_provisioned_nodes.yaml deleted file mode 100644 index db86a7e8..00000000 --- a/templates/not_provisioned_nodes/1.0.0/not_provisioned_nodes.yaml +++ /dev/null @@ -1,43 +0,0 @@ -id: not_provisioned_nodes -resources: -#% for node in nodes %# - #% set id = node.id | replace(':', '_') %# - - id: ssh_transport#{ id }# - from: resources/transport_ssh - input: - ssh_user: 'root' - ssh_key: '/vagrant/tmp/keys/ssh_private' - - id: transports#{id}# - from: resources/transports - input: - transports:key: ssh_transport#{id}#::ssh_key - transports:user: ssh_transport#{id}#::ssh_user - transports:port: ssh_transport#{id}#::ssh_port - transports:name: ssh_transport#{id}#::name - - id: node_#{id}# - from: resources/not_provisioned_node - input: - ip: #{node.ip}# - transports_id: transports#{id}#::transports_id - name: node_#{id}# - admin_mac: #{node.mac}# -#% endfor %# - - - id: ssh_transport_master - from: resources/transport_ssh - input: - ssh_user: 'vagrant' - ssh_key: '/vagrant/.vagrant/machines/solar-dev/virtualbox/private_key' - - id: transports_master - from: resources/transports - input: - transports:key: ssh_transport_master::ssh_key - transports:user: ssh_transport_master::ssh_user - transports:port: ssh_transport_master::ssh_port - transports:name: ssh_transport_master::name - - id: node_master - from: resources/ro_node - input: - name: node_master - ip: '10.0.2.15' - transports_id: transports_master::transports_id diff --git a/templates/openstack_base/1.0.0/openstack_base.yaml b/templates/openstack_base/1.0.0/openstack_base.yaml deleted file mode 100644 index 28465e9e..00000000 --- a/templates/openstack_base/1.0.0/openstack_base.yaml +++ /dev/null @@ -1,74 +0,0 @@ -id: openstack_base - -resources: - - id: admin_tenant - from: resources/keystone_tenant - input: - tenant_name: admin - keystone_port: '#{keystone_admin_port}#' - keystone_host: '#{keystone_ip}#' - admin_token: '#{admin_token}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: admin_user - from: resources/keystone_user - input: - user_name: 'admin' - user_password: 'admin' - tenant_name: 'admin_tenant::tenant_name' - keystone_port: '#{keystone_admin_port}#' - keystone_host: '#{keystone_ip}#' - admin_token: '#{admin_token}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: admin_role - from: resources/keystone_role - input: - role_name: 'admin' - user_name: 'admin_user::user_name' - tenant_name: 'admin_user::tenant_name' - keystone_port: '#{keystone_admin_port}#' - keystone_host: '#{keystone_ip}#' - admin_token: '#{admin_token}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: keystone_service_endpoint - from: 
resources/keystone_service_endpoint - input: - #% raw %# - adminurl: 'http://#{admin_ip}#:#{admin_port}#/v2.0' - internalurl: 'http://#{internal_ip}#:#{internal_port}#/v2.0' - publicurl: 'http://#{public_ip}#:#{public_port}#/v2.0' - #% endraw %# - description: 'OpenStack Identity Service' - type: 'identity' - endpoint_name: 'keystone' - admin_port: '#{keystone_admin_port}#' - public_port: '#{keystone_port}#' - internal_port: '#{keystone_port}#' - admin_ip: '#{ip}#' - public_ip: '#{ip}#' - internal_ip: '#{ip}#' - keystone_admin_port: '#{keystone_admin_port}#' - keystone_host: '#{keystone_ip}#' - admin_token: '#{admin_token}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' - - - id: service_tenant - from: resources/keystone_tenant - input: - tenant_name: services - keystone_port: '#{keystone_admin_port}#' - keystone_host: '#{keystone_ip}#' - admin_token: '#{admin_token}#' - ip: '#{ip}#' - ssh_user: '#{ssh_user}#' - ssh_key: '#{ssh_key}#' diff --git a/templates/profile/1.0.0/profile.yaml b/templates/profile/1.0.0/profile.yaml deleted file mode 100644 index 956a6bd3..00000000 --- a/templates/profile/1.0.0/profile.yaml +++ /dev/null @@ -1,112 +0,0 @@ -id: #{id}# -type: profile - -extensions: - - id: file_discovery - version: '1.0.0' - - id: ansible - version: '1.0.0' - -tags: #{tags}# - -connections: - # keystone - - for_resources: [resource/mariadb_service] - filter_resources: [resource/mariadb_keystone_db, resource/mariadb_keystone_user] - mapping: - root_password: login_password - port: login_port - - - for_resources: [resource/mariadb_keystone_db] - filter_resources: [resource/mariadb_keystone_user] - mapping: - db_name: db_name - - - for_resources: [resource/mariadb_service] - filter_resources: [resource/keystone_config] - mapping: - ip: db_host - port: db_port - - - for_resources: [resource/mariadb_keystone_user] - filter_resources: [resource/keystone_config] - mapping: - db_name: db_name - new_user_name: db_user - new_user_password: db_password - - - for_resources: [resource/keystone_config] - filter_resources: [resource/keystone_service] - mapping: - config_dir: config_dir - - # haproxy - - for_resources: [resource/keystone_service] - filter_resources: [resource/haproxy_keystone_config] - mapping: - ip: servers - port: ports - - - for_resources: [resource/haproxy_keystone_config] - filter_resources: [resource/haproxy_config] - mapping: - listen_port: listen_ports - name: configs_names - ports: configs_ports - servers: configs - - - for_resources: [resource/haproxy_config] - filter_resources: [resource/haproxy_service] - mapping: - listen_ports: ports - config_dir: host_binds - - # keystone data - - for_resources: [resource/keystone_config] - filter_resources: [resource/keystone_tenant] - mapping: {} - - - for_resources: [resource/keystone_service] - filter_resources: [resource/keystone_tenant] - mapping: - admin_port: keystone_port - ip: keystone_host - - - for_resources: [resource/keystone_tenant] - filter_resources: [resource/keystone_user] - mapping: {} - - - for_resources: [resource/keystone_user] - filter_resources: [resource/keystone_role] - mapping: {} - - - for_resources: [resource/keystone_service] - filter_resources: [resource/keystone_service_endpoint] - mapping: - admin_port: admin_port - ip: keystone_host - port: port - - - for_resources: [resource/keystone_config] - filter_resources: [resource/keystone_service_endpoint] - mapping: - admin_token: admin_token - - - for_resources: [resource/keystone_service] - filter_resources: 
[resource/keystone_service_endpoint] - mapping: - admin_port: keystone_port - - # rabbitmq - - for_resources: [resource/rabbitmq_service] - filter_resources: [resource/rabbitmq_vhost] - mapping: {} - - - for_resources: [resource/rabbitmq_service] - filter_resources: [resource/rabbitmq_user] - mapping: {} - - - for_resources: [resource/rabbitmq_vhost] - filter_resources: [resource/rabbitmq_user] - mapping: - vhost_name: vhost_name diff --git a/templates/seed_node/1.0.0/seed_node.yaml b/templates/seed_node/1.0.0/seed_node.yaml deleted file mode 100644 index acfb8e15..00000000 --- a/templates/seed_node/1.0.0/seed_node.yaml +++ /dev/null @@ -1,8 +0,0 @@ -id: seed_node -resources: - - id: seed_node - from: resources/ro_node - input: - ip: '10.0.0.2' - ssh_key: '/vagrant/.vagrant/machines/solar-dev/virtualbox/private_key' - ssh_user: 'vagrant' diff --git a/templates/sources/1.0.0/sources.yaml b/templates/sources/1.0.0/sources.yaml deleted file mode 100644 index 1022366f..00000000 --- a/templates/sources/1.0.0/sources.yaml +++ /dev/null @@ -1,8 +0,0 @@ -id: sources -resources: - - id: sources#{index}# - from: resources/sources - location: #{node}# - input: - sources: - - {src: /tmp/sources_test, dst: /tmp/sources_test}
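
Note on the template syntax in the removed templates/*.yaml files above: values are substituted with '#{ ... }#' and control blocks use '#% ... %#' (with '#% raw %#' escaping). The snippet below is a minimal illustrative sketch only, not Solar's actual renderer, showing how such a file could be rendered with stock Jinja2 configured with those delimiters; the index/node values are invented for the example.

    import textwrap

    import yaml
    from jinja2 import Environment

    # Jinja2 environment mimicking the '#{ }#' / '#% %#' delimiters used by the
    # deleted template files (assumption: Solar's own composer may configure this differently).
    env = Environment(
        variable_start_string='#{', variable_end_string='}#',
        block_start_string='#%', block_end_string='%#',
    )

    # Body of templates/sources/1.0.0/sources.yaml from the hunk above.
    template_text = textwrap.dedent("""\
        id: sources
        resources:
          - id: sources#{index}#
            from: resources/sources
            location: #{node}#
            input:
              sources:
                - {src: /tmp/sources_test, dst: /tmp/sources_test}
    """)

    # 'index' and 'node' are hypothetical values; in practice they come from the caller.
    rendered = env.from_string(template_text).render(index=1, node='node1')
    print(yaml.safe_load(rendered))
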
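Similarly, the removed transport_torrent helper (copied to /var/tmp/solar_torrent.py by its run.yaml) parses argv[1] as a mode flag ('g' to fetch, 's' to seed) and argv[2] as a ';'-separated list of 'save_path|magnet_or_torrent' pairs. A hedged usage sketch with an invented destination path and magnet link follows; it assumes the system python with the python-libtorrent bindings that run.yaml installs.

    import subprocess

    # Each entry is (save_path, magnet link or .torrent path); these values are illustrative only.
    pairs = [("/var/lib/solar/fetched", "magnet:?xt=urn:btih:EXAMPLEHASH")]
    torrents_arg = ";".join("|".join(pair) for pair in pairs)

    # 'g' runs the getter; once the download completes the script re-executes itself
    # with mode 's' to keep seeding for other nodes.
    subprocess.check_call(["python", "/var/tmp/solar_torrent.py", "g", torrents_arg])
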