Update role for stable/mitaka testing
This updates the repository SHAs to use stable/mitaka where it is available, and newer SHAs where it is not. It also updates the paste, policy, and rootwrap configurations to match the current contents of stable/mitaka.

Change-Id: If1ad0e508866d2f6022ab2f20ce991733cebd384
parent f0fe53ddc1
commit dc105f0b83
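For context, the branch/SHA pinning pattern this change applies to the role's test variables is sketched below. This is a minimal, illustrative excerpt assembled from the hunks in this diff; the variable names and values are taken from the diff itself, and the exact file they live in is not shown here:

    # Build the services under test from the stable branch being validated...
    keystone_git_install_branch: stable/mitaka
    neutron_git_install_branch: stable/mitaka

    # ...while pinning the requirements repository to a fixed SHA for reproducible test runs.
    keystone_requirements_git_install_branch: f8cf7eba898a1424549c730d6692ec4e9573c0ed  # HEAD of "master" as of 23.03.2016
    neutron_requirements_git_install_branch: f8cf7eba898a1424549c730d6692ec4e9573c0ed  # HEAD of "master" as of 23.03.2016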
@@ -50,8 +50,3 @@ conntrack: CommandFilter, conntrack, root
# keepalived state change monitor
keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root

# For creating namespace local /etc
rt_tables_mkdir: RegExpFilter, mkdir, root, mkdir, -p, /etc/netns/qrouter-[^/].*
rt_tables_chown: RegExpFilter, chown, root, chown, [1-9][0-9].*, /etc/netns/qrouter-[^/].*
rt_tables_rmdir: RegExpFilter, rm, root, rm, -r, -f, /etc/netns/qrouter-[^/].*
@@ -18,3 +18,8 @@ bridge: CommandFilter, bridge, root
ip: IpFilter, ip, root
find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
ip_exec: IpNetnsExecFilter, ip, root

# tc commands needed for QoS support
tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, root
tc_show: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
@@ -17,9 +17,6 @@ paste.filter_factory = oslo_middleware:CatchErrors.factory
[filter:cors]
paste.filter_factory = oslo_middleware.cors:filter_factory
oslo_config_project = neutron
latent_allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID
latent_expose_headers = X-Auth-Token, X-Subject-Token, X-Service-Token, X-OpenStack-Request-ID
latent_allow_methods = GET, PUT, POST, DELETE, PATCH

[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
@@ -4,7 +4,7 @@
"admin_or_owner": "rule:context_is_admin or rule:owner",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_owner_or_network_owner": "rule:admin_or_network_owner or rule:owner",
"admin_owner_or_network_owner": "rule:owner or rule:admin_or_network_owner",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
@@ -43,8 +43,11 @@
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"get_network_ip_availabilities": "rule:admin_only",
"get_network_ip_availability": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:is_default": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
@@ -60,30 +63,30 @@

"network_device": "field:port:device_owner=~^network:",
"create_port": "",
"create_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:mac_address": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"create_port:allowed_address_pairs": "rule:admin_or_network_owner",
"get_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"get_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:device_owner": "not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:mac_address": "rule:admin_only or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:port_security_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:mac_learning_enabled": "rule:context_is_advsvc or rule:admin_or_network_owner",
"update_port:allowed_address_pairs": "rule:admin_or_network_owner",
"delete_port": "rule:admin_owner_or_network_owner or rule:context_is_advsvc",
"delete_port": "rule:context_is_advsvc or rule:admin_owner_or_network_owner",

"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
@@ -203,5 +206,29 @@

"create_flavor_service_profile": "rule:admin_only",
"delete_flavor_service_profile": "rule:admin_only",
"get_flavor_service_profile": "rule:regular_user"
"get_flavor_service_profile": "rule:regular_user",
"get_auto_allocated_topology": "rule:admin_or_owner",

"get_bgp_speaker": "rule:admin_only",
"create_bgp_speaker": "rule:admin_only",
"update_bgp_speaker": "rule:admin_only",
"delete_bgp_speaker": "rule:admin_only",

"get_bgp_peer": "rule:admin_only",
"create_bgp_peer": "rule:admin_only",
"update_bgp_peer": "rule:admin_only",
"delete_bgp_peer": "rule:admin_only",

"add_bgp_peer": "rule:admin_only",
"remove_bgp_peer": "rule:admin_only",

"add_gateway_network": "rule:admin_only",
"remove_gateway_network": "rule:admin_only",

"get_advertised_routes":"rule:admin_only",

"add_bgp_speaker_to_dragent": "rule:admin_only",
"remove_bgp_speaker_from_dragent": "rule:admin_only",
"list_bgp_speaker_on_dragent": "rule:admin_only",
"list_dragent_hosting_bgp_speaker": "rule:admin_only"
}
@@ -78,6 +78,8 @@
keystone_galera_database: keystone
keystone_venv_tag: testing
keystone_developer_mode: true
keystone_git_install_branch: stable/mitaka
keystone_requirements_git_install_branch: f8cf7eba898a1424549c730d6692ec4e9573c0ed # HEAD of "master" as of 23.03.2016
keystone_auth_admin_token: SuperSecreteTestToken
keystone_auth_admin_password: SuperSecretePassword
keystone_service_password: secrete
@@ -107,10 +107,8 @@
nova_service_user_domain_id: default
nova_service_user_name: nova
neutron_venv_tag: testing
#TODO(jmccrory): Remove when requirements_git_install_branch is bumped in openstack-ansible
# Pinned eventlet version in current SHA is no longer available on PyPI
# https://review.openstack.org/#/c/277912/
neutron_requirements_git_install_branch: 1a6867129b19c1a34c8fe694d9062eca427507fe
neutron_git_install_branch: stable/mitaka
neutron_requirements_git_install_branch: f8cf7eba898a1424549c730d6692ec4e9573c0ed # HEAD of "master" as of 23.03.2016
neutron_developer_mode: true
neutron_provider_networks:
  network_flat_networks: "flat"
tox.ini
@@ -112,15 +112,12 @@ commands =
rm -rf {homedir}/.ansible
git clone https://git.openstack.org/openstack/openstack-ansible-plugins \
    {homedir}/.ansible/plugins
curl --create-dirs -o {homedir}/.ansible/vars/openstack_services.yml \
    https://git.openstack.org/cgit/openstack/openstack-ansible/plain/playbooks/defaults/repo_packages/openstack_services.yml
ansible-galaxy install \
    --role-file={toxinidir}/tests/ansible-role-requirements.yml \
    --ignore-errors \
    --force
ansible-playbook -i {toxinidir}/tests/inventory \
    -e "rolename={toxinidir}" \
    -e "@{homedir}/.ansible/vars/openstack_services.yml" \
    -vv \
    {toxinidir}/tests/test.yml