Install Ocp on Osp
This patch introduces a new workload called "ocp_on_osp". This workload installs an OpenShift cluster on OpenStack as part of Dynamic Workloads.

Change-Id: I5c0a1ef04bf0e82563e7d17fdd9688f938d1e33f
parent 61dc15961e · commit 0ae5a7cebc
@@ -681,13 +681,13 @@ workloads:
 iface_name: "ens7f0"
 iface_mac: "3c:fd:fe:c1:73:40"
 num_vms_provider_net: 2
-shift_on_stack_job_iterations: 100
-shift_on_stack_qps: 20
-shift_on_stack_burst: 20
-# shift_on_stack_workload can be poddensity, clusterdensity, maxnamespaces,
+e2e_kube_burner_job_iterations: 100
+e2e_kube_burner_qps: 20
+e2e_kube_burner_burst: 20
+# e2e_kube_burner_workload can be poddensity, clusterdensity, maxnamespaces,
 # or maxservices
-shift_on_stack_workload: poddensity
-shift_on_stack_kubeconfig_paths:
+e2e_kube_burner_workload: poddensity
+ocp_kubeconfig_paths:
   - /home/stack/.kube/config
 # External networks with /23 ranges will be created by dynamic workloads.
 # All these external networks will share the first 16 bits.
@@ -703,9 +703,15 @@ workloads:
 # delete_loadbalancers, delete_members_random_lb, pod_fip_simulation,
 # add_subports_to_random_trunks, delete_subports_from_random_trunks,
 # swap_floating_ips_between_random_subports, provider_netcreate_nova_boot_ping,
-# provider_net_nova_boot_ping, provider_net_nova_delete, shift_on_stack
-# Note: Octavia, Provider and Shift-on-Stack scenarios are not included in 'all'
+# provider_net_nova_boot_ping, provider_net_nova_delete, e2e_kube_burner, ocp_on_osp
+# Note: Octavia, Provider, e2e_kube_burner and ocp_on_osp scenarios are not included in 'all'
 # by default, and have to be included separately.
+# Steps for running the ocp_on_osp workload, given that the underlying OpenStack has been
+# deployed with the necessary dependencies:
+# 1) Pass your pull secret in ocp_on_osp/vars/shift_stack_vars.yaml
+# 2) To change any default params (like ocp_release, worker count, master and worker flavors, etc.), refer to ocp_on_osp/vars/shift_stack_vars.yaml
+# 3) Run "ansible-playbook -vvv ocp_on_osp/ocp_bootstrap.yml"
+# 4) Then run the ocp_on_osp workload. NOTE: use "ansible-playbook -vvv ocp_on_osp/ocp_cleanup.yml" for cleanup.
 
 workloads: all
 file: rally/rally-plugins/dynamic-workloads/dynamic_workload.yml
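For reference, a minimal sketch of how the new workload could be selected in this config, assuming the usual comma-separated `workloads` string used by the dynamic workload (values are illustrative; per the note above, ocp_on_osp and e2e_kube_burner are not part of 'all' and must be named explicitly):

    workloads: ocp_on_osp,e2e_kube_burner
    e2e_kube_burner_workload: poddensity
    e2e_kube_burner_job_iterations: 100
    e2e_kube_burner_qps: 20
    e2e_kube_burner_burst: 20
    ocp_kubeconfig_paths:
      - /home/stack/.kube/config
    file: rally/rally-plugins/dynamic-workloads/dynamic_workload.yml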
ocp_on_osp/create_ocp_infra_nodes.yml (new file, 122 lines)
---
- hosts: localhost
  vars_files:
    - vars/shift_stack_vars.yaml
  environment:
    KUBECONFIG: /home/stack/ocp_clusters/{{ ocp_cluster_name }}/auth/kubeconfig
  tasks:
    - name: Get cluster name
      shell: |
        {%raw%}oc get machineset -n openshift-machine-api -o=go-template='{{ (index (index .items 0).metadata.labels {%endraw%} "{{ machineset_metadata_label_prefix }}/cluster-api-cluster" {%raw%}) }}'{%endraw%}
      register: cluster_name

    - name: Get current ready node count
      shell: oc get nodes | grep " Ready" -ic
      register: current_node_count

    - name: (OSP) Template out machineset yamls
      template:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
      with_items:
        - src: osp-infra-node-machineset.yml.j2
          dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/infra-node-machineset.yml"
          toggle: "{{ ocp_create_infra_nodes }}"
        - src: osp-workload-node-machineset.yml.j2
          dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/workload-node-machineset.yml"
          toggle: "{{ ocp_create_workload_nodes }}"
      when:
        - item.toggle|bool

    - name: Create machinesets
      shell: |
        oc create -f {{ item.ms }}
      when: item.toggle|bool
      with_items:
        - ms: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/infra-node-machineset.yml"
          toggle: "{{ ocp_create_infra_nodes }}"
        - ms: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/workload-node-machineset.yml"
          toggle: "{{ ocp_create_workload_nodes }}"

    - name: Set expected node count
      set_fact:
        expected_node_count: "{{ current_node_count.stdout|int }}"

    - name: Increment expected node count with infra nodes
      set_fact:
        expected_node_count: "{{ expected_node_count|int + 3 }}"
      when: ocp_create_infra_nodes|bool

    - name: Increment expected node count with workload node
      set_fact:
        expected_node_count: "{{ expected_node_count|int + 1 }}"
      when: ocp_create_workload_nodes|bool

    - name: Poll nodes to see if creating nodes finished
      shell: oc get nodes | grep " Ready" -ic
      register: current_node_count
      until: current_node_count.stdout|int >= (expected_node_count|int)
      delay: 30
      retries: "{{ ocp_post_install_poll_attempts|int }}"

    - name: Relabel the infra nodes
      shell: |
        oc label nodes --overwrite -l 'node-role.kubernetes.io/infra=' node-role.kubernetes.io/worker-
      when: ocp_create_infra_nodes|bool

    - name: Relabel the workload node
      shell: |
        oc label nodes --overwrite -l 'node-role.kubernetes.io/workload=' node-role.kubernetes.io/worker-
      when: ocp_create_workload_nodes|bool

    - name: Add additional label to worker nodes to provide ability to isolate workloads on workers
      shell: |
        oc label nodes --overwrite -l 'node-role.kubernetes.io/worker=' computenode=true

    - name: Taint the workload node
      shell: |
        oc adm taint node -l node-role.kubernetes.io/workload= role=workload:NoSchedule --overwrite=true
      when: ocp_create_workload_nodes|bool

    - name: Copy new cluster-monitoring-config
      template:
        src: cluster-monitoring-config.yml.j2
        dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/cluster-monitoring-config.yml"
      when: ocp_create_infra_nodes|bool

    - name: Replace the cluster-monitoring-config ConfigMap
      shell: |
        oc create -f /home/stack/ocp_clusters/{{ ocp_cluster_name }}/cluster-monitoring-config.yml
      ignore_errors: yes
      when: ocp_create_infra_nodes|bool

    - name: Apply new nodeSelector to infra workload components
      shell: |
        oc patch {{ item.object }} {{ item.type|default('',True) }} -n {{ item.namespace }} -p {{ item.patch }}
      with_items:
        - namespace: openshift-ingress-operator
          object: ingresscontrollers/default
          patch: |
            '{"spec": {"nodePlacement": {"nodeSelector": {"matchLabels": {"node-role.kubernetes.io/infra": ""}}}}}'
          type: "--type merge"
        - namespace: openshift-image-registry
          object: deployment.apps/image-registry
          patch: |
            '{"spec": {"template": {"spec": {"nodeSelector": {"node-role.kubernetes.io/infra": ""}}}}}'
      when: ocp_create_infra_nodes|bool

    - name: Deploy dittybopper
      block:
        - name: clone dittybopper
          git:
            repo: 'https://github.com/cloud-bulldozer/performance-dashboards.git'
            dest: "{{ ansible_user_dir }}/ocp_clusters/{{ ocp_cluster_name }}/performance-dashboards"
            force: yes

        - name: Deploy mutable Grafana
          command: ./deploy.sh
          args:
            chdir: "{{ ansible_user_dir }}/ocp_clusters/{{ ocp_cluster_name }}/performance-dashboards/dittybopper"
          environment:
            KUBECONFIG: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/auth/kubeconfig"
      when: dittybopper_enable|bool
ocp_on_osp/ocp_bootstrap.yml (new file, 75 lines)
---
- hosts: localhost
  gather_facts: true
  vars_files:
    - vars/shift_stack_vars.yaml
  tasks:
    - name: Get Binaries
      block:
        - name: Set url for installer
          set_fact:
            ocp_installer_url: https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/
          when: ocp_dev_preview == false

        - name: Set url for installer
          set_fact:
            ocp_installer_url: https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp-dev-preview/
          when: ocp_dev_preview

        - name: Get the latest installer
          get_url:
            validate_certs: no
            force: true
            url: "{{ ocp_installer_url }}{{ ocp_release }}/openshift-install-linux.tar.gz"
            dest: /home/stack/openshift-install-linux.tar.gz

        - name: Untar installer
          unarchive:
            src: /home/stack/openshift-install-linux.tar.gz
            dest: /home/stack/
            remote_src: yes

        - name: Get the ocp client
          get_url:
            validate_certs: no
            force: true
            url: "{{ ocp_installer_url }}{{ ocp_release }}/openshift-client-linux.tar.gz"
            dest: /home/stack/openshift-client-linux.tar.gz

        - name: Untar ocp client
          unarchive:
            src: /home/stack/openshift-client-linux.tar.gz
            dest: /home/stack/
            remote_src: yes
            mode: 0700

        - name: Copy oc to bin
          become: true
          shell: |
            cp /home/stack/oc /usr/local/bin
            cp /home/stack/kubectl /usr/local/bin
            chmod a+x /usr/local/bin/oc
            chmod a+x /usr/local/bin/kubectl
      # End block

    - name: create flavors
      include_tasks: tasks/create_flavors.yml

    - name: create new overcloudrc file as ocp_venvrc, to be used while setting shiftstack quotas to unlimited
      shell: |
        sed 's/.*OS_COMPUTE_API_VERSION.*/export OS_COMPUTE_API_VERSION=2.79/' /home/stack/overcloudrc > /home/stack/ocp_venvrc

    - name: create ansible log directory
      shell: mkdir -p /home/stack/ocp_ansible_logs/

    - name: flush iptables
      shell: sudo iptables --flush

    - name: get default route
      shell: |
        sudo ip r | grep default | cut -d ' ' -f5
      register: default_route

    - name: masquerade on public interface
      shell: |
        sudo iptables -t nat -A POSTROUTING -o {{ default_route.stdout }} -j MASQUERADE
ocp_on_osp/ocp_cleanup.yml (new file, 82 lines)
---
- name: Determine undercloud and allocate inventory
  hosts: localhost
  gather_facts: true
  tasks:
    - name: list openstack projects for installed clusters
      shell: |
        source /home/stack/overcloudrc
        openstack project list -c Name -f value | grep rhocp
      register: osp_project_list

    - name: Add undercloud host to inventory for each cluster that is to be deleted
      add_host:
        name: "undercloud-{{item}}"
        group: undercloud
        ansible_connection: local
        ansible_python_interpreter: "{{ansible_playbook_python}}"
        osp_project_name: "{{item}}"
      loop: "{{ osp_project_list.stdout_lines }}"

- hosts: undercloud
  gather_facts: true
  vars:
    ocp_cluster_name: "{{ osp_project_name }}"
    osp_user_name: "{{ osp_project_name }}"
  tasks:
    - name: Run openshift-installer destroy cluster
      shell: |
        ./openshift-install --log-level=debug destroy cluster --dir=/home/stack/ocp_clusters/{{osp_project_name}}/ > /home/stack/ocp_clusters/{{osp_project_name}}/ocp_destroy.log 2>&1
      args:
        chdir: /home/stack/
      ignore_errors: true

    - name: Delete floating ips
      shell: |
        source /home/stack/overcloudrc
        for i in $(openstack floating ip list --project {{ osp_project_name }} -c ID -f value); do openstack floating ip delete $i; done

    - name: Delete security groups
      shell: |
        source /home/stack/overcloudrc
        for i in $(openstack security group list --project {{ osp_project_name }} -c ID -f value); do openstack security group delete $i; done

    - name: Delete shiftstack project
      os_project:
        cloud: "overcloud"
        state: absent
        name: "{{ osp_project_name }}"
        domain: default

    - name: Delete shiftstack user
      os_user:
        cloud: "overcloud"
        state: absent
        name: "{{ osp_user_name }}"
        domain: default

    - name: remove ocp_clusters directory
      file:
        path: /home/stack/ocp_clusters/{{ ocp_cluster_name }}
        state: absent
      ignore_errors: true

    - name: remove ansible log files
      shell: rm /home/stack/ocp_ansible_logs/*
      ignore_errors: true

    - name: Remove DNS detail in /etc/hosts
      blockinfile:
        backup: true
        path: "/etc/hosts"
        marker: "# {mark} {{ ocp_cluster_name }} OCP CLUSTER MANAGED BLOCK"
      become: yes
      become_user: root
      throttle: 1

    - name: Remove cloud parameters
      blockinfile:
        dest: "/home/stack/clouds.yaml"
        marker: "# {mark} {{ ocp_cluster_name }} OCP CLUSTER MANAGED BLOCK"
      throttle: 1
ocp_on_osp/ocp_on_osp.yml (new file, 256 lines)
---
- name: Create OCP cluster
  hosts: localhost
  gather_facts: true
  vars_files:
    - vars/shift_stack_vars.yaml
  vars:
    random_str: "{{ lookup('password', '/dev/null length=9 chars=ascii_lowercase,digits') + 'rhocp' }}"
  tasks:
    - name: Set facts for OCP vars
      set_fact:
        osp_project_name: "{{ random_str }}"
        osp_user_name: "{{ random_str }}"
        ocp_cluster_name: "{{ random_str }}"

    - name: Fail on cluster directory existing (Safeguard to prevent creating duplicate clusters)
      stat:
        path: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}"
      register: cluster_dir_check
      failed_when: cluster_dir_check.stat.exists

    - name: Create ocp_clusters directories
      file:
        path: "{{ item }}"
        state: directory
      with_items:
        - /home/stack/ocp_clusters
        - "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/"

    - name: create new user and project
      vars:
        project_name: "{{ osp_project_name }}"
        user_name: "{{ osp_user_name }}"
      include_tasks: tasks/create_project_and_user.yml

    - name: Get shiftstack tenant id
      shell: |
        source /home/stack/overcloudrc
        openstack project show {{ osp_project_name }} -f value -c id
      register: shiftstack_id
      args:
        chdir: /home/stack

    - name: set shiftstack quotas to unlimited
      shell: |
        source /home/stack/ocp_venvrc
        openstack quota set --properties -1 --server-groups -1 --server-group-members -1 --ram -1 --key-pairs -1 --instances -1 --cores -1 --per-volume-gigabytes -1 --gigabytes -1 --backup-gigabytes -1 --snapshots -1 --volumes -1 --backups -1 --subnetpools -1 --ports -1 --subnets -1 --networks -1 --floating-ips -1 --secgroup-rules -1 --secgroups -1 --routers -1 --rbac-policies -1 {{ osp_project_name }}
      args:
        chdir: /home/stack

    - name: Create shiftstackrc
      shell: |
        sed -e 's/OS_USERNAME=.*/OS_USERNAME={{ osp_user_name }}/' \
            -e 's/OS_PROJECT_NAME=.*/OS_PROJECT_NAME={{ osp_project_name }}/' \
            -e 's/OS_CLOUDNAME=.*/OS_CLOUDNAME={{ osp_project_name }}/' \
            -e 's/OS_PASSWORD=.*/OS_PASSWORD=redhat/' overcloudrc > /home/stack/ocp_clusters/{{ocp_cluster_name}}/shiftstackrc
      args:
        chdir: /home/stack/

    - name: Get cloud.yaml variables
      shell: |
        for key in $( set | awk '{FS="="} /^OS_/ {print $1}' ); do unset $key ; done
        source /home/stack/ocp_clusters/{{ocp_cluster_name}}/shiftstackrc
        echo -n " {{ osp_project_name }}: \
          {'auth': \
            { 'auth_url': '$OS_AUTH_URL', \
              'username': '$OS_USERNAME', \
              'password': '$OS_PASSWORD', \
              $(if [ -n "$OS_USER_DOMAIN_NAME" ]; then echo "'user_domain_name': '${OS_USER_DOMAIN_NAME}',"; fi) \
              $(if [ -n "$OS_PROJECT_DOMAIN_NAME" ]; then echo "'project_domain_name': '${OS_PROJECT_DOMAIN_NAME}',"; fi) \
              'project_name': '${OS_PROJECT_NAME:-$OS_TENANT_NAME}' \
            } $(if [ -n "$OS_IDENTITY_API_VERSION" ]; then echo ",'identity_api_version': '${OS_IDENTITY_API_VERSION}'"; fi) }"
      register: cloud_details

    - name: Set clouds_yaml fact
      set_fact:
        clouds_yaml: "{{ cloud_details.stdout|from_yaml }}"

    - name: Insert clouds.yaml parameters
      blockinfile:
        dest: "/home/stack/clouds.yaml"
        block: |5
          {{ clouds_yaml|to_nice_yaml(indent=4) }}
        insertbefore: "EOF"
        marker: "# {mark} {{ ocp_cluster_name }} OCP CLUSTER MANAGED BLOCK"
      throttle: 1

    - name: Read ssh key file
      slurp:
        src: "{{ ssh_key_file }}"
      register: ssh_key_content

    - name: Set pull secret
      set_fact:
        ocp_pull_secret: "{{ pull_secret| to_yaml }}"
      when: pull_secret is defined

    - name: Set pull secret from environment variable
      set_fact:
        ocp_pull_secret: "{{ lookup('env', 'OPENSHIFT_INSTALL_PULL_SECRET') }}"
      when: pull_secret is not defined

    - name: Set ssh_public_key fact
      set_fact:
        ssh_public_key: "{{ ssh_key_content['content'] | b64decode }}"

    - name: Use floating ips with DNS
      block:
        - name: Determine API and Ingress floating ips
          set_fact:
            api_floating_ip: "{{ external_net_cidr | next_nth_usable(3) }}"
            ingress_floating_ip: "{{ external_net_cidr | next_nth_usable(4) }}"

        - name: Create api floating ip
          shell: |
            source /home/stack/overcloudrc
            openstack floating ip create --project {{ shiftstack_id.stdout }} --floating-ip-address {{ api_floating_ip }} {{ public_net_name }}

        - name: Create ingress floating ip
          shell: |
            source /home/stack/overcloudrc
            openstack floating ip create --project {{ shiftstack_id.stdout }} --floating-ip-address {{ ingress_floating_ip }} {{ public_net_name }}
      when: random_fip == false

    - name: Use random floating ips
      block:
        - name: Create random api floating ip
          shell: |
            source /home/stack/overcloudrc
            openstack floating ip create --project {{ shiftstack_id.stdout }} {{ public_net_name }} -f value -c floating_ip_address
          register: api_fip

        - name: Create random ingress floating ip
          shell: |
            source /home/stack/overcloudrc
            openstack floating ip create --project {{ shiftstack_id.stdout }} {{ public_net_name }} -f value -c floating_ip_address
          register: ingress_fip

        - name: Set floating ips for install-config.yaml
          set_fact:
            api_floating_ip: "{{ api_fip.stdout }}"
            ingress_floating_ip: "{{ ingress_fip.stdout }}"
      when: random_fip == true

    - name: Add DNS detail in /etc/hosts
      blockinfile:
        path: "/etc/hosts"
        backup: true
        block: |
          {{ api_floating_ip }} api.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
          {{ ingress_floating_ip }} oauth-openshift.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
          {{ ingress_floating_ip }} console-openshift-console.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
          {{ ingress_floating_ip }} downloads-openshift-console.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
          {{ ingress_floating_ip }} alertmanager-main-openshift-monitoring.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
          {{ ingress_floating_ip }} grafana-openshift-monitoring.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
          {{ ingress_floating_ip }} prometheus-k8s-openshift-monitoring.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
          {{ ingress_floating_ip }} thanos-querier-openshift-monitoring.apps.{{ ocp_cluster_name }}.{{ ocp_base_domain }}
        insertafter: "EOF"
        marker: "# {mark} {{ ocp_cluster_name }} OCP CLUSTER MANAGED BLOCK"
      become: yes
      become_user: root
      throttle: 1
      when: random_fip == true

    - name: Prepare install-config.yaml
      template:
        src: "{{ playbook_dir }}/templates/install-config.yaml.j2"
        dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/install-config.yaml"

    - name: Backup the install-config.yaml
      copy:
        src: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/install-config.yaml"
        dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/install-config.yaml.orig"
        remote_src: yes

    - name: Create ignition-configs with openshift-installer
      shell: |
        ./openshift-install --log-level=debug create ignition-configs --dir=/home/stack/ocp_clusters/{{ ocp_cluster_name }}/ > /home/stack/ocp_clusters/{{ ocp_cluster_name }}/create_ignition_configs.log 2>&1
      args:
        chdir: /home/stack/

    - name: Backup the ignition-configs
      copy:
        src: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/{{ item }}.ign"
        dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/{{ item }}.ign.orig"
        remote_src: yes
      with_items:
        - master
        - worker
        - bootstrap

    - name: Read original master.ign config file
      slurp:
        src: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/master.ign.orig"
      register: master_ign
      when: ocp_master_etcd_nvme

    - name: Get master etcd on nvme ignition configuration
      set_fact:
        master_ign_config_base: "{{ master_ign['content'] | b64decode }}"
        master_etcd_nvme_config: "{{ lookup('template', 'master-etcd.ign.json.j2') }}"
      when: ocp_master_etcd_nvme

    - name: Dump new version of master.ign file
      copy:
        content: "{{ master_ign_config_base | combine(master_etcd_nvme_config) | to_json }}"
        dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/{{ item }}"
      with_items:
        - master.ign
        - master.ign.etcd
      when: ocp_master_etcd_nvme

    - name: Read original worker.ign config file
      slurp:
        src: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/worker.ign.orig"
      register: worker_ign
      when: worker_nvme_ephemeral

    - name: Get worker nvme ephemeral storage ignition configuration
      set_fact:
        worker_ign_config_base: "{{ worker_ign['content'] | b64decode }}"
        worker_nvme_config: "{{ lookup('template', 'worker-nvme.ign.json.j2') }}"
      when: worker_nvme_ephemeral

    - name: Dump new version of worker.ign file
      copy:
        content: "{{ worker_ign_config_base | combine(worker_nvme_config) | to_json }}"
        dest: "/home/stack/ocp_clusters/{{ ocp_cluster_name }}/{{ item }}"
      with_items:
        - worker.ign
        - worker.ign.nvme
      when: worker_nvme_ephemeral

    - name: Run openshift-installer
      shell: |
        ./openshift-install --log-level=debug create cluster --dir=/home/stack/ocp_clusters/{{ ocp_cluster_name }}/ > /home/stack/ocp_clusters/{{ ocp_cluster_name }}/ocp_install.log 2>&1
      args:
        chdir: /home/stack/

    - name: create .kube dir on home
      file:
        path: /home/stack/.kube
        state: directory
        mode: a+rwx
      when: random_fip == false

    - name: copy the kubeconfig file
      copy:
        src: /home/stack/ocp_clusters/{{ ocp_cluster_name }}/auth/kubeconfig
        dest: /home/stack/.kube/config
        mode: a+rx
        remote_src: yes
      when: random_fip == false

- import_playbook: create_ocp_infra_nodes.yml
  when: (ocp_create_infra_nodes|bool == true or ocp_create_workload_nodes|bool == true)
ocp_on_osp/tasks/create_flavors.yml (new file, 25 lines)
---
- name: import flavors data
  include_vars:
    dir: "{{ playbook_dir }}/vars"
    files_matching: flavors.yaml

- name: create flavors
  os_nova_flavor:
    cloud: "overcloud"
    state: present
    name: "{{ item.name }}"
    ram: "{{ item.ram }}"
    vcpus: "{{ item.vcpus }}"
    disk: "{{ item.disk }}"
  with_items: "{{ flavors }}"

- name: create pci flavors
  os_nova_flavor:
    cloud: "overcloud"
    state: present
    name: "{{ item.name }}"
    ram: "{{ item.ram }}"
    vcpus: "{{ item.vcpus }}"
    disk: "{{ item.disk }}"
    extra_specs: "'pci_passthrough:alias'='nvme:1'"
  with_items: "{{ nvme_flavors }}"
ocp_on_osp/tasks/create_project_and_user.yml (new file, 39 lines)
---
- name: create shiftstack project
  os_project:
    cloud: "overcloud"
    state: present
    name: "{{ project_name }}"
    domain: default

- name: create user
  os_user:
    cloud: "overcloud"
    state: present
    name: "{{ user_name }}"
    password: "redhat"
    domain: default

- name: add member role
  os_user_role:
    cloud: "overcloud"
    state: present
    project: "{{ project_name }}"
    user: "{{ user_name }}"
    role: "member"

- name: add admin role
  os_user_role:
    cloud: "overcloud"
    state: present
    project: "{{ project_name }}"
    user: "{{ user_name }}"
    role: "admin"

- name: add swiftoperator role
  os_user_role:
    cloud: "overcloud"
    state: present
    project: "{{ project_name }}"
    user: "{{ user_name }}"
    role: "swiftoperator"
ocp_on_osp/templates/cluster-monitoring-config.yml.j2 (new file, 51 lines)
apiVersion: v1
kind: ConfigMap
data:
  config.yaml: |
    prometheusOperator:
      baseImage: quay.io/coreos/prometheus-operator
      prometheusConfigReloaderBaseImage: quay.io/coreos/prometheus-config-reloader
      configReloaderBaseImage: quay.io/coreos/configmap-reload
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    prometheusK8s:
      retention: {{ openshift_prometheus_retention_period }}
      baseImage: openshift/prometheus
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      volumeClaimTemplate:
        spec:
          storageClassName: {{ openshift_prometheus_storage_class }}
          resources:
            requests:
              storage: {{ openshift_prometheus_storage_size }}
    alertmanagerMain:
      baseImage: openshift/prometheus-alertmanager
      nodeSelector:
        node-role.kubernetes.io/infra: ""
      volumeClaimTemplate:
        spec:
          storageClassName: {{ openshift_alertmanager_storage_class }}
          resources:
            requests:
              storage: {{ openshift_alertmanager_storage_size }}
    nodeExporter:
      baseImage: openshift/prometheus-node-exporter
    kubeRbacProxy:
      baseImage: quay.io/coreos/kube-rbac-proxy
    kubeStateMetrics:
      baseImage: quay.io/coreos/kube-state-metrics
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    grafana:
      baseImage: grafana/grafana
      nodeSelector:
        node-role.kubernetes.io/infra: ""
    auth:
      baseImage: openshift/oauth-proxy
    k8sPrometheusAdapter:
      nodeSelector:
        node-role.kubernetes.io/infra: ""
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
ocp_on_osp/templates/install-config.yaml.j2 (new file, 51 lines)
#jinja2: trim_blocks:True, lstrip_blocks:True
apiVersion: v1
baseDomain: "{{ ocp_base_domain }}"
compute:
- name: worker
  replicas: {{ worker_nodes }}
{% if ocp_worker_flavor != "" %}
  platform:
    openstack:
      type: {{ ocp_worker_flavor }}
{% else %}
  platform: {}
{% endif %}
controlPlane:
  name: master
  replicas: {{ master_nodes }}
{% if ocp_master_flavor != "" %}
  platform:
    openstack:
      type: {{ ocp_master_flavor }}
{% else %}
  platform: {}
{% endif %}
metadata:
  name: "{{ ocp_cluster_name }}"
networking:
  clusterNetwork:
  - cidr: 10.128.0.0/13
    hostPrefix: 22
  serviceCIDR: 172.30.0.0/16
  machineCIDR: 10.0.0.0/16
  type: "{{ ocp_network_type }}"
platform:
  openstack:
{% if ocp_release[:3]|float >= 4.7 %}
    defaultMachinePlatform:
      type: {{ ocp_worker_flavor }}
{% else %}
    computeFlavor: "m4.xlarge"
{% endif %}
    cloud: "{{ osp_project_name }}"
    externalNetwork: "{{ public_net_name }}"
    region: "regionOne"
    lbFloatingIP: "{{ api_floating_ip }}"
    ingressFloatingIP: "{{ ingress_floating_ip }}"
    externalDNS: ["{{ ansible_dns.nameservers| join('", "') }}"]
    octaviaSupport: "1"
    trunkSupport: "1"
pullSecret: {{ ocp_pull_secret }}
sshKey: |
  {{ ssh_public_key }}
ocp_on_osp/templates/osp-infra-node-machineset.yml.j2 (new file, 66 lines)
apiVersion: v1
items:
- apiVersion: machine.openshift.io/v1beta1
  kind: MachineSet
  metadata:
    creationTimestamp: null
    labels:
      {{ machineset_metadata_label_prefix }}/cluster-api-cluster: {{ cluster_name.stdout }}
      {{ machineset_metadata_label_prefix }}/cluster-api-machine-role: infra
      {{ machineset_metadata_label_prefix }}/cluster-api-machine-type: infra
    name: {{ cluster_name.stdout }}-infra
    namespace: openshift-machine-api
  spec:
    replicas: 3
    selector:
      matchLabels:
        {{ machineset_metadata_label_prefix }}/cluster-api-cluster: {{ cluster_name.stdout }}
        {{ machineset_metadata_label_prefix }}/cluster-api-machineset: {{ cluster_name.stdout }}-infra
    template:
      metadata:
        creationTimestamp: null
        labels:
          {{ machineset_metadata_label_prefix }}/cluster-api-cluster: {{ cluster_name.stdout }}
          {{ machineset_metadata_label_prefix }}/cluster-api-machine-role: infra
          {{ machineset_metadata_label_prefix }}/cluster-api-machine-type: infra
          {{ machineset_metadata_label_prefix }}/cluster-api-machineset: {{ cluster_name.stdout }}-infra
      spec:
        metadata:
          creationTimestamp: null
          labels:
            node-role.kubernetes.io/infra: ""
        providerSpec:
          value:
            apiVersion: openstackproviderconfig.openshift.io/v1alpha1
            cloudName: openstack
            cloudsSecret:
              name: openstack-cloud-credentials
              namespace: openshift-machine-api
            flavor: "{{ openshift_infra_node_flavor }}"
            image: {{ cluster_name.stdout }}-rhcos
            kind: OpenstackProviderSpec
            metadata:
              creationTimestamp: null
            networks:
            - filter: {}
              subnets:
              - filter:
                  name: {{ cluster_name.stdout }}-nodes
                  tags: openshiftClusterID={{ cluster_name.stdout }}
            securityGroups:
            - filter: {}
              name: {{ cluster_name.stdout }}-worker
            serverMetadata:
              Name: {{ cluster_name.stdout }}-worker
              openshiftClusterID: {{ cluster_name.stdout }}
            tags:
            - openshiftClusterID={{ cluster_name.stdout }}
            trunk: true
            userDataSecret:
              name: worker-user-data
            versions:
              kubelet: ""
  status:
    replicas: 0
kind: List
metadata: {}
ocp_on_osp/templates/osp-workload-node-machineset.yml.j2 (new file, 66 lines)
apiVersion: v1
items:
- apiVersion: machine.openshift.io/v1beta1
  kind: MachineSet
  metadata:
    creationTimestamp: null
    labels:
      {{ machineset_metadata_label_prefix }}/cluster-api-cluster: {{ cluster_name.stdout }}
      {{ machineset_metadata_label_prefix }}/cluster-api-machine-role: workload
      {{ machineset_metadata_label_prefix }}/cluster-api-machine-type: workload
    name: {{ cluster_name.stdout }}-workload
    namespace: openshift-machine-api
  spec:
    replicas: 1
    selector:
      matchLabels:
        {{ machineset_metadata_label_prefix }}/cluster-api-cluster: {{ cluster_name.stdout }}
        {{ machineset_metadata_label_prefix }}/cluster-api-machineset: {{ cluster_name.stdout }}-workload
    template:
      metadata:
        creationTimestamp: null
        labels:
          {{ machineset_metadata_label_prefix }}/cluster-api-cluster: {{ cluster_name.stdout }}
          {{ machineset_metadata_label_prefix }}/cluster-api-machine-role: workload
          {{ machineset_metadata_label_prefix }}/cluster-api-machine-type: workload
          {{ machineset_metadata_label_prefix }}/cluster-api-machineset: {{ cluster_name.stdout }}-workload
      spec:
        metadata:
          creationTimestamp: null
          labels:
            node-role.kubernetes.io/workload: ""
        providerSpec:
          value:
            apiVersion: openstackproviderconfig.openshift.io/v1alpha1
            cloudName: openstack
            cloudsSecret:
              name: openstack-cloud-credentials
              namespace: openshift-machine-api
            flavor: "{{ openshift_workload_node_flavor }}"
            image: {{ cluster_name.stdout }}-rhcos
            kind: OpenstackProviderSpec
            metadata:
              creationTimestamp: null
            networks:
            - filter: {}
              subnets:
              - filter:
                  name: {{ cluster_name.stdout }}-nodes
                  tags: openshiftClusterID={{ cluster_name.stdout }}
            securityGroups:
            - filter: {}
              name: {{ cluster_name.stdout }}-worker
            serverMetadata:
              Name: {{ cluster_name.stdout }}-worker
              openshiftClusterID: {{ cluster_name.stdout }}
            tags:
            - openshiftClusterID={{ cluster_name.stdout }}
            trunk: true
            userDataSecret:
              name: worker-user-data
            versions:
              kubelet: ""
  status:
    replicas: 0
kind: List
metadata: {}
ocp_on_osp/vars/flavors.yaml (new file, 15 lines)
flavors:
  - { name: 'm4.xlarge', ram: 16384, vcpus: 4, disk: 40 }
  - { name: 'm1.small', ram: 1740, vcpus: 1, disk: 71 }
  - { name: 'm5.xlarge', ram: 16384, vcpus: 4, disk: 96 }
  - { name: 'm5.4xlarge', ram: 65280, vcpus: 16, disk: 300 }
  - { name: 'm5.2xlarge', ram: 31488, vcpus: 8, disk: 128 }
  - { name: 'm5.large', ram: 7936, vcpus: 2, disk: 96 }
  - { name: 'ci.master', ram: 124672, vcpus: 16, disk: 220 }
  - { name: 'ci.worker', ram: 31488, vcpus: 8, disk: 100 }
  - { name: 'ci.infra', ram: 124672, vcpus: 24, disk: 100 }
  - { name: 'ci.workload', ram: 65280, vcpus: 16, disk: 300 }
nvme_flavors:
  - { name: 'r5.4xlarge-pci', ram: 124672, vcpus: 16, disk: 128 }
  - { name: 'm5.10xlarge-pci', ram: 163584, vcpus: 40, disk: 256 }
  - { name: 'm5.4xlarge-pci', ram: 65280, vcpus: 16, disk: 200 }
ocp_on_osp/vars/shift_stack_vars.yaml (new file, 45 lines)
ocp_release: 4.10.18
ocp_dev_preview: false
install_client: true
ocp_base_domain: rdu2.scalelab.redhat.com

public_net_name: public

# By enabling random_fip, OCP will choose randomly available FIPs
# for api and apps during the OCP deployment.
random_fip: true

ocp_master_flavor: m4.xlarge
ocp_worker_flavor: m4.xlarge
ocp_cluster_id: "{{ ansible_date_time.epoch | to_uuid }}"
master_nodes: 3
worker_nodes: 3
# Specify the OpenShift SDN name, OpenShiftSDN or Kuryr
ocp_network_type: OpenShiftSDN

# If the master has a passthrough NVMe device, set to true to use this device for etcd
ocp_master_etcd_nvme: false
worker_nvme_ephemeral: false
worker_nvme_var_lib_size: 512000
worker_nvme_localstorage_tp_lv_count: 5
worker_nvme_localstorage_tp_lv_size: 90G
passthrough_nvme_device: /dev/nvme0n1

ssh_key_file: '/home/stack/.ssh/id_rsa.pub'

# When pull_secret is not set, the value is read from the
# OPENSHIFT_INSTALL_PULL_SECRET environment variable
pull_secret:

ocp_create_infra_nodes: true
ocp_create_workload_nodes: true
ocp_post_install_poll_attempts: 50
machineset_metadata_label_prefix: machine.openshift.io
openshift_alertmanager_storage_size: 2Gi
openshift_alertmanager_storage_class: standard
openshift_prometheus_storage_size: 10Gi
openshift_prometheus_storage_class: standard
openshift_prometheus_retention_period: 15d
openshift_workload_node_flavor: m4.xlarge
openshift_infra_node_flavor: m4.xlarge
dittybopper_enable: true
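Per steps 1 and 2 in the Browbeat config comments earlier in this change, the defaults above are what a user would edit before running ocp_bootstrap.yml. A hypothetical override sketch (keys taken from the vars file above; the values are illustrative only, not recommendations):

    ocp_release: 4.10.18                  # OCP version to deploy
    master_nodes: 3
    worker_nodes: 5                       # scale workers beyond the default of 3
    ocp_worker_flavor: m5.xlarge          # one of the flavors created from vars/flavors.yaml
    ocp_network_type: Kuryr               # or OpenShiftSDN
    pull_secret: '<your pull secret>'     # step 1; otherwise OPENSHIFT_INSTALL_PULL_SECRET is used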
@@ -43,9 +43,11 @@ Provider network:
 - provider_net_nova_boot_ping: Boots a VM and ping on random existing provider network
 - provider_net_nova_delete: Delete all VM's and provider network
 
-Shift on Stack:
+Ocp on Osp:
 
-- shift_on_stack: Runs specified kube-burner workload through e2e-benchmarking. e2e-benchmarking
+- ocp_on_osp: Installs an OpenShift cluster on OpenStack. This workload runs an ansible-playbook
+  which triggers an OpenShift installer-provisioned installation.
+- e2e_kube_burner: Runs specified kube-burner workload through e2e-benchmarking. e2e-benchmarking
   is a repository that contains scripts to stress Openshift clusters. This workload uses e2e-benchmarking
   to test Openshift on Openstack.
 
@@ -140,6 +142,7 @@ Provider Network:
 - provider_net_nova_boot_ping: Boots a VM and ping on random existing provider network
 - provider_net_nova_delete: Delete all VM's and provider network
 
-Shift on Stack:
+Ocp on Osp:
 
+- install_ocp_cluster: Starts an ansible-playbook which triggers an Openshift Installer-provisioned installation.
 - run_kube_burner_workload: Run kube-burner workloads through e2e-benchmarking
@@ -20,7 +20,7 @@ import vm
 import trunk
 import octavia
 import provider_network
-import shift_on_stack
+import ocp_on_osp
 
 
 @types.convert(octavia_image={"type": "glance_image"}, octavia_flavor={"type": "nova_flavor"})
@@ -48,7 +48,7 @@ import shift_on_stack
 )
 class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
                       octavia.DynamicOctaviaBase, provider_network.DynamicProviderNetworkBase,
-                      shift_on_stack.ShiftStackDynamicScenario):
+                      ocp_on_osp.OcpOnOspDynamicScenario):
     def run(
         self, smallest_image, smallest_flavor, ext_net_id, num_vms_to_create_with_fip,
         num_vms_to_migrate, num_stop_start_vms, trunk_image, trunk_flavor, num_initial_subports,
@@ -56,8 +56,8 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
         num_delete_subports_trunks, octavia_image, octavia_flavor, user, user_data_file, num_lbs,
         num_pools, num_clients, delete_num_lbs, delete_num_members, num_create_vms, num_delete_vms,
         provider_phys_net, iface_name, iface_mac, num_vms_provider_net, num_external_networks,
-        shift_on_stack_job_iterations, shift_on_stack_qps, shift_on_stack_burst,
-        shift_on_stack_workload, shift_on_stack_kubeconfig_paths, workloads="all",
+        e2e_kube_burner_job_iterations, e2e_kube_burner_qps, e2e_kube_burner_burst,
+        e2e_kube_burner_workload, ocp_kubeconfig_paths, workloads="all",
         router_create_args=None, network_create_args=None,
         subnet_create_args=None, **kwargs):
 
@@ -160,11 +160,14 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
         if "provider_net_nova_delete" in workloads_list:
             self.provider_net_nova_delete(provider_phys_net)
 
-        if "shift_on_stack" in workloads_list:
-            num_openshift_clusters = len(shift_on_stack_kubeconfig_paths)
-            self.run_kube_burner_workload(shift_on_stack_workload,
-                                          shift_on_stack_job_iterations,
-                                          shift_on_stack_qps, shift_on_stack_burst,
-                                          shift_on_stack_kubeconfig_paths[
+        if "e2e_kube_burner" in workloads_list:
+            num_openshift_clusters = len(ocp_kubeconfig_paths)
+            self.run_kube_burner_workload(e2e_kube_burner_workload,
+                                          e2e_kube_burner_job_iterations,
+                                          e2e_kube_burner_qps, e2e_kube_burner_burst,
+                                          ocp_kubeconfig_paths[
                                               ((self.context["iteration"] - 1)
                                               % num_openshift_clusters)])
 
+        if "ocp_on_osp" in workloads_list:
+            self.install_ocp_cluster()
@@ -23,11 +23,11 @@
 {% set num_add_subports = num_add_subports or 1 %}
 {% set num_delete_subports_trunks = num_delete_subports_trunks or 1 %}
 {% set num_delete_subports = num_delete_subports or 1 %}
-{% set shift_on_stack_job_iterations = shift_on_stack_job_iterations or 100 %}
-{% set shift_on_stack_qps = shift_on_stack_qps or 20 %}
-{% set shift_on_stack_burst = shift_on_stack_burst or 20 %}
-{% set shift_on_stack_workload = shift_on_stack_workload or 'poddensity' %}
-{% set shift_on_stack_kubeconfig_paths = shift_on_stack_kubeconfig_paths or ['/home/stack/.kube/config'] %}
+{% set e2e_kube_burner_job_iterations = e2e_kube_burner_job_iterations or 100 %}
+{% set e2e_kube_burner_qps = e2e_kube_burner_qps or 20 %}
+{% set e2e_kube_burner_burst = e2e_kube_burner_burst or 20 %}
+{% set e2e_kube_burner_workload = e2e_kube_burner_workload or 'poddensity' %}
+{% set ocp_kubeconfig_paths = ocp_kubeconfig_paths or ['/home/stack/.kube/config'] %}
 {% set cidr_prefix = cidr_prefix or '172.31' %}
 {% set num_external_networks = num_external_networks or 16 %}
 {% set router_external = router_external or True %}
@@ -72,11 +72,11 @@ BrowbeatPlugin.dynamic_workload:
     num_vms_to_create_with_fip: {{num_vms_to_create_with_fip}}
     num_vms_to_migrate: {{num_vms_to_migrate}}
     num_stop_start_vms: {{num_stop_start_vms}}
-    shift_on_stack_job_iterations: {{shift_on_stack_job_iterations}}
-    shift_on_stack_qps: {{shift_on_stack_qps}}
-    shift_on_stack_burst: {{shift_on_stack_burst}}
-    shift_on_stack_workload: '{{shift_on_stack_workload}}'
-    shift_on_stack_kubeconfig_paths: {{shift_on_stack_kubeconfig_paths}}
+    e2e_kube_burner_job_iterations: {{e2e_kube_burner_job_iterations}}
+    e2e_kube_burner_qps: {{e2e_kube_burner_qps}}
+    e2e_kube_burner_burst: {{e2e_kube_burner_burst}}
+    e2e_kube_burner_workload: '{{e2e_kube_burner_workload}}'
+    ocp_kubeconfig_paths: {{ocp_kubeconfig_paths}}
     provider_phys_net: '{{ provider_phys_net }}'
     iface_name: '{{ iface_name }}'
    iface_mac: '{{ iface_mac }}'
@@ -10,15 +10,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
 import os
 import subprocess
 
-import dynamic_utils
+from rally.task import atomic
 
 
-class ShiftStackDynamicScenario(dynamic_utils.NovaUtils,
-                                dynamic_utils.NeutronUtils,
-                                dynamic_utils.LockUtils):
+class OcpOnOspDynamicScenario():
+    def install_ocp_cluster(self):
+        """Installs openshift cluster on openstack"""
+
+        ansible_log_file = "ocp_ansible_iter_{}.log".format(self.context["iteration"])
+        cmd = ("ansible-playbook -vvv /home/stack/browbeat/ocp_on_osp/ocp_on_osp.yml &> "
+               "/home/stack/ocp_ansible_logs/{}").format(ansible_log_file)
+
+        aname = "ocp_on_osp"
+        with atomic.ActionTimer(self, aname):
+            proc = subprocess.run(cmd, shell=True)
+
+        msg = "openshift cluster installation has failed"
+        if proc.returncode != 0:
+            self.assertTrue(False, err_msg=msg)
+
     def run_kube_burner_workload(self, workload, job_iterations, qps, burst, kubeconfig):
         """Run kube-burner workloads through e2e-benchmarking
         :param workload: str, kube-burner workload to run