Zuul v3 cross-repo dependency and gate entrypoint

This patch set lays the groundwork for using the Zuul v3 Ansible
playbooks from openstack-helm-infra in the main openstack-helm repo.

Co-Authored-By: Pete Birley <pete@port.direct>
Co-Authored-By: Tin Lam <tin@irrational.io>

Depends-On: I376da8940ed085b7575dd528ec4082f42da1748c

Change-Id: I692c7c3a5102e69ad1fb271f73fea223642deb62
Tin Lam 2017-10-30 16:04:09 -05:00
parent 45179ee6ba
commit 5ae0bff97c
8 changed files with 199 additions and 21 deletions

.zuul.yaml Normal file

@@ -0,0 +1,62 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- project:
    name: openstack/openstack-helm
    check:
      jobs:
        - openstack-helm-linter:
            voting: true
        - openstack-helm-legacy-ubuntu-vm-ovs-radosgw:
            voting: false
        - openstack-helm-legacy-ubuntu-vm-lb-radosgw:
            voting: false
    gate:
      jobs:
        - openstack-helm-linter

- job:
    name: openstack-helm-linter
    run: tools/gate/playbooks/zuul-linter.yaml
    nodeset: openstack-helm-single-node

- job:
    timeout: 7200
    vars:
      zuul_osh_infra_relative_path: ../openstack-helm-infra/
    name: openstack-helm-legacy
    pre-run:
      - ../openstack-helm-infra/tools/gate/playbooks/osh-infra-deploy-docker.yaml
      - ../openstack-helm-infra/tools/gate/playbooks/osh-infra-build.yaml
      - ../openstack-helm-infra/tools/gate/playbooks/osh-infra-deploy-k8s.yaml
    run: tools/gate/playbooks/legacy-gate-runner.yaml
    post-run: tools/gate/playbooks/legacy-gate-post.yaml
    required-projects:
      - openstack/openstack-helm-infra

- job:
    name: openstack-helm-legacy-ubuntu-vm-ovs-radosgw
    parent: openstack-helm-legacy
    vars:
      sdn_plugin: ovs
      glance_backend: radosgw
    nodeset: openstack-helm-ubuntu

- job:
    name: openstack-helm-legacy-ubuntu-vm-lb-radosgw
    parent: openstack-helm-legacy
    vars:
      sdn_plugin: linuxbridge
      glance_backend: radosgw
    nodeset: openstack-helm-ubuntu

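The openstack-helm-legacy job relies on required-projects to have Zuul check out openstack-helm-infra next to this repository on the test node; that sibling checkout is what makes the ../openstack-helm-infra/ pre-run paths and zuul_osh_infra_relative_path resolve. A minimal sketch of the layout this assumes (the git.openstack.org prefix is the conventional Zuul v3 source directory and is an assumption here):

    # Both projects are checked out side by side under the Zuul source tree:
    #   ~/src/git.openstack.org/openstack/openstack-helm
    #   ~/src/git.openstack.org/openstack/openstack-helm-infra
    cd ~/src/git.openstack.org/openstack/openstack-helm
    ls ../openstack-helm-infra/tools/gate/playbooks/  # pre-run playbooks resolve here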

@@ -21,5 +21,5 @@ helm_build
mkdir -p ${LOGS_DIR}/dry-runs
for CHART in $(helm search | awk '{ print $1 }' | tail -n +2 | awk -F '/' '{ print $NF }'); do
  echo "Dry Running chart: $CHART"
-  helm install --dry-run --debug local/$CHART --name=$CHART --namespace=openstack > ${LOGS_DIR}/dry-runs/$CHART
+  helm install --dry-run --debug local/$CHART --name="${CHART}-dry-run" --namespace=openstack > ${LOGS_DIR}/dry-runs/$CHART
done

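Suffixing the release name keeps the throwaway dry-run release from colliding with a real release of the same chart: under Helm v2, even --dry-run goes through Tiller, which can reject a duplicate release name. A quick illustration with a hypothetical chart:

    # If a release named "mariadb" already exists, this can fail:
    #   helm install --dry-run --debug local/mariadb --name=mariadb
    # The suffixed name stays unique per chart:
    helm install --dry-run --debug local/mariadb --name=mariadb-dry-run --namespace=openstack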

@@ -19,20 +19,24 @@ source ${WORK_DIR}/tools/gate/funcs/kube.sh
source ${WORK_DIR}/tools/gate/funcs/network.sh
if [ "x$PVC_BACKEND" == "xceph" ]; then
-  kubectl label nodes ceph-mon=enabled --all
-  kubectl label nodes ceph-osd=enabled --all
-  kubectl label nodes ceph-mds=enabled --all
-  kubectl label nodes ceph-rgw=enabled --all
+  kubectl label nodes ceph-mon=enabled --all --overwrite
+  kubectl label nodes ceph-osd=enabled --all --overwrite
+  kubectl label nodes ceph-mds=enabled --all --overwrite
+  kubectl label nodes ceph-rgw=enabled --all --overwrite
fi
if [ "x$SDN_PLUGIN" == "xovs" ]; then
-  kubectl label nodes openvswitch=enabled --all --namespace=openstack --overwrite
+  kubectl label nodes openvswitch=enabled --all --overwrite
elif [ "x$SDN_PLUGIN" == "xlinuxbridge" ]; then
  # first unlabel nodes with 'openvswitch' tag, which is applied by default
  # by kubeadm-aio docker image
-  kubectl label nodes openvswitch- --all --namespace=openstack --overwrite
-  kubectl label nodes linuxbridge=enabled --all --namespace=openstack --overwrite
+  kubectl label nodes openvswitch- --all --overwrite
+  kubectl label nodes linuxbridge=enabled --all --overwrite
fi
#FIXME(portdirect): Ensure RBAC rules are essentially open until support added
# to all charts and helm-toolkit.
kubectl replace -f ${WORK_DIR}/tools/kubeadm-aio/assets/opt/rbac/dev.yaml
helm install --namespace=openstack ${WORK_DIR}/dns-helper --name=dns-helper
kube_wait_for_pods openstack ${POD_START_TIMEOUT_OPENSTACK}

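Two fixes are folded into this hunk: node objects are cluster-scoped, so the dropped --namespace flag never had any effect on kubectl label nodes, and --overwrite is required because kubectl refuses to change a label that already has a value. For example (node name hypothetical):

    kubectl label nodes node-1 ceph-mon=enabled              # first run succeeds
    kubectl label nodes node-1 ceph-mon=enabled              # rerun fails:
    # error: 'ceph-mon' already has a value (enabled), and --overwrite is false
    kubectl label nodes node-1 ceph-mon=enabled --overwrite  # rerun succeeds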

@@ -0,0 +1,31 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- hosts: primary
  vars:
    logs_dir: "/tmp/logs"
  environment:
    LOGS_DIR: "{{ logs_dir }}"
  tasks:
    - name: Capture logs from environment
      shell: ./tools/gate/dump_logs.sh 0
      args:
        chdir: "{{ zuul.project.src_dir }}"
      ignore_errors: yes
    - name: Download logs to executor
      synchronize:
        src: "{{ logs_dir }}/"
        dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
        mode: pull
      ignore_errors: yes

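The synchronize task runs with mode: pull, so the copy goes from the test node back to the Zuul executor and the dumped logs land under the job's log root. On the executor this behaves roughly like the following rsync (host and destination variable are illustrative):

    rsync -avz primary:/tmp/logs/ "${ZUUL_LOG_ROOT}/primary/"   # pull, not push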

@@ -0,0 +1,66 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- hosts: primary
  tasks:
    - name: Create nodepool directory
      become: true
      become_user: root
      file:
        path: /etc/nodepool
        state: directory
        mode: 0777
    - name: Create nodepool sub_nodes file
      copy:
        dest: /etc/nodepool/sub_nodes
        content: ""
    - name: Create nodepool sub_nodes_private file
      copy:
        dest: /etc/nodepool/sub_nodes_private
        content: ""
    - name: Populate nodepool sub_nodes file
      lineinfile:
        path: /etc/nodepool/sub_nodes
        line: "{{ hostvars[item]['nodepool']['private_ipv4'] }}"
      with_items: "{{ groups['nodes'] }}"
      when: groups['nodes'] is defined
    - name: Populate nodepool sub_nodes_private file
      lineinfile:
        path: /etc/nodepool/sub_nodes_private
        line: "{{ hostvars[item]['nodepool']['private_ipv4'] }}"
      with_items: "{{ groups['nodes'] }}"
      when: groups['nodes'] is defined
    - name: Create nodepool primary file
      copy:
        dest: /etc/nodepool/primary_node
        content: "{{ hostvars['primary']['nodepool']['private_ipv4'] }}"
      when: hostvars['primary'] is defined
    - name: Create nodepool node_private for this node
      copy:
        dest: /etc/nodepool/node_private
        content: "{{ nodepool.private_ipv4 }}"
    - name: Run OSH Deploy
      shell: |
        set -xe;
        export INTEGRATION=multi
        export INTEGRATION_TYPE=basic
        export PVC_BACKEND=ceph
        export ZUUL_VERSION=v3
        export KUBECONFIG=${HOME}/.kube/config
        export SDN_PLUGIN="{{ sdn_plugin }}"
        export GLANCE="{{ glance_backend }}"
        kubectl get nodes -o wide
        ./tools/gate/setup_gate.sh
      args:
        chdir: "{{ zuul.project.src_dir }}"

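The files under /etc/nodepool recreate the interface the legacy v2 gate scripts expect: the sub-node IP lists plus the primary node's IP. A hedged sketch of how a consumer such as setup_gate_worker_nodes.sh might read them (the real script may differ):

    PRIMARY_IP=$(cat /etc/nodepool/primary_node)
    while read -r NODE_IP; do
      ssh "${NODE_IP}" hostname  # e.g. reach each worker to join it to the cluster
    done < /etc/nodepool/sub_nodes_private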

@@ -0,0 +1,20 @@
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- hosts: primary
  tasks:
    - name: Execute a Whitespace Linter check
      command: find . -not -path "*/\.*" -not -path "*/doc/build/*" -not -name "*.tgz" -type f -exec egrep -l " +$" {} \;
      register: result
      failed_when: result.stdout != ""

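The task fails whenever the find/egrep pipeline prints a filename, i.e. whenever any tracked file contains trailing whitespace. The same check, plus a common fix, can be run locally before pushing (the sed invocation is illustrative):

    find . -not -path "*/\.*" -not -path "*/doc/build/*" -not -name "*.tgz" \
      -type f -exec egrep -l " +$" {} \;          # list offending files
    sed -i 's/[ \t]*$//' path/to/offending/file   # strip trailing whitespace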

@@ -30,11 +30,6 @@ source ${WORK_DIR}/tools/gate/funcs/kube.sh
rm -rf ${LOGS_DIR} || true
mkdir -p ${LOGS_DIR}
-function dump_logs () {
-  ${WORK_DIR}/tools/gate/dump_logs.sh
-}
-trap 'dump_logs "$?"' ERR
# Moving the ws-linter here to avoid it blocking all the jobs just for ws
if [ "x$INTEGRATION_TYPE" == "xlinter" ]; then
  bash ${WORK_DIR}/tools/gate/whitespace.sh
@@ -61,11 +56,13 @@ if [ "x$INTEGRATION_TYPE" == "xlinter" ]; then
  helm_template_run
else
  # Setup the K8s Cluster
-  if [ "x$INTEGRATION" == "xaio" ]; then
-    bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
-  elif [ "x$INTEGRATION" == "xmulti" ]; then
-    bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
-    bash ${WORK_DIR}/tools/gate/setup_gate_worker_nodes.sh
+  if ! [ "x$ZUUL_VERSION" == "xv3" ]; then
+    if [ "x$INTEGRATION" == "xaio" ]; then
+      bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
+    elif [ "x$INTEGRATION" == "xmulti" ]; then
+      bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
+      bash ${WORK_DIR}/tools/gate/setup_gate_worker_nodes.sh
+    fi
  fi
  # Pull all required images
  cd ${WORK_DIR}; make pull-all-images
@@ -91,7 +88,5 @@ else
      bash ${WORK_DIR}/tools/gate/openstack/vm_cli_launch.sh
      bash ${WORK_DIR}/tools/gate/openstack/vm_heat_launch.sh
    fi
-    # Collect all logs from the environment
-    bash ${WORK_DIR}/tools/gate/dump_logs.sh 0
  fi
fi

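With log collection moved to the post-run playbook and Kubernetes deployment handled by the osh-infra pre-run playbooks, setup_gate.sh now bootstraps a cluster only when ZUUL_VERSION is not v3. The two entry points, sketched:

    ./tools/gate/setup_gate.sh                  # legacy v2 path: script runs kubeadm itself
    ZUUL_VERSION=v3 ./tools/gate/setup_gate.sh  # v3 path: cluster already provisioned by pre-run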

@@ -16,7 +16,7 @@
: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
# Set logs directory
-export LOGS_DIR=${LOGS_DIR:-"${WORK_DIR}/logs"}
+export LOGS_DIR=${LOGS_DIR:-"/tmp/logs"}
# Get Host OS
source /etc/os-release
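The new /tmp/logs default lines up with logs_dir in legacy-gate-post.yaml above, so the executor's synchronize task finds the logs without extra wiring; a local run can still redirect them by exporting the variable first (path illustrative):

    LOGS_DIR="${HOME}/osh-logs" ./tools/gate/setup_gate.sh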