Zuul: Gate script tidy
This PS cleans up some of the worst offenders in the gate scripts.

Change-Id: If310ae798c9572e8bde4834e5a4af5f97196efea

parent 8f9945f6fd
commit 04e015e49b
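The recurring pattern in the hunks below is collapsing `... | head -1 | awk ...` pipelines into a single awk program that prints its first match and exits. A minimal sketch of the two equivalent forms; the POD variable and selector here are illustrative, not taken from any one file in this change:

    # old form: an extra head process just to take the first line
    POD=$(kubectl get pods --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
    # new form: awk prints the last field of the first record and stops reading
    POD=$(kubectl get pods --no-headers -o name | awk -F '/' '{ print $NF; exit }')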
@@ -37,10 +37,10 @@ if [ "x$PVC_BACKEND" == "xceph" ]; then
  kubectl label nodes ceph-mon=enabled --all
  kubectl label nodes ceph-osd=enabled --all
  kubectl label nodes ceph-mds=enabled --all
- CONTROLLER_MANAGER_POD=$(kubectl get -n kube-system pods -l component=kube-controller-manager --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
+ CONTROLLER_MANAGER_POD=$(kubectl get -n kube-system pods -l component=kube-controller-manager --no-headers -o name | awk -F '/' '{ print $NF; exit }')
  kubectl exec -n kube-system ${CONTROLLER_MANAGER_POD} -- sh -c "cat > /etc/resolv.conf <<EOF
nameserver 10.96.0.10
-nameserver 8.8.8.8
+nameserver ${UPSTREAM_DNS}
search cluster.local svc.cluster.local
EOF"
@@ -66,7 +66,7 @@ EOF"

kube_wait_for_pods ceph 600

-MON_POD=$(kubectl get pods -l application=ceph -l component=mon -n ceph --no-headers | awk '{print $1}' | head -1)
+MON_POD=$(kubectl get pods -l application=ceph -l component=mon -n ceph --no-headers | awk '{ print $1; exit }')

kubectl exec -n ceph ${MON_POD} -- ceph -s
@@ -8,7 +8,6 @@ if ! type "kubectl" &> /dev/null; then
fi

echo "Capturing logs from environment."

mkdir -p ${LOGS_DIR}/k8s/etc
sudo cp -a /etc/kubernetes ${LOGS_DIR}/k8s/etc
sudo chmod 777 --recursive ${LOGS_DIR}/*
@@ -81,12 +80,45 @@ for NAMESPACE in $(kubectl get namespaces -o name | awk -F '/' '{ print $NF }')
  done
done

-mkdir -p ${LOGS_DIR}/nodes/$(hostname)
-sudo docker logs kubelet 2> ${LOGS_DIR}/nodes/$(hostname)/kubelet.txt
-sudo iptables-save > ${LOGS_DIR}/nodes/$(hostname)/iptables.txt
-sudo ip a > ${LOGS_DIR}/nodes/$(hostname)/ip.txt
-sudo route -n > ${LOGS_DIR}/nodes/$(hostname)/routes.txt
-sudo arp -a > ${LOGS_DIR}/nodes/$(hostname)/arp.txt
-cat /etc/resolv.conf > ${LOGS_DIR}/nodes/$(hostname)/resolv.conf
+NODE_NAME=$(hostname)
+mkdir -p ${LOGS_DIR}/nodes/${NODE_NAME}
+echo "${NODE_NAME}" > ${LOGS_DIR}/nodes/master.txt
+sudo docker logs kubelet 2> ${LOGS_DIR}/nodes/${NODE_NAME}/kubelet.txt
+sudo docker images --digests --no-trunc --all > ${LOGS_DIR}/nodes/${NODE_NAME}/images.txt
+sudo iptables-save > ${LOGS_DIR}/nodes/${NODE_NAME}/iptables.txt
+sudo ip a > ${LOGS_DIR}/nodes/${NODE_NAME}/ip.txt
+sudo route -n > ${LOGS_DIR}/nodes/${NODE_NAME}/routes.txt
+sudo arp -a > ${LOGS_DIR}/nodes/${NODE_NAME}/arp.txt
+cat /etc/resolv.conf > ${LOGS_DIR}/nodes/${NODE_NAME}/resolv.conf
+sudo lshw > ${LOGS_DIR}/nodes/${NODE_NAME}/hardware.txt
+if [ "x$INTEGRATION" == "xmulti" ]; then
+  : ${SSH_PRIVATE_KEY:="/etc/nodepool/id_rsa"}
+  : ${SUB_NODE_IPS:="$(cat /etc/nodepool/sub_nodes_private)"}
+  for NODE_IP in $SUB_NODE_IPS ; do
+    ssh-keyscan "${NODE_IP}" >> ~/.ssh/known_hosts
+    NODE_NAME=$(ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} hostname)
+    mkdir -p ${LOGS_DIR}/nodes/${NODE_NAME}
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} sudo docker logs kubelet 2> ${LOGS_DIR}/nodes/${NODE_NAME}/kubelet.txt
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} sudo docker images --digests --no-trunc --all > ${LOGS_DIR}/nodes/${NODE_NAME}/images.txt
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} sudo iptables-save > ${LOGS_DIR}/nodes/${NODE_NAME}/iptables.txt
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} sudo ip a > ${LOGS_DIR}/nodes/${NODE_NAME}/ip.txt
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} sudo route -n > ${LOGS_DIR}/nodes/${NODE_NAME}/routes.txt
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} sudo arp -a > ${LOGS_DIR}/nodes/${NODE_NAME}/arp.txt
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} cat /etc/resolv.conf > ${LOGS_DIR}/nodes/${NODE_NAME}/resolv.conf
+    ssh -i ${SSH_PRIVATE_KEY} $(whoami)@${NODE_IP} sudo lshw > ${LOGS_DIR}/nodes/${NODE_NAME}/hardware.txt
+  done
+fi
+
+source ${WORK_DIR}/tools/gate/funcs/openstack.sh
+mkdir -p ${LOGS_DIR}/openstack
+$OPENSTACK service list > ${LOGS_DIR}/openstack/service.txt
+$OPENSTACK endpoint list > ${LOGS_DIR}/openstack/endpoint.txt
+$OPENSTACK extension list > ${LOGS_DIR}/openstack/extension.txt
+$OPENSTACK compute service list > ${LOGS_DIR}/openstack/compute_service.txt
+$OPENSTACK compute agent list > ${LOGS_DIR}/openstack/compute_agent.txt
+$OPENSTACK host list > ${LOGS_DIR}/openstack/host.txt
+$OPENSTACK hypervisor list > ${LOGS_DIR}/openstack/hypervisor.txt
+$OPENSTACK hypervisor show $(hostname) > ${LOGS_DIR}/openstack/hypervisor-$(hostname).txt
+$OPENSTACK network agent list > ${LOGS_DIR}/openstack/network_agent.txt

exit $1
@@ -11,22 +11,33 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

function base_install {
  if [ "x$HOST_OS" == "xubuntu" ]; then
    sudo apt-get update -y
-   sudo apt-get install -y --no-install-recommends -qq \
+   sudo apt-get install -y --no-install-recommends \
      iproute2 \
-     iptables
+     iptables \
+     ipcalc \
+     nmap \
+     lshw
  elif [ "x$HOST_OS" == "xcentos" ]; then
    sudo yum install -y \
      epel-release
+   # ipcalc is in the initscripts package
    sudo yum install -y \
      iproute \
-     iptables
+     iptables \
+     initscripts \
+     nmap \
+     lshw
  elif [ "x$HOST_OS" == "xfedora" ]; then
    sudo dnf install -y \
      iproute \
-     iptables
+     iptables \
+     ipcalc \
+     nmap \
+     lshw
  fi
}
@@ -42,4 +53,19 @@ function ceph_support_install {
    sudo dnf install -y \
      ceph
  fi
+ sudo modprobe rbd
}

+function nfs_support_install {
+  if [ "x$HOST_OS" == "xubuntu" ]; then
+    sudo apt-get update -y
+    sudo apt-get install -y --no-install-recommends -qq \
+      nfs-common
+  elif [ "x$HOST_OS" == "xcentos" ]; then
+    sudo yum install -y \
+      nfs-utils
+  elif [ "x$HOST_OS" == "xfedora" ]; then
+    sudo dnf install -y \
+      nfs-utils
+  fi
+}
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

function helm_install {
  if [ "x$HOST_OS" == "xubuntu" ]; then
@@ -100,8 +99,10 @@ function helm_plugin_template_install {

function helm_template_run {
  mkdir -p ${LOGS_DIR}/templates
- for CHART in $(helm search | awk '{ print $1 }' | tail -n +2 | awk -F '/' '{ print $NF }'); do
+ set +x
+ for CHART in $(helm search | tail -n +2 | awk '{ print $1 }' | awk -F '/' '{ print $NF }'); do
    echo "Running Helm template plugin on chart: $CHART"
    helm template --verbose $CHART > ${LOGS_DIR}/templates/$CHART
  done
+ set -x
}
@@ -11,14 +11,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

function kube_wait_for_pods {
  # From Kolla-Kubernetes, orginal authors Kevin Fox & Serguei Bezverkhi
  # Default wait timeout is 180 seconds
  set +x
  end=$(date +%s)
- if [ x$2 != "x" ]; then
+ if ! [ -z $2 ]; then
    end=$((end + $2))
  else
    end=$((end + 180))
@@ -48,7 +47,7 @@ function kube_wait_for_nodes {
  # Default wait timeout is 180 seconds
  set +x
  end=$(date +%s)
- if [ x$2 != "x" ]; then
+ if ! [ -z $2 ]; then
    end=$((end + $2))
  else
    end=$((end + 180))
@@ -76,14 +75,12 @@ function kubeadm_aio_reqs_install {
    sudo apt-get update -y
    sudo apt-get install -y --no-install-recommends -qq \
      docker.io \
-     nfs-common \
      jq
  elif [ "x$HOST_OS" == "xcentos" ]; then
    sudo yum install -y \
      epel-release
    sudo yum install -y \
      docker-latest \
-     nfs-utils \
      jq
    sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
    sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
@@ -97,7 +94,6 @@ function kubeadm_aio_reqs_install {
  elif [ "x$HOST_OS" == "xfedora" ]; then
    sudo dnf install -y \
      docker-latest \
-     nfs-utils \
      jq
    sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
    sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
@@ -116,7 +112,6 @@ function kubeadm_aio_reqs_install {
  chmod +x ${TMP_DIR}/kubectl
  sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
  rm -rf ${TMP_DIR} )
}

function kubeadm_aio_build {
@@ -131,6 +126,23 @@ function kubeadm_aio_launch {
  kube_wait_for_pods default 240
}

+function kubeadm_aio_clean {
+  sudo docker rm -f kubeadm-aio || true
+  sudo docker rm -f kubelet || true
+  sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
+  sudo rm -rfv \
+    /etc/cni/net.d \
+    /etc/kubernetes \
+    /var/lib/etcd \
+    /var/etcd \
+    /var/lib/kubelet/* \
+    /run/openvswitch \
+    /var/lib/nova \
+    ${HOME}/.kubeadm-aio/admin.conf \
+    /var/lib/openstack-helm \
+    /var/lib/nfs-provisioner || true
+}
+
function ceph_kube_controller_manager_replace {
  sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE}
  sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE}
@@ -11,13 +11,24 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

+function net_default_iface {
+  sudo ip -4 route list 0/0 | awk '{ print $5; exit }'
+}
+
+function net_default_host_addr {
+  sudo ip addr | awk "/inet / && /$(net_default_iface)/{print \$2; exit }"
+}
+
+function net_default_host_ip {
+  echo $(net_default_host_addr) | awk -F '/' '{ print $1; exit }'
+}
+
function net_resolv_pre_kube {
  sudo cp -f /etc/resolv.conf /etc/resolv-pre-kube.conf
  sudo rm -f /etc/resolv.conf
  cat << EOF | sudo tee /etc/resolv.conf
-nameserver 8.8.8.8
+nameserver ${UPSTREAM_DNS}
EOF
}
@@ -27,11 +38,8 @@ function net_resolv_post_kube {

function net_hosts_pre_kube {
  sudo cp -f /etc/hosts /etc/hosts-pre-kube
- HOST_IFACE=$(sudo ip route | grep "^default" | awk '{ print $5 }')
- HOST_IP=$(sudo ip addr | awk "/inet/ && /${HOST_IFACE}/{sub(/\/.*$/,\"\",\$2); print \$2}")
-
  sudo sed -i "/$(hostname)/d" /etc/hosts
- echo "${HOST_IP} $(hostname)" | sudo tee -a /etc/hosts
+ echo "$(net_default_host_ip) $(hostname)" | sudo tee -a /etc/hosts
}

function net_hosts_post_kube {
@@ -39,15 +47,10 @@ function net_hosts_post_kube {
}

function find_subnet_range {
- DEFAULT_IFACE=$(sudo ip route | awk --posix '$1~/^default$/{print $5}')
- IFS=/ read IP_ADDR SUBNET_PREFIX <<< $(sudo ip addr show ${DEFAULT_IFACE} | awk --posix '$1~/^inet$/{print $2}')
-
- set -- $(( 5 - (${SUBNET_PREFIX} / 8) )) 255 255 255 255 $(( (255 << (8 - (${SUBNET_PREFIX} % 8))) & 255 )) 0 0 0
- [ $1 -gt 1 ] && shift $1 || shift
- SUBNET_MASK=$(echo ${1-0}.${2-0}.${3-0}.${4-0})
-
- IFS=. read -r i1 i2 i3 i4 <<< ${IP_ADDR}
- IFS=. read -r m1 m2 m3 m4 <<< ${SUBNET_MASK}
- BASE_SUBNET_IP=$(printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))")
- echo "$BASE_SUBNET_IP/$SUBNET_PREFIX"
+ if [ "x$HOST_OS" == "xubuntu" ]; then
+   ipcalc $(net_default_host_addr) | awk '/^Network/ { print $2 }'
+ else
+   eval $(ipcalc --network --prefix $(net_default_host_addr))
+   echo "$NETWORK/$PREFIX"
+ fi
}
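The rewritten find_subnet_range above defers the subnet math to ipcalc, which behaves differently on the two distro families: the Debian/Ubuntu tool prints a human-readable report, while the Red Hat tool (shipped in the initscripts package) emits KEY=VALUE pairs that are safe to eval. A rough sketch of what each branch parses, assuming a made-up address of 192.0.2.10/24 and the typical output of each tool:

    # Ubuntu/Debian ipcalc: report style, so take the second column of the Network line
    ipcalc 192.0.2.10/24 | awk '/^Network/ { print $2 }'   # expected: 192.0.2.0/24
    # CentOS/Fedora ipcalc (initscripts): shell-style variables, so eval and reassemble
    eval $(ipcalc --network --prefix 192.0.2.10/24)        # sets NETWORK=192.0.2.0 and PREFIX=24
    echo "${NETWORK}/${PREFIX}"                            # expected: 192.0.2.0/24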
@@ -11,14 +11,34 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

+: ${KS_USER:="admin"}
+: ${KS_PROJECT:="admin"}
+: ${KS_PASSWORD:="password"}
+: ${KS_USER_DOMAIN:="default"}
+: ${KS_PROJECT_DOMAIN:="default"}
+: ${KS_URL:="http://keystone.openstack/v3"}
+
+# Setup openstack clients
+KEYSTONE_CREDS="--os-username ${KS_USER} \
+  --os-project-name ${KS_PROJECT} \
+  --os-auth-url ${KS_URL} \
+  --os-project-domain-name ${KS_PROJECT_DOMAIN} \
+  --os-user-domain-name ${KS_USER_DOMAIN} \
+  --os-password ${KS_PASSWORD}"
+NEUTRON_POD=$(kubectl get -n openstack pods -l application=neutron,component=server --no-headers -o name | awk -F '/' '{ print $NF; exit }')
+NEUTRON="kubectl exec -n openstack ${NEUTRON_POD} -- neutron ${KEYSTONE_CREDS}"
+NOVA_POD=$(kubectl get -n openstack pods -l application=nova,component=os-api --no-headers -o name | awk -F '/' '{ print $NF; exit }')
+NOVA="kubectl exec -n openstack ${NOVA_POD} -- nova ${KEYSTONE_CREDS}"
+OPENSTACK_POD=$(kubectl get -n openstack pods -l application=keystone,component=api --no-headers -o name | awk -F '/' '{ print $NF; exit }')
+OPENSTACK="kubectl exec -n openstack ${OPENSTACK_POD} -- openstack ${KEYSTONE_CREDS} --os-identity-api-version 3 --os-image-api-version 2"
+
function wait_for_ping {
  # Default wait timeout is 180 seconds
  set +x
  PING_CMD="ping -q -c 1 -W 1"
  end=$(date +%s)
- if [ x$2 != "x" ]; then
+ if ! [ -z $2 ]; then
    end=$((end + $2))
  else
    end=$((end + 180))
@@ -38,7 +58,7 @@ function openstack_wait_for_vm
  # Default wait timeout is 180 seconds
  set +x
  end=$(date +%s)
- if [ x$2 != "x" ]; then
+ if ! [ -z $2 ]; then
    end=$((end + $2))
  else
    end=$((end + 180))
@@ -54,3 +74,23 @@ function openstack_wait_for_vm
  done
  set -x
}

+function wait_for_ssh_port {
+  # Default wait timeout is 180 seconds
+  set +x
+  end=$(date +%s)
+  if ! [ -z $2 ]; then
+    end=$((end + $2))
+  else
+    end=$((end + 180))
+  fi
+  while true; do
+    # Use Nmap as its the same on Ubuntu and RHEL family distros
+    nmap -Pn -p22 $1 | awk '$1 ~ /22/ {print $2}' | grep -q 'open' && \
+      break || true
+    sleep 1
+    now=$(date +%s)
+    [ $now -gt $end ] && echo "Could not connect to $1 port 22 in time" && exit -1
+  done
+  set -x
+}
@@ -21,7 +21,6 @@ sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build

if [ "x$PVC_BACKEND" == "xceph" ]; then
  ceph_kube_controller_manager_replace
- sudo modprobe rbd
fi

kubeadm_aio_launch
@@ -13,13 +13,6 @@
# limitations under the License.
set -xe

-: ${KS_USER:="admin"}
-: ${KS_PROJECT:="admin"}
-: ${KS_PASSWORD:="password"}
-: ${KS_USER_DOMAIN:="default"}
-: ${KS_PROJECT_DOMAIN:="default"}
-: ${KS_URL:="http://keystone.openstack/v3"}
-
: ${OSH_BR_EX_ADDR:="172.24.4.1/24"}
: ${OSH_EXT_SUBNET:="172.24.4.0/24"}
: ${OSH_EXT_DNS:="8.8.8.8"}
@@ -36,22 +29,9 @@ set -xe
: ${OSH_VM_KEY:="osh-smoketest-key"}

# Source some functions that will help us
source ${WORK_DIR}/tools/gate/funcs/network.sh
source ${WORK_DIR}/tools/gate/funcs/openstack.sh

-# Setup openstack clients
-KEYSTONE_CREDS="--os-username ${KS_USER} \
-  --os-project-name ${KS_PROJECT} \
-  --os-auth-url ${KS_URL} \
-  --os-project-domain-name ${KS_PROJECT_DOMAIN} \
-  --os-user-domain-name ${KS_USER_DOMAIN} \
-  --os-password ${KS_PASSWORD}"
-NEUTRON_POD=$(kubectl get -n openstack pods -l application=neutron,component=server --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
-NEUTRON="kubectl exec -n openstack ${NEUTRON_POD} -- neutron ${KEYSTONE_CREDS}"
-OPENSTACK_POD=$(kubectl get -n openstack pods -l application=keystone,component=api --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
-OPENSTACK="kubectl exec -n openstack ${OPENSTACK_POD} -- openstack ${KEYSTONE_CREDS} --os-identity-api-version 3 --os-image-api-version 2"
-NOVA_POD=$(kubectl get -n openstack pods -l application=nova,component=os-api --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
-NOVA="kubectl exec -n openstack ${NOVA_POD} -- nova ${KEYSTONE_CREDS}"
-
# Turn on ip forwarding if its not already
if [ $(cat /proc/sys/net/ipv4/ip_forward) -eq 0 ]; then
  sudo bash -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
@@ -60,15 +40,13 @@ fi
# Assign IP address to br-ex
sudo ip addr add ${OSH_BR_EX_ADDR} dev br-ex
sudo ip link set br-ex up
+# Setup masquerading on default route dev to public subnet
+sudo iptables -t nat -A POSTROUTING -o $(net_default_iface) -s ${OSH_EXT_SUBNET} -j MASQUERADE

# Disable In-Band rules on br-ex bridge to ease debugging
OVS_VSWITCHD_POD=$(kubectl get -n openstack pods -l application=neutron,component=ovs-vswitchd --no-headers -o name | head -1 | awk -F '/' '{ print $NF }')
kubectl exec -n openstack ${OVS_VSWITCHD_POD} -- ovs-vsctl set Bridge br-ex other_config:disable-in-band=true

-# Setup masquerading on default route dev to public subnet
-DEFAULT_GW_DEV=$(sudo ip -4 route list 0/0 | cut -d ' ' -f 5)
-sudo iptables -t nat -A POSTROUTING -o ${DEFAULT_GW_DEV} -s ${OSH_EXT_SUBNET} -j MASQUERADE
-
# Create default networks
$NEUTRON net-create ${OSH_PRIVATE_NET_NAME}
$NEUTRON subnet-create \
@@ -98,6 +76,12 @@ $NEUTRON router-gateway-set $($NEUTRON router-show ${OSH_ROUTER} -f value -c id)
ROUTER_PUBLIC_IP=$($NEUTRON router-show ${OSH_ROUTER} -f value -c external_gateway_info | jq -r '.external_fixed_ips[].ip_address')
wait_for_ping ${ROUTER_PUBLIC_IP}

+# Loosen up security group to allow access to the VM
+PROJECT=$($OPENSTACK project show admin -f value -c id)
+SECURITY_GROUP=$($OPENSTACK security group list -f csv | grep ${PROJECT} | grep "default" | awk -F "," '{ print $1 }' | tr -d '"')
+$OPENSTACK security group rule create ${SECURITY_GROUP} --protocol icmp --src-ip 0.0.0.0/0
+$OPENSTACK security group rule create ${SECURITY_GROUP} --protocol tcp --dst-port 22:22 --src-ip 0.0.0.0/0
+
# Setup SSH Keypair in Nova
KEYPAIR_LOC="$(mktemp).pem"
$OPENSTACK keypair create ${OSH_VM_KEY} > ${KEYPAIR_LOC}
@@ -120,15 +104,12 @@ openstack_wait_for_vm ${OSH_VM_NAME}
FLOATING_IP=$($OPENSTACK floating ip create ${OSH_EXT_NET_NAME} -f value -c floating_ip_address)
$OPENSTACK server add floating ip ${OSH_VM_NAME} ${FLOATING_IP}

-# Loosen up security group to allow access to the VM
-PROJECT=$($OPENSTACK project show admin -f value -c id)
-SECURITY_GROUP=$($OPENSTACK security group list -f csv | grep ${PROJECT} | grep "default" | awk -F "," '{ print $1 }' | tr -d '"')
-$OPENSTACK security group rule create ${SECURITY_GROUP} --protocol icmp --src-ip 0.0.0.0/0
-$OPENSTACK security group rule create ${SECURITY_GROUP} --protocol tcp --dst-port 22:22 --src-ip 0.0.0.0/0
-
# Ping our VM
wait_for_ping ${FLOATING_IP}

+# Wait for SSH to come up
+wait_for_ssh_port ${FLOATING_IP}
+
# SSH into the VM and check it can reach the outside world
ssh-keyscan "$FLOATING_IP" >> ~/.ssh/known_hosts
ssh -i ${KEYPAIR_LOC} cirros@${FLOATING_IP} ping -q -c 1 -W 2 ${OSH_BR_EX_ADDR%/*}
|
@ -16,9 +16,19 @@ set -ex
|
||||
cd ${WORK_DIR}
|
||||
source /etc/os-release
|
||||
export HOST_OS=${ID}
|
||||
source ${WORK_DIR}/tools/gate/funcs/common.sh
|
||||
source ${WORK_DIR}/tools/gate/funcs/network.sh
|
||||
source ${WORK_DIR}/tools/gate/funcs/kube.sh
|
||||
|
||||
# Install base requirements
|
||||
base_install
|
||||
if [ "x$PVC_BACKEND" == "xceph" ]; then
|
||||
ceph_support_install
|
||||
elif [ "x$PVC_BACKEND" == "xnfs" ]; then
|
||||
nfs_support_install
|
||||
fi
|
||||
|
||||
# Install KubeadmAIO requirements and get image
|
||||
kubeadm_aio_reqs_install
|
||||
sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build
|
||||
|
||||
@@ -27,24 +37,8 @@ sudo mkdir -p /var/lib/kubelet
sudo mount --bind /var/lib/kubelet /var/lib/kubelet
sudo mount --make-shared /var/lib/kubelet

-# Cleanup any old deployment
-sudo docker rm -f kubeadm-aio || true
-sudo docker rm -f kubelet || true
-sudo docker ps -aq | xargs -r -l1 sudo docker rm -f
-sudo rm -rfv \
-  /etc/cni/net.d \
-  /etc/kubernetes \
-  /var/lib/etcd \
-  /var/etcd \
-  /var/lib/kubelet/* \
-  /var/lib/nova \
-  /var/lib/openstack-helm \
-  /run/openvswitch || true
-
-# Load ceph kernel module if required
-if [ "x$PVC_BACKEND" == "xceph" ]; then
-  sudo modprobe rbd
-fi
+# Clean up any old install
+kubeadm_aio_clean

# Launch Container
sudo docker run \
@@ -13,8 +13,10 @@
# limitations under the License.
set -ex

-export HELM_VERSION=${2:-v2.5.1}
-export KUBE_VERSION=${3:-v1.6.7}
+export HELM_VERSION=${HELM_VERSION:-"v2.5.1"}
+export KUBE_VERSION=${KUBE_VERSION:-"v1.6.7"}
+export PVC_BACKEND=${PVC_BACKEND:-"ceph"}
+export UPSTREAM_DNS=${UPSTREAM_DNS:-"8.8.8.8"}
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:${KUBE_VERSION}
export BASE_KUBE_CONTROLLER_MANAGER_IMAGE=gcr.io/google_containers/kube-controller-manager-amd64:${KUBE_VERSION}
@@ -26,7 +28,6 @@ export HOST_OS=${ID}
source ${WORK_DIR}/tools/gate/funcs/common.sh
source ${WORK_DIR}/tools/gate/funcs/network.sh
source ${WORK_DIR}/tools/gate/funcs/helm.sh
-export PVC_BACKEND=ceph

# Setup the logging location: by default use the working dir as the root.
export LOGS_DIR=${LOGS_DIR:-"${WORK_DIR}/logs"}
@@ -47,6 +48,8 @@ fi
base_install
if [ "x$PVC_BACKEND" == "xceph" ]; then
  ceph_support_install
+elif [ "x$PVC_BACKEND" == "xnfs" ]; then
+  nfs_support_install
fi

# We setup the network for pre kube here, to enable cluster restarts on
@@ -21,7 +21,7 @@ export SUB_NODE_COUNT="$(($(echo ${SUB_NODE_IPS} | wc -w) + 1))"
sudo chown $(whoami) ${SSH_PRIVATE_KEY}
sudo chmod 600 ${SSH_PRIVATE_KEY}

-KUBEADM_TOKEN=$(sudo docker exec kubeadm-aio kubeadm token list | tail -n -1 | awk '{ print $1 }')
+KUBEADM_TOKEN=$(sudo docker exec kubeadm-aio kubeadm token list | awk '/The default bootstrap token/ { print $1 ; exit }')

SUB_NODE_PROVISION_SCRIPT=$(mktemp --suffix=.sh)
for SUB_NODE in $SUB_NODE_IPS ; do
|
@ -20,7 +20,7 @@ set -e
|
||||
export KUBECONFIG=${KUBECONFIG}
|
||||
|
||||
end=$(date +%s)
|
||||
if [ x$2 != "x" ]; then
|
||||
if ! [ -z $2 ]; then
|
||||
end=$((end + $2))
|
||||
else
|
||||
end=$((end + 180))
|
||||
|
@@ -21,7 +21,7 @@ set -e
export KUBECONFIG=${KUBECONFIG}

end=$(date +%s)
-if [ x$2 != "x" ]; then
+if ! [ -z $2 ]; then
  end=$((end + $2))
else
  end=$((end + 180))
@@ -23,7 +23,7 @@ sudo mount --make-shared /var/lib/kubelet
# Cleanup any old deployment
sudo docker rm -f kubeadm-aio || true
sudo docker rm -f kubelet || true
-sudo docker ps -aq | xargs -r -l1 sudo docker rm -f
+sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
sudo rm -rfv \
  /etc/cni/net.d \
  /etc/kubernetes \
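The change in this last hunk is the -P16 flag, which lets xargs remove containers in parallel instead of one at a time. A short annotated sketch of the flags, assuming `docker ps -aq` returns some container IDs:

    # -r:   skip the command entirely if no IDs are piped in
    # -l1:  pass at most one ID per docker rm invocation
    # -P16: keep up to 16 removals running concurrently
    sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f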