Update Debian build system to support Kubernetes
Extends the build tools to add support for Kubernetes. For Kubernetes, it
supports a single-node cluster and host-path volumes.

New environment variables:
- PROJECT: build project name
- STX_PLATFORM: minikube or kubernetes
- STX_BUILD_CPUS: replaces MINIKUBECPUS
- STX_K8S_NAMESPACE: kubernetes namespace name

Default values are set for minikube; more details are in import-stx.README.

To deploy stx on k8s, follow the steps below (see the sketch after this message):
- Create the k8s namespace.
- Export PROJECT. It was added to support multi-project environments; it is
  used by the new default build home and also for docker tagging.
- Export STX_BUILD_HOME, e.g. /localdisk/user/$PROJECT
- Export STX_K8S_NAMESPACE="namespace_name"
- Export STX_PLATFORM="kubernetes"
- Export KUBECONFIG pointing to your kubernetes config file.
- STX_BUILD_CPUS replaces MINIKUBECPUS; it is used for the build-pkgs
  parallel jobs ($MAX_CPUS).
- Create your build home $STX_BUILD_HOME.
- repo init & repo sync.
- Source import-stx and check the environment variables.
- Run stx-init-env.
- Use stx control status/start/stop/enter.

Test Plan:
Pass: Create env on minikube
Pass: Create env on Kubernetes
Pass: Apply patch on a current minikube env and continue to work on the
      environment without issues
Pass: Build a package on Debian
Pass: Build the Debian image

Story: 2009812
Task: 44391

Signed-off-by: Luis Sampaio <luis.sampaio@windriver.com>
Change-Id: I7b760fbf1454f6aa90dd93dd9ff3a61d5fbd1b5c
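As a rough illustration of the flow above, here is a minimal sketch of a
Kubernetes-platform setup; the project name, namespace and paths are
placeholders, and the namespace plus cluster access are assumed to exist
already:

    # Placeholders: "myproject" and "myproject-ns" are illustrative only.
    export PROJECT=myproject
    export STX_BUILD_HOME=/localdisk/user/$PROJECT
    export STX_K8S_NAMESPACE=myproject-ns        # namespace created beforehand
    export STX_PLATFORM=kubernetes
    export KUBECONFIG=$HOME/.kube/config         # your cluster config file
    export STX_BUILD_CPUS=6                      # parallel jobs for build-pkgs

    mkdir -p "$STX_BUILD_HOME" && cd "$STX_BUILD_HOME"
    repo init -u https://opendev.org/starlingx/manifest.git -m default.xml
    repo sync

    # From the stx-tools checkout created by repo sync (exact path may differ):
    source import-stx          # validates the variables above
    ./stx-init-env             # installs the builder charts into the namespace
    stx control status         # then start/stop/enter as needed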
parent a334124287
commit 52ef35d1bf

import-stx (170 changed lines)
@ -1,25 +1,6 @@
|
||||
# bash
|
||||
|
||||
if [ -z "$MINIKUBE_HOME" ];then
|
||||
MINIKUBE_HOME=$HOME
|
||||
else
|
||||
if [ ! -d "$MINIKUBE_HOME" ]; then
|
||||
echo "The directory defined by \$MINIKUBE_HOME doesn't exist"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
FSTYPE=$(stat -f -L -c %T $MINIKUBE_HOME)
|
||||
if [ x"$FSTYPE" == x"nfs" ]; then
|
||||
echo ""
|
||||
echo "Warning: stx minikube doesn't allow \$MINIKUBE_HOME or \$HOME directory as nfs mount point!!!"
|
||||
echo " Please set non-nfs MINIKUBE_HOME with the command 'export MINIKUBE_HOME=XXX/YYY'"
|
||||
echo ""
|
||||
unset MINIKUBE_HOME
|
||||
return 1
|
||||
fi
|
||||
|
||||
notice_warn() {
|
||||
notice_warn () {
|
||||
local tty_on tty_off
|
||||
if [[ -t 2 ]] ; then
|
||||
tty_on=$'\033[1;33m'
|
||||
@ -28,23 +9,11 @@ notice_warn() {
|
||||
echo >&2 "${tty_on}$*${tty_off}"
|
||||
}
|
||||
|
||||
export PRJDIR=$(pwd)
|
||||
export PATH=$PRJDIR/stx/bin:$PATH
|
||||
export MINIKUBECPUS=${MINIKUBECPUS:-6}
|
||||
export MINIKUBEMEMORY=${MINIKUBEMEMORY:-16000}
|
||||
export MINIKUBENAME=${MINIKUBENAME:-minikube-$USER-upstream}
|
||||
export KUBECONFIG=$MINIKUBE_HOME/.kube/config
|
||||
export STX_BUILD_HOME="${STX_BUILD_HOME:-/localdisk/$USER}"
|
||||
|
||||
if [ ! -f "stx.conf" ]; then
|
||||
cp stx.conf.sample stx.conf
|
||||
fi
|
||||
|
||||
number_of_users () {
|
||||
local count
|
||||
|
||||
count=$(users | tr ' ' '\n' | sort --uniq | wc -l)
|
||||
|
||||
|
||||
# Add in non-login users that might trigger a parallel build
|
||||
# based on a timer, or other trigger.
|
||||
if getent passwd | grep -q jenkins; then
|
||||
@ -68,34 +37,125 @@ sqrt () {
|
||||
echo -e "sqrt($1)" | bc -q -i | head -2 | tail -1
|
||||
}
|
||||
|
||||
# Consider many users are just working with code and not actually building.
|
||||
NUM_USERS=$(sqrt $(number_of_users))
|
||||
ABSOLUTE_MAX_CPUS=$(($(number_of_cpus)/$NUM_USERS))
|
||||
MAX_CPUS=$(number_of_cpus)
|
||||
if [ "$MAX_CPUS" == "" ] || [ "$MAX_CPUS" == "0" ]; then
|
||||
MAX_CPUS=1
|
||||
|
||||
if [ -z "$PROJECT" ]; then
|
||||
notice_warn "\$PROJECT needs to be defined, this will be your project name."
|
||||
notice_warn "It will be used on the docker image tagging to support multiusers."
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ $MAX_CPUS -gt $ABSOLUTE_MAX_CPUS ]; then
|
||||
MAX_CPUS=$ABSOLUTE_MAX_CPUS
|
||||
# Host side path, exports STX lib to user's PATH
|
||||
export PRJDIR=$(pwd)
|
||||
export PATH=$PRJDIR/stx/bin:$PATH
|
||||
# Used by helm/stx-init to tag the user images
|
||||
DOCKER_TAG_VERSION="v0.1.0"
|
||||
export DOCKER_TAG_LOCAL="${USER}-${PROJECT}-${DOCKER_TAG_VERSION}"
|
||||
# Platform 'minikube' or 'kubernetes'
|
||||
export STX_PLATFORM="${STX_PLATFORM:-minikube}"
|
||||
# Max cpus for the build parallel jobs, replaces MINIKUBECPUS env var
|
||||
export STX_BUILD_CPUS=${STX_BUILD_CPUS:-6}
|
||||
|
||||
STX_BUILD_HOME_DEFAULT_v1="/localdisk/$USER"
|
||||
STX_BUILD_HOME_DEFAULT_v2="/localdisk/designer/$USER/$PROJECT"
|
||||
|
||||
if [ ! -f "stx.conf" ]; then
|
||||
cp stx.conf.sample stx.conf
|
||||
fi
|
||||
|
||||
if [ $MINIKUBECPUS -gt $MAX_CPUS ]; then
|
||||
notice_warn "MINIKUBECPUS setting:$MINIKUBECPUS is more than MAX_CPUS: $MAX_CPUS."
|
||||
notice_warn "Limit the minikube cluster with MAX_CPUS."
|
||||
# Platform specifics
|
||||
if [ "$STX_PLATFORM" = "minikube" ]; then
|
||||
# MINIKUBE Settings
|
||||
if [ -z "$STX_BUILD_HOME" ]; then
|
||||
# Verify default build home
|
||||
if [ -d "${STX_BUILD_HOME_DEFAULT_v1}/localdisk/designer/$USER" ]; then
|
||||
STX_BUILD_HOME="${STX_BUILD_HOME_DEFAULT_v1}"
|
||||
else
|
||||
STX_BUILD_HOME="${STX_BUILD_HOME_DEFAULT_v2}"
|
||||
fi
|
||||
export STX_BUILD_HOME
|
||||
fi
|
||||
|
||||
export MINIKUBECPUS=$MAX_CPUS
|
||||
fi
|
||||
if [ -z "$MINIKUBE_HOME" ]; then
|
||||
MINIKUBE_HOME=$HOME
|
||||
else
|
||||
if [ ! -d "$MINIKUBE_HOME" ]; then
|
||||
echo "The directory defined by \$MINIKUBE_HOME doesn't exist"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
MAX_MEMORY=`expr $(cat /proc/meminfo |grep MemTotal | awk '{print $2}') / 1024`
|
||||
if [ "$MAX_MEMORY" == "" ] || [ "$MAX_MEMORY" == "0" ]; then
|
||||
MAX_MEMORY=2048
|
||||
fi
|
||||
FSTYPE=$(stat -f -L -c %T $MINIKUBE_HOME)
|
||||
if [ x"$FSTYPE" == x"nfs" ]; then
|
||||
echo ""
|
||||
echo "Warning: stx minikube doesn't allow \$MINIKUBE_HOME or \$HOME directory as nfs mount point!!!"
|
||||
echo " Please set non-nfs MINIKUBE_HOME with the command 'export MINIKUBE_HOME=XXX/YYY'"
|
||||
echo ""
|
||||
unset MINIKUBE_HOME
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ $MINIKUBEMEMORY -gt $MAX_MEMORY ]; then
|
||||
export MINIKUBEMEMORY=${MINIKUBEMEMORY:-16000}
|
||||
export MINIKUBENAME=${MINIKUBENAME:-minikube-$USER-upstream}
|
||||
export KUBECONFIG=$MINIKUBE_HOME/.kube/config
|
||||
|
||||
notice_warn "MINIKUBEMEMORY setting:$MINIKUBEMEMORY is more than system MAX_MEMORY: $MAX_MEMORY M."
|
||||
notice_warn "Limit the minikube cluster with MAX_MEMORY."
|
||||
# Consider many users are just working with code and not actually building.
|
||||
NUM_USERS=$(sqrt $(number_of_users))
|
||||
ABSOLUTE_MAX_CPUS=$(($(number_of_cpus)/$NUM_USERS))
|
||||
MAX_CPUS=$(number_of_cpus)
|
||||
if [ "$MAX_CPUS" == "" ] || [ "$MAX_CPUS" == "0" ]; then
|
||||
MAX_CPUS=1
|
||||
fi
|
||||
|
||||
export MINIKUBEMEMORY=$MAX_MEMORY
|
||||
fi
|
||||
if [ $MAX_CPUS -gt $ABSOLUTE_MAX_CPUS ]; then
|
||||
MAX_CPUS=$ABSOLUTE_MAX_CPUS
|
||||
fi
|
||||
|
||||
if [ $STX_BUILD_CPUS -gt $MAX_CPUS ]; then
|
||||
notice_warn "\$STX_BUILD_CPUS setting:$STX_BUILD_CPUS is more than MAX_CPUS: $MAX_CPUS."
|
||||
notice_warn "Limit the minikube cluster with MAX_CPUS."
|
||||
|
||||
export STX_BUILD_CPUS=$MAX_CPUS
|
||||
fi
|
||||
|
||||
MAX_MEMORY=`expr $(cat /proc/meminfo |grep MemTotal | awk '{print $2}') / 1024`
|
||||
if [ "$MAX_MEMORY" == "" ] || [ "$MAX_MEMORY" == "0" ]; then
|
||||
MAX_MEMORY=2048
|
||||
fi
|
||||
|
||||
if [ $MINIKUBEMEMORY -gt $MAX_MEMORY ]; then
|
||||
|
||||
notice_warn "MINIKUBEMEMORY setting:$MINIKUBEMEMORY is more than system MAX_MEMORY: $MAX_MEMORY M."
|
||||
notice_warn "Limit the minikube cluster with MAX_MEMORY."
|
||||
|
||||
export MINIKUBEMEMORY=$MAX_MEMORY
|
||||
fi
|
||||
|
||||
elif [ "$STX_PLATFORM" = "kubernetes" ]; then
|
||||
# Host side path STX_BUILD_HOME
|
||||
export STX_BUILD_HOME="${STX_BUILD_HOME:-${STX_BUILD_HOME_DEFAULT_v2}}"
|
||||
|
||||
if [ -z "$STX_K8S_NAMESPACE" ]; then
|
||||
notice_warn "\$STX_K8S_NAMESPACE needs to be defined, this will be your namespace name"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! kubectl get namespace 2>/dev/null | grep -q $STX_K8S_NAMESPACE; then
|
||||
notice_warn "namespace $STX_K8S_NAMESPACE not found"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ -z "$KUBECONFIG" ]; then
|
||||
# Kubeconfig default location inside STX_BUILD_HOME
|
||||
export KUBECONFIG=$STX_BUILD_HOME/.kube/config
|
||||
fi
|
||||
|
||||
if [ ! -f "$KUBECONFIG" ]; then
|
||||
notice_warn "KUBECONFIG: $KUBECONFIG not found"
|
||||
notice_warn "Fix the kube config and try again."
|
||||
return 1
|
||||
fi
|
||||
|
||||
else
|
||||
notice_warn "\$STX_PLATFORM not specified, valid options are: 'minikube' or 'kubernetes'"
|
||||
return 1
|
||||
fi
|
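For reference, the parallel-job cap computed by import-stx divides the host
CPU count by the square root of the number of logged-in users and then clamps
STX_BUILD_CPUS. A standalone sketch of that arithmetic follows; number_of_cpus
is assumed here to wrap nproc, which the diff above does not show:

    # Sketch of the STX_BUILD_CPUS capping logic; not a drop-in copy of import-stx.
    number_of_cpus () { nproc; }                               # assumption: nproc-based
    number_of_users () { users | tr ' ' '\n' | sort --uniq | wc -l; }
    sqrt () { echo -e "sqrt($1)" | bc -q -i | head -2 | tail -1; }

    NUM_USERS=$(sqrt $(number_of_users))
    ABSOLUTE_MAX_CPUS=$(( $(number_of_cpus) / NUM_USERS ))     # e.g. 32 / sqrt(4) = 16
    STX_BUILD_CPUS=${STX_BUILD_CPUS:-6}
    if [ "$STX_BUILD_CPUS" -gt "$ABSOLUTE_MAX_CPUS" ]; then
        STX_BUILD_CPUS=$ABSOLUTE_MAX_CPUS
    fi
    echo "build-pkgs may run up to $STX_BUILD_CPUS parallel jobs"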
import-stx.README (new file, 70 lines)
@ -0,0 +1,70 @@
import-stx: Import StarlingX build environment and stx tool

For more information: https://wiki.openstack.org/wiki/StarlingX/DebianBuildEnvironment

###############################################
# STX Environment variables
###############################################

$PROJECT

* Required
* Project name, used by docker image tagging for multiuser/multiproject environments.
  It is also used in the default STX_BUILD_HOME path.

$STX_PLATFORM

* Required for Kubernetes.
* Platforms supported: "minikube" and "kubernetes"
* Default value: "minikube"

$STX_BUILD_HOME

* Optional
* Default value: "/localdisk/designer/$USER/$PROJECT"
* Working directory for the build.

$STX_BUILD_CPUS

* Optional
* Default value: 6
* Number of CPUs that build-pkgs can use for parallel jobs; in a minikube environment
  this option is also used to set the minikube cluster cpus.

###############################################
# Minikube only
###############################################

$MINIKUBENAME

* Optional
* Default value: minikube-$USER-upstream
* Sets the minikube cluster profile name.

$MINIKUBE_HOME

* Optional
* Default value: $HOME
* Sets the path for the .minikube and .kube directories that minikube uses for
  state/configuration.

$MINIKUBEMEMORY

* Optional
* Default value: 16000
* Amount of memory (MB) available for the minikube cluster.

###############################################
# Kubernetes only
###############################################

$STX_K8S_NAMESPACE

* Required
* Kubernetes namespace name for your project; currently each namespace can host one
  stx deployment. The namespace must be created and access set up prior to sourcing
  import-stx and starting your build environment.

$KUBECONFIG

* Optional
* Default value: $STX_BUILD_HOME/.kube/config
* Path to your Kubernetes config file.
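As a quick reference for the variables documented above, a hedged example of a
minikube-platform environment (all values are placeholders or the documented
defaults, not additional requirements):

    export PROJECT=myproject                       # used in docker image tags
    export STX_PLATFORM=minikube                   # the default platform
    export MINIKUBE_HOME=/localdisk/$USER          # must not be an NFS mount
    export MINIKUBENAME=minikube-$USER-upstream    # cluster profile name
    export MINIKUBEMEMORY=16000                    # MB reserved for the cluster
    export STX_BUILD_CPUS=6                        # also sets the minikube cluster cpus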
stx-init-env (130 changed lines)
@ -29,19 +29,18 @@ notice() {
|
||||
echo >&2 "${tty_on}$*${tty_off}"
|
||||
}
|
||||
|
||||
|
||||
PROGNAME=$(basename "$0")
|
||||
MINIKUBE=minikube
|
||||
HELM=helm
|
||||
DOCKER=docker
|
||||
PYTHON3=python3
|
||||
KUBECTL=kubectl
|
||||
DOCKER_PREFIX="starlingx/"
|
||||
DOCKER_IMAGES="stx-builder stx-pkgbuilder stx-lat-tool stx-aptly"
|
||||
DOCKER_TAG="master-debian-latest"
|
||||
DOCKER_TAG_LOCAL="v0.1.0"
|
||||
|
||||
BUILD_DOCKER=0
|
||||
DELETE_MINIKUBE=0
|
||||
DELETE_ENV=0
|
||||
RESTART_MINIKUBE=0
|
||||
CLEAN_CONFIG=0
|
||||
|
||||
@ -96,7 +95,7 @@ while true ; do
|
||||
shift
|
||||
;;
|
||||
--nuke)
|
||||
DELETE_MINIKUBE=1
|
||||
DELETE_ENV=1
|
||||
shift
|
||||
;;
|
||||
--)
|
||||
@ -114,19 +113,30 @@ done
|
||||
[[ "$#" -le 0 ]] || cmdline_error "too many arguments"
|
||||
|
||||
# make sure required programs are installed
|
||||
if ! command -v $MINIKUBE &> /dev/null; then
|
||||
echo >&2 "Command $MINIKUBE could not be found."
|
||||
echo >&2 "Please install it as https://minikube.sigs.k8s.io/docs/start/"
|
||||
echo ""
|
||||
exit 1
|
||||
if [ "$STX_PLATFORM" = "minikube" ]; then
|
||||
if ! command -v "$MINIKUBE" &> /dev/null; then
|
||||
echo >&2 "Command $MINIKUBE could not be found."
|
||||
echo >&2 "Please install it as https://minikube.sigs.k8s.io/docs/start/"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
if ! command -v $HELM &> /dev/null; then
|
||||
if [ "$STX_PLATFORM" = "kubernetes" ]; then
|
||||
if ! command -v "$KUBECTL" &> /dev/null; then
|
||||
echo >&2 "Command $KUBECTL could not be found."
|
||||
echo >&2 "Please install and configure kubectl."
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! command -v "$HELM" &> /dev/null; then
|
||||
echo >&2 "Command $HELM could not be found."
|
||||
echo >&2 "Please install it as https://helm.sh/"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
if ! command -v $DOCKER &> /dev/null; then
|
||||
if ! command -v "$DOCKER" &> /dev/null; then
|
||||
echo >&2 "Command $DOCKER could not be found. Please install it."
|
||||
echo >&2 ""
|
||||
exit 1
|
||||
@ -171,55 +181,64 @@ if [[ ! -d "$STX_BUILD_HOME/mirrors/starlingx" ]] ; then
|
||||
mkdir -p $STX_BUILD_HOME/mirrors/starlingx || exit 1
|
||||
fi
|
||||
|
||||
# --nuke: just delete the cluster and exit
|
||||
if [[ $DELETE_MINIKUBE -eq 1 ]] ; then
|
||||
if minikube_exists ; then
|
||||
notice "Deleting minikube cluster \`$MINIKUBENAME'"
|
||||
$MINIKUBE delete -p "$MINIKUBENAME" || exit 1
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Stop minikube if necessary
|
||||
WANT_START_MINIKUBE=0
|
||||
if [[ $RESTART_MINIKUBE -eq 1 ]] ; then
|
||||
if minikube_started ; then
|
||||
notice "Stopping minikube cluster \`$MINIKUBENAME'"
|
||||
$MINIKUBE stop -p $MINIKUBENAME
|
||||
if minikube_started ; then
|
||||
echo >&2 "minikube container $MINIKUBENAME exist!"
|
||||
echo >&2 "And the command 'minikube -p $MINIKUBENAME stop' failed. The reason may be"
|
||||
echo >&2 "the current MINIKUBE_HOME/HOME is not the same as the $MINIKUBENAME"
|
||||
echo >&2 "Please change the MINIKUBE_HOME/HOME directory to the previous value"
|
||||
echo >&2 "then re-execute this script"
|
||||
exit 1
|
||||
if [ "$STX_PLATFORM" = "minikube" ]; then
|
||||
# MINIKUBE
|
||||
# --nuke: just delete the cluster and exit
|
||||
if [[ $DELETE_ENV -eq 1 ]] ; then
|
||||
if minikube_exists ; then
|
||||
notice "Deleting minikube cluster \`$MINIKUBENAME'"
|
||||
$MINIKUBE delete -p "$MINIKUBENAME" || exit 1
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Stop minikube if necessary
|
||||
WANT_START_MINIKUBE=0
|
||||
if [[ $RESTART_MINIKUBE -eq 1 ]] ; then
|
||||
if minikube_started ; then
|
||||
notice "Stopping minikube cluster \`$MINIKUBENAME'"
|
||||
$MINIKUBE stop -p $MINIKUBENAME
|
||||
if minikube_started ; then
|
||||
echo >&2 "minikube container $MINIKUBENAME exist!"
|
||||
echo >&2 "And the command 'minikube -p $MINIKUBENAME stop' failed. The reason may be"
|
||||
echo >&2 "the current MINIKUBE_HOME/HOME is not the same as the $MINIKUBENAME"
|
||||
echo >&2 "Please change the MINIKUBE_HOME/HOME directory to the previous value"
|
||||
echo >&2 "then re-execute this script"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
WANT_START_MINIKUBE=1
|
||||
elif ! minikube_started ; then
|
||||
WANT_START_MINIKUBE=1
|
||||
fi
|
||||
|
||||
# Start minikube
|
||||
if [[ $WANT_START_MINIKUBE -eq 1 ]] ; then
|
||||
notice "Starting minikube cluster \`$MINIKUBENAME'"
|
||||
$MINIKUBE start --driver=docker -p $MINIKUBENAME \
|
||||
--cpus=$STX_BUILD_CPUS \
|
||||
--memory=$MINIKUBEMEMORY \
|
||||
--mount=true \
|
||||
--mount-string="$STX_BUILD_HOME:/workspace" \
|
||||
|| exit 1
|
||||
fi
|
||||
|
||||
# Record the project environment variables
|
||||
echo "The last minikube cluster startup date: `date`" > minikube_history.log
|
||||
echo "MINIKUBE_HOME: $MINIKUBE_HOME" >> minikube_history.log
|
||||
echo "MINIKUBENAME: $MINIKUBENAME" >> minikube_history.log
|
||||
echo "STX_BUILD_HOME: $STX_BUILD_HOME" >> minikube_history.log
|
||||
|
||||
# Import minikube's docker environment
|
||||
eval $(minikube -p $MINIKUBENAME docker-env)
|
||||
|
||||
elif [ "$STX_PLATFORM" = "kubernetes" ]; then
|
||||
if [[ $DELETE_ENV -eq 1 ]] ; then
|
||||
notice "--nuke not supported for Kubernetes platform"
|
||||
fi
|
||||
WANT_START_MINIKUBE=1
|
||||
elif ! minikube_started ; then
|
||||
WANT_START_MINIKUBE=1
|
||||
fi
|
||||
|
||||
# Start minikube
|
||||
if [[ $WANT_START_MINIKUBE -eq 1 ]] ; then
|
||||
notice "Starting minikube cluster \`$MINIKUBENAME'"
|
||||
$MINIKUBE start --driver=docker -p $MINIKUBENAME \
|
||||
--cpus=$MINIKUBECPUS \
|
||||
--memory=$MINIKUBEMEMORY \
|
||||
--mount=true \
|
||||
--mount-string="$STX_BUILD_HOME:/workspace" \
|
||||
|| exit 1
|
||||
fi
|
||||
|
||||
# Record the project environment variables
|
||||
echo "The last minikube cluster startup date: `date`" > minikube_history.log
|
||||
echo "MINIKUBE_HOME: $MINIKUBE_HOME" >> minikube_history.log
|
||||
echo "MINIKUBENAME: $MINIKUBENAME" >> minikube_history.log
|
||||
echo "STX_BUILD_HOME: $STX_BUILD_HOME" >> minikube_history.log
|
||||
|
||||
# Import minikube's docker environment
|
||||
eval $(minikube -p $MINIKUBENAME docker-env)
|
||||
|
||||
# Build container images
|
||||
if [[ $BUILD_DOCKER -eq 1 ]] ; then
|
||||
notice "Building docker images"
|
||||
@ -240,3 +259,4 @@ notice "Restarting pods"
|
||||
stx control stop || exit 1
|
||||
stx control start || exit 1
|
||||
|
||||
notice "Run 'stx control status' to check the pod startup status"
|
||||
|
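On the minikube platform the script above effectively boils down to the
following cluster start, with the CPU and memory values taken from import-stx;
on the kubernetes platform no cluster is started and only kubectl, helm,
docker and KUBECONFIG are verified. A sketch of the equivalent minikube
commands:

    # Roughly what stx-init-env runs for STX_PLATFORM=minikube (values from import-stx).
    minikube start --driver=docker -p "$MINIKUBENAME" \
        --cpus="$STX_BUILD_CPUS" \
        --memory="$MINIKUBEMEMORY" \
        --mount=true \
        --mount-string="$STX_BUILD_HOME:/workspace"
    eval "$(minikube -p "$MINIKUBENAME" docker-env)"   # build images against minikube's docker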
@ -1,98 +0,0 @@
|
||||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
from stx import utils # pylint: disable=E0611
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
logger = logging.getLogger('STX-Command')
|
||||
utils.set_logger(logger)
|
||||
|
||||
|
||||
def check_prjdir_env():
|
||||
prjdir_value = os.getenv('PRJDIR', '')
|
||||
if not prjdir_value:
|
||||
logger.warning('Please source the file stx-init-env to export the \
|
||||
PRJDIR variable.')
|
||||
logger.warning('If the minikube had already started, please source \
|
||||
the file import-stx instead.')
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
def get_pods_info():
|
||||
'''Get all pods information of the stx building tools.'''
|
||||
|
||||
cmd = 'minikube -p $MINIKUBENAME kubectl -- get pods '
|
||||
logger.info('stx-tools pods list:')
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def get_deployment_info():
|
||||
'''Get all deployment information of the stx building tools.'''
|
||||
|
||||
cmd = 'minikube -p $MINIKUBENAME kubectl -- get deployment'
|
||||
logger.info('stx-tools deployments list:')
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def get_helm_info():
|
||||
'''Get the helm list information of the stx building tools.'''
|
||||
|
||||
cmd = 'helm ls'
|
||||
logger.info('helm list:\n')
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def get_pod_name(dockername):
|
||||
'''get the detailed pod name from the four pods.'''
|
||||
|
||||
cmd = 'minikube -p $MINIKUBENAME kubectl -- get pods | grep Running| \
|
||||
grep stx-' + dockername + ' | awk \'{print $1}\' '
|
||||
output = subprocess.check_output(cmd, shell=True)
|
||||
podname = str(output.decode('utf8').strip())
|
||||
|
||||
return podname
|
||||
|
||||
|
||||
def helm_release_exists(projectname):
|
||||
'''Check if the helm release exists'''
|
||||
|
||||
cmd = 'helm ls | grep ' + projectname
|
||||
ret = subprocess.getoutput(cmd)
|
||||
if ret:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def generatePrefixCommand(podname, command, enableuser):
|
||||
'''Generate the command executed in the host'''
|
||||
|
||||
prefix_exec_cmd = 'minikube -p $MINIKUBENAME kubectl -- exec -ti '
|
||||
builder_exec_cmd = prefix_exec_cmd + podname
|
||||
prefix_bash_cmd = ' -- bash -l -c '
|
||||
prefix_bash_with_user_cmd = ' -- bash -l -c \'sudo -u ${MYUNAME} bash \
|
||||
--rcfile /home/$MYUNAME/userenv -i -c '
|
||||
builder_exec_bash_cmd = builder_exec_cmd + prefix_bash_cmd
|
||||
builder_exec_bash_with_user_cmd = builder_exec_cmd + \
|
||||
prefix_bash_with_user_cmd
|
||||
|
||||
if enableuser:
|
||||
cmd = builder_exec_bash_with_user_cmd + command
|
||||
else:
|
||||
cmd = builder_exec_bash_cmd + command
|
||||
|
||||
return cmd
|
stx/lib/stx/config.py (new file, 115 lines)
@ -0,0 +1,115 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import os
|
||||
|
||||
from stx import stx_configparser
|
||||
from stx import utils
|
||||
|
||||
logger = logging.getLogger('STX-Config')
|
||||
utils.set_logger(logger)
|
||||
|
||||
|
||||
def require_env(var):
|
||||
value = os.getenv(var)
|
||||
if value is None:
|
||||
logger.error(
|
||||
f'{var} not found in the environment')
|
||||
logger.error(
|
||||
'Please source the file \'import-stx\' to define the ' +
|
||||
f'{var} variable and execute \'stx-init-env\' to start builder pods')
|
||||
raise LookupError(f'{var} not found in the environment!')
|
||||
return value
|
||||
|
||||
|
||||
class Config:
|
||||
"""Configuration interface.
|
||||
|
||||
This class provides a read-only interface to project
|
||||
configuration.
|
||||
|
||||
Usage
|
||||
=====
|
||||
::
|
||||
from stx import config
|
||||
|
||||
# load once
|
||||
config = Config().load()
|
||||
|
||||
# use this instance throughout the app
|
||||
value = config.get ('section', 'key')
|
||||
|
||||
# returns "minikube -p $PROFILE kubectl -n $NAMESPACE --"
|
||||
# or similar
|
||||
# kubectl_command = config.kubectl()
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""Construct an empty instance; must call "load" explicitly before using"""
|
||||
self.prjdir = require_env('PRJDIR')
|
||||
self.config_filename = os.path.join(self.prjdir, 'stx.conf')
|
||||
self.use_minikube = os.getenv('STX_PLATFORM', 'minikube') == 'minikube'
|
||||
if self.use_minikube:
|
||||
self.minikube_profile = require_env('MINIKUBENAME')
|
||||
else:
|
||||
self.k8s_namespace = os.getenv('STX_K8S_NAMESPACE')
|
||||
|
||||
self.build_home = require_env('STX_BUILD_HOME')
|
||||
self.docker_tag = require_env('DOCKER_TAG_LOCAL')
|
||||
self.kubectl_cmd = None
|
||||
self.helm_cmd = None
|
||||
|
||||
def load(self):
|
||||
"""Load stx.conf"""
|
||||
self.data = stx_configparser.STXConfigParser(self.config_filename)
|
||||
self._init_kubectl_cmd()
|
||||
return self
|
||||
|
||||
def get(self, section, key):
|
||||
"""Get a config value"""
|
||||
assert self.data
|
||||
return self.data.getConfig(section, key)
|
||||
|
||||
def impl(self):
|
||||
"""Internal object that stores configuration"""
|
||||
return self.data
|
||||
|
||||
def prjdir(self):
|
||||
"""Path of starlingx/tools checkout"""
|
||||
return self.prjdir
|
||||
|
||||
def kubectl(self):
|
||||
"""Returns the command for invoking kubect"""
|
||||
assert self.data
|
||||
return self.kubectl_cmd
|
||||
|
||||
def helm(self):
|
||||
"""Returns the command for invoking helm"""
|
||||
assert self.data
|
||||
return self.helm_cmd
|
||||
|
||||
def _init_kubectl_cmd(self):
|
||||
# helm
|
||||
self.helm_cmd = 'helm'
|
||||
# kubectl
|
||||
if self.use_minikube:
|
||||
self.kubectl_cmd = f'minikube -p {self.minikube_profile} kubectl --'
|
||||
else:
|
||||
self.kubectl_cmd = 'kubectl'
|
||||
# Kubernetes namespace
|
||||
if self.k8s_namespace:
|
||||
self.kubectl_cmd += f' -n {self.k8s_namespace}'
|
||||
self.helm_cmd += f' -n {self.k8s_namespace}'
|
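The Config class above front-loads the platform decision into a single
kubectl/helm prefix; the strings it produces look roughly like the following
(profile and namespace values are illustrative placeholders):

    # Minikube platform: profile comes from $MINIKUBENAME, no namespace suffix.
    #   config.kubectl()  ->  minikube -p minikube-jdoe-upstream kubectl --
    #   config.helm()     ->  helm
    # Kubernetes platform: plain kubectl/helm scoped to $STX_K8S_NAMESPACE.
    #   config.kubectl()  ->  kubectl -n myproject-ns
    #   config.helm()     ->  helm -n myproject-ns
    kubectl -n myproject-ns get pods     # e.g. what 'stx control status' ends up running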
stx/lib/stx/k8s.py (new file, 91 lines)
@ -0,0 +1,91 @@
|
||||
#
|
||||
# Copyright (c) 2022 Wind River Systems, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
from stx import utils # pylint: disable=E0611
|
||||
import subprocess
|
||||
|
||||
logger = logging.getLogger('STX-k8s')
|
||||
utils.set_logger(logger)
|
||||
|
||||
|
||||
class KubeHelper:
|
||||
"""Common k8s commands"""
|
||||
|
||||
"""Constructor:
|
||||
:param config: an instance of stx.config.Config
|
||||
"""
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
|
||||
def get_pods_info(self):
|
||||
'''Get all pods information of the stx building tools.'''
|
||||
|
||||
cmd = self.config.kubectl() + ' get pods '
|
||||
logger.info('stx-tools pods list:')
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
def get_deployment_info(self):
|
||||
'''Get all deployment information of the stx building tools.'''
|
||||
|
||||
cmd = self.config.kubectl() + ' get deployment'
|
||||
logger.info('stx-tools deployments list:')
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
def get_helm_info(self):
|
||||
'''Get the helm list information of the stx building tools.'''
|
||||
|
||||
cmd = self.config.helm() + ' ls'
|
||||
logger.info('helm list:\n')
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
def get_pod_name(self, dockername):
|
||||
'''get the detailed pod name from the four pods.'''
|
||||
|
||||
cmd = self.config.kubectl() + ' get pods | grep Running | ' + \
|
||||
'grep stx-' + dockername + ' | awk \'{print $1}\' '
|
||||
output = subprocess.check_output(cmd, shell=True)
|
||||
podname = str(output.decode('utf8').strip())
|
||||
|
||||
return podname
|
||||
|
||||
def helm_release_exists(self, projectname):
|
||||
'''Check if the helm release exists'''
|
||||
|
||||
cmd = self.config.helm() + ' ls | grep ' + projectname
|
||||
ret = subprocess.getoutput(cmd)
|
||||
if ret:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def generatePrefixCommand(self, podname, command, enableuser):
|
||||
'''Generate the command executed in the host'''
|
||||
|
||||
prefix_exec_cmd = self.config.kubectl() + ' exec -ti '
|
||||
builder_exec_cmd = prefix_exec_cmd + podname
|
||||
prefix_bash_cmd = ' -- bash -l -c '
|
||||
prefix_bash_with_user_cmd = ' -- bash -l -c \'sudo -u ${MYUNAME} bash \
|
||||
--rcfile /home/$MYUNAME/userenv -i -c '
|
||||
builder_exec_bash_cmd = builder_exec_cmd + prefix_bash_cmd
|
||||
builder_exec_bash_with_user_cmd = builder_exec_cmd + \
|
||||
prefix_bash_with_user_cmd
|
||||
|
||||
if enableuser:
|
||||
cmd = builder_exec_bash_with_user_cmd + command
|
||||
else:
|
||||
cmd = builder_exec_bash_cmd + command
|
||||
|
||||
return cmd
|
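KubeHelper simply prefixes shell fragments with the configured kubectl
command; for instance, entering the builder pod as the build user on the
kubernetes platform expands to something like this (the pod name and the
inner command are placeholders):

    # Approximate shape of generatePrefixCommand(podname, cmd, enableuser=1) output:
    kubectl -n myproject-ns exec -ti stx-myproject-stx-builder-xxxxx \
        -- bash -l -c 'sudo -u ${MYUNAME} bash --rcfile /home/$MYUNAME/userenv -i -c "build-pkgs -a"'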
@ -18,7 +18,7 @@ import logging
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from stx import command # pylint: disable=E0611
|
||||
from stx.k8s import KubeHelper
|
||||
from stx import utils # pylint: disable=E0611
|
||||
|
||||
STX_BUILD_TYPES = ['rt', 'std']
|
||||
@ -28,7 +28,9 @@ STX_LAYERS = ['distro', 'flock']
|
||||
class HandleBuildTask:
|
||||
'''Handle the task for the build sub-command'''
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.k8s = KubeHelper(config)
|
||||
self.logger = logging.getLogger('STX-Build')
|
||||
utils.set_logger(self.logger)
|
||||
|
||||
@ -125,7 +127,7 @@ class HandleBuildTask:
|
||||
|
||||
self.logger.setLevel(args.loglevel)
|
||||
|
||||
podname = command.get_pod_name('builder')
|
||||
podname = self.k8s.get_pod_name('builder')
|
||||
if not podname:
|
||||
self.logger.error('The builder container does not exist, ' +
|
||||
'so please use the control module to start.')
|
||||
@ -135,7 +137,7 @@ class HandleBuildTask:
|
||||
|
||||
bashcmd = "\'find /home/${MYUNAME}/prepare-build.done "
|
||||
bashcmd += "&>/dev/null\'"
|
||||
cmd = command.generatePrefixCommand(podname, bashcmd, 0)
|
||||
cmd = self.k8s.generatePrefixCommand(podname, bashcmd, 0)
|
||||
|
||||
ret = subprocess.call(cmd, shell=True)
|
||||
if ret != 0:
|
||||
@ -148,7 +150,7 @@ class HandleBuildTask:
|
||||
'***********************************')
|
||||
sys.exit(1)
|
||||
|
||||
prefix_cmd = command.generatePrefixCommand(podname, '', 1)
|
||||
prefix_cmd = self.k8s.generatePrefixCommand(podname, '', 1)
|
||||
|
||||
if args.build_task == 'image':
|
||||
cmd = self.buildImageCMD(args, prefix_cmd)
|
||||
|
@ -22,7 +22,7 @@ from stx import helper # pylint: disable=E0611
|
||||
from stx import utils # pylint: disable=E0611
|
||||
import sys
|
||||
|
||||
logger = logging.getLogger('STX-Config')
|
||||
logger = logging.getLogger('STX-Config-Parser')
|
||||
utils.set_logger(logger)
|
||||
|
||||
|
||||
@ -128,8 +128,8 @@ class STXConfigParser:
|
||||
class HandleConfigTask:
|
||||
'''Handle the task for the config sub-command'''
|
||||
|
||||
def __init__(self):
|
||||
self.stxconfig = STXConfigParser()
|
||||
def __init__(self, config):
|
||||
self.stxconfig = config.impl()
|
||||
|
||||
def handleShow(self):
|
||||
self.stxconfig.showAll()
|
||||
|
@ -21,9 +21,8 @@ import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
from stx import command # pylint: disable=E0611
|
||||
from stx import helper # pylint: disable=E0611
|
||||
from stx import stx_configparser # pylint: disable=E0611
|
||||
from stx.k8s import KubeHelper
|
||||
from stx import utils # pylint: disable=E0611
|
||||
|
||||
helmchartdir = 'stx/stx-build-tools-chart/stx-builder'
|
||||
@ -32,9 +31,10 @@ helmchartdir = 'stx/stx-build-tools-chart/stx-builder'
|
||||
class HandleControlTask:
|
||||
'''Handle the task for the control sub-command'''
|
||||
|
||||
def __init__(self):
|
||||
self.stxconfig = stx_configparser.STXConfigParser()
|
||||
self.projectname = self.stxconfig.getConfig('project', 'name')
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.k8s = KubeHelper(config)
|
||||
self.projectname = self.config.get('project', 'name')
|
||||
self.logger = logging.getLogger('STX-Control')
|
||||
utils.set_logger(self.logger)
|
||||
|
||||
@ -46,9 +46,9 @@ class HandleControlTask:
|
||||
remote_cmd = ' -- bash /etc/pulp/changepasswd'
|
||||
pulpname = ' stx-pulp'
|
||||
while count:
|
||||
podname = command.get_pod_name(pulpname)
|
||||
podname = self.k8s.get_pod_name(pulpname)
|
||||
if podname:
|
||||
cmd = 'minikube -p $MINIKUBENAME kubectl -- exec -ti '
|
||||
cmd = self.config.kubectl() + ' exec -ti '
|
||||
cmd = cmd + podname + remote_cmd
|
||||
subprocess.call(cmd, shell=True)
|
||||
count = 0
|
||||
@ -63,31 +63,30 @@ class HandleControlTask:
|
||||
def finish_configure(self):
|
||||
'''Before starting, we need to finish the setup'''
|
||||
|
||||
max_cpus = os.environ['MINIKUBECPUS']
|
||||
max_cpus = os.environ['STX_BUILD_CPUS']
|
||||
|
||||
projectname = self.stxconfig.getConfig('project', 'name')
|
||||
builder_uid = self.stxconfig.getConfig('builder', 'uid')
|
||||
builder_myuname = self.stxconfig.getConfig('builder', 'myuname')
|
||||
builder_release = self.stxconfig.getConfig('builder', 'release')
|
||||
builder_dist = self.stxconfig.getConfig('builder', 'dist')
|
||||
builder_stx_dist = self.stxconfig.getConfig('builder', 'stx_dist')
|
||||
builder_debfullname = self.stxconfig.getConfig('builder',
|
||||
'debfullname')
|
||||
builder_debemail = self.stxconfig.getConfig('builder', 'debemail')
|
||||
repomgr_type = self.stxconfig.getConfig('repomgr', 'type')
|
||||
gituser = self.stxconfig.getConfig('project', 'gituser')
|
||||
gitemail = self.stxconfig.getConfig('project', 'gitemail')
|
||||
proxy = self.stxconfig.getConfig('project', 'proxy')
|
||||
proxyserver = self.stxconfig.getConfig('project', 'proxyserver')
|
||||
proxyport = self.stxconfig.getConfig('project', 'proxyport')
|
||||
buildbranch = self.stxconfig.getConfig('project', 'buildbranch')
|
||||
manifest = self.stxconfig.getConfig('project', 'manifest')
|
||||
cengnurl = self.stxconfig.getConfig('repomgr', 'cengnurl')
|
||||
cengnstrategy = self.stxconfig.getConfig('repomgr', 'cengnstrategy')
|
||||
sourceslist = self.stxconfig.getConfig('repomgr', 'sourceslist')
|
||||
deblist = self.stxconfig.getConfig('repomgr', 'deblist')
|
||||
dsclist = self.stxconfig.getConfig('repomgr', 'dsclist')
|
||||
ostree_osname = self.stxconfig.getConfig('project', 'ostree_osname')
|
||||
projectname = self.config.get('project', 'name')
|
||||
builder_uid = self.config.get('builder', 'uid')
|
||||
builder_myuname = self.config.get('builder', 'myuname')
|
||||
builder_release = self.config.get('builder', 'release')
|
||||
builder_dist = self.config.get('builder', 'dist')
|
||||
builder_stx_dist = self.config.get('builder', 'stx_dist')
|
||||
builder_debfullname = self.config.get('builder', 'debfullname')
|
||||
builder_debemail = self.config.get('builder', 'debemail')
|
||||
repomgr_type = self.config.get('repomgr', 'type')
|
||||
gituser = self.config.get('project', 'gituser')
|
||||
gitemail = self.config.get('project', 'gitemail')
|
||||
proxy = self.config.get('project', 'proxy')
|
||||
proxyserver = self.config.get('project', 'proxyserver')
|
||||
proxyport = self.config.get('project', 'proxyport')
|
||||
buildbranch = self.config.get('project', 'buildbranch')
|
||||
manifest = self.config.get('project', 'manifest')
|
||||
cengnurl = self.config.get('repomgr', 'cengnurl')
|
||||
cengnstrategy = self.config.get('repomgr', 'cengnstrategy')
|
||||
sourceslist = self.config.get('repomgr', 'sourceslist')
|
||||
deblist = self.config.get('repomgr', 'deblist')
|
||||
dsclist = self.config.get('repomgr', 'dsclist')
|
||||
ostree_osname = self.config.get('project', 'ostree_osname')
|
||||
if sourceslist:
|
||||
if not (deblist or dsclist):
|
||||
self.logger.warning('*************************************\
|
||||
@ -98,7 +97,7 @@ when sourceslist is enabled!!!')
|
||||
*********************************')
|
||||
sys.exit(1)
|
||||
|
||||
repomgr_type = self.stxconfig.getConfig('repomgr', 'type')
|
||||
repomgr_type = self.config.get('repomgr', 'type')
|
||||
if repomgr_type not in ('aptly', 'pulp'):
|
||||
self.logger.warning('Repomgr type only supports [aptly] or [pulp],\
|
||||
please modify the value with config command!!!')
|
||||
@ -183,9 +182,16 @@ stx-pkgbuilder/configmap/')
|
||||
return repomgr_type
|
||||
|
||||
def handleStartTask(self, projectname):
|
||||
cmd = 'helm install ' + projectname + ' ' + helmchartdir
|
||||
cmd = self.config.helm() + ' install ' + projectname + ' ' + helmchartdir \
|
||||
+ ' --set global.image.tag=' + self.config.docker_tag
|
||||
|
||||
if not self.config.use_minikube:
|
||||
# Override hostDir for k8s local host mount
|
||||
# need to review this to support multi node (PV/PVCs)
|
||||
cmd += ' --set global.hostDir=' + self.config.build_home
|
||||
|
||||
self.logger.debug('Execute the helm start command: %s', cmd)
|
||||
helm_status = command.helm_release_exists(self.projectname)
|
||||
helm_status = self.k8s.helm_release_exists(self.projectname)
|
||||
if helm_status:
|
||||
self.logger.warning('The helm release %s already exists - nothing to do',
|
||||
projectname)
|
||||
@ -196,9 +202,9 @@ stx-pkgbuilder/configmap/')
|
||||
self.configurePulp()
|
||||
|
||||
def handleStopTask(self, projectname):
|
||||
helm_status = command.helm_release_exists(self.projectname)
|
||||
helm_status = self.k8s.helm_release_exists(self.projectname)
|
||||
if helm_status:
|
||||
cmd = 'helm uninstall ' + projectname
|
||||
cmd = self.config.helm() + ' uninstall ' + projectname
|
||||
self.logger.debug('Execute the helm stop command: %s', cmd)
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
else:
|
||||
@ -206,11 +212,10 @@ stx-pkgbuilder/configmap/')
|
||||
projectname)
|
||||
|
||||
def handleUpgradeTask(self, projectname):
|
||||
command.check_prjdir_env()
|
||||
self.finish_configure()
|
||||
helm_status = command.helm_release_exists(self.projectname)
|
||||
helm_status = self.k8s.helm_release_exists(self.projectname)
|
||||
if helm_status:
|
||||
cmd = 'helm upgrade ' + projectname + ' ' + helmchartdir
|
||||
cmd = self.config.helm() + ' upgrade ' + projectname + ' ' + helmchartdir
|
||||
self.logger.debug('Execute the upgrade command: %s', cmd)
|
||||
subprocess.call(cmd, shell=True, cwd=os.environ['PRJDIR'])
|
||||
else:
|
||||
@ -221,7 +226,7 @@ stx-pkgbuilder/configmap/')
|
||||
def handleEnterTask(self, args):
|
||||
default_docker = 'builder'
|
||||
container_list = ['builder', 'pkgbuilder', 'repomgr', 'lat']
|
||||
prefix_exec_cmd = 'minikube -p $MINIKUBENAME kubectl -- exec -ti '
|
||||
prefix_exec_cmd = self.config.kubectl() + ' exec -ti '
|
||||
|
||||
if args.dockername:
|
||||
if args.dockername not in container_list:
|
||||
@ -230,7 +235,7 @@ argument. eg: %s \n', container_list)
|
||||
sys.exit(1)
|
||||
default_docker = args.dockername
|
||||
|
||||
podname = command.get_pod_name(default_docker)
|
||||
podname = self.k8s.get_pod_name(default_docker)
|
||||
if podname:
|
||||
if default_docker == 'builder':
|
||||
cmd = prefix_exec_cmd + podname
|
||||
@ -251,7 +256,7 @@ enter has been started!!!\n')
|
||||
def handleControl(self, args):
|
||||
|
||||
self.logger.setLevel(args.loglevel)
|
||||
projectname = self.stxconfig.getConfig('project', 'name')
|
||||
projectname = self.config.get('project', 'name')
|
||||
if not projectname:
|
||||
projectname = 'stx'
|
||||
|
||||
@ -268,9 +273,9 @@ enter has been started!!!\n')
|
||||
self.handleEnterTask(args)
|
||||
|
||||
elif args.ctl_task == 'status':
|
||||
command.get_helm_info()
|
||||
command.get_deployment_info()
|
||||
command.get_pods_info()
|
||||
self.k8s.get_helm_info()
|
||||
self.k8s.get_deployment_info()
|
||||
self.k8s.get_pods_info()
|
||||
|
||||
else:
|
||||
self.logger.error('Control module doesn\'t support your \
|
||||
|
@ -15,7 +15,7 @@
|
||||
import argparse
|
||||
import logging
|
||||
|
||||
from stx import command # pylint: disable=E0611
|
||||
from stx import config
|
||||
from stx import stx_build # pylint: disable=E0611
|
||||
from stx import stx_configparser # pylint: disable=E0611
|
||||
from stx import stx_control # pylint: disable=E0611
|
||||
@ -34,10 +34,11 @@ class CommandLine:
|
||||
'''Handles parsing the commandline parameters for stx tool'''
|
||||
|
||||
def __init__(self):
|
||||
command.check_prjdir_env()
|
||||
self.handleconfig = stx_configparser.HandleConfigTask()
|
||||
self.handlecontrol = stx_control.HandleControlTask()
|
||||
self.handlebuild = stx_build.HandleBuildTask()
|
||||
self.config = config.Config().load()
|
||||
self.handleconfig = stx_configparser.HandleConfigTask(self.config)
|
||||
self.handlecontrol = stx_control.HandleControlTask(self.config)
|
||||
self.handlebuild = stx_build.HandleBuildTask(self.config)
|
||||
self.handlerepomgr = stx_repomgr.HandleRepomgrTask(self.config)
|
||||
self.parser = self.parseCommandLine()
|
||||
|
||||
def parseCommandLine(self):
|
||||
@ -132,7 +133,7 @@ delete_pkg ]')
|
||||
help='[ list|download|sync|mirror|clean|\
|
||||
remove_repo|upload_pkg|delete_pkg ]: \
|
||||
Execute the management task.\n\n')
|
||||
repo_subparser.set_defaults(handle=stx_repomgr.handleRepomgr)
|
||||
repo_subparser.set_defaults(handle=self.handlerepomgr.handleCommand)
|
||||
|
||||
parser.add_argument('-d', '--debug',
|
||||
help='Enable debug output\n\n',
|
||||
|
@ -13,32 +13,37 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from stx import command # pylint: disable=E0611
|
||||
from stx.k8s import KubeHelper
|
||||
from stx import utils # pylint: disable=E0611
|
||||
import subprocess
|
||||
|
||||
|
||||
logger = logging.getLogger('STX-Repomgr')
|
||||
utils.set_logger(logger)
|
||||
|
||||
|
||||
def handleRepomgr(args):
|
||||
'''Sync the repo '''
|
||||
class HandleRepomgrTask:
|
||||
|
||||
logger.setLevel(args.loglevel)
|
||||
logger.debug('Execute the repomgr command: [%s]', args.repomgr_task)
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.k8s = KubeHelper(config)
|
||||
|
||||
podname = command.get_pod_name('builder')
|
||||
if not podname:
|
||||
logger.error('The builder container does not exist, so please \
|
||||
consider to use the control module')
|
||||
def handleCommand(self, args):
|
||||
'''Sync the repo '''
|
||||
|
||||
prefix_cmd = command.generatePrefixCommand(podname, '', 1)
|
||||
cmd = prefix_cmd + '"repo_manage.py ' + args.repomgr_task + '"\''
|
||||
logger.debug('Manage the repo with the command [%s]', cmd)
|
||||
logger.setLevel(args.loglevel)
|
||||
logger.debug('Execute the repomgr command: [%s]', args.repomgr_task)
|
||||
|
||||
try:
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
raise Exception('Failed to manage the repo with the command [%s].\n \
|
||||
Returncode: %s' % (cmd, exc.returncode))
|
||||
podname = self.k8s.get_pod_name('builder')
|
||||
if not podname:
|
||||
logger.error('The builder container does not exist, so please \
|
||||
consider to use the control module')
|
||||
|
||||
prefix_cmd = self.k8s.generatePrefixCommand(podname, '', 1)
|
||||
cmd = prefix_cmd + '"repo_manage.py ' + args.repomgr_task + '"\''
|
||||
logger.debug('Manage the repo with the command [%s]', cmd)
|
||||
|
||||
try:
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
except subprocess.CalledProcessError as exc:
|
||||
raise Exception('Failed to manage the repo with the command [%s].\n \
|
||||
Returncode: %s' % (cmd, exc.returncode))
|
||||
|
@ -31,7 +31,7 @@ spec:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
tty: true
|
||||
volumeMounts:
|
||||
@ -44,7 +44,7 @@ spec:
|
||||
volumes:
|
||||
- name: {{ .Values.volumes.name }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.hostPath.path }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.hostPath.path }}
|
||||
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
|
@ -22,7 +22,7 @@ volumeMounts:
|
||||
volumes:
|
||||
name: shared-workspace
|
||||
hostPath:
|
||||
path: /workspace/aptly
|
||||
path: /aptly
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
|
@ -31,7 +31,7 @@ spec:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
tty: true
|
||||
volumeMounts:
|
||||
@ -46,11 +46,12 @@ spec:
|
||||
volumes:
|
||||
- name: {{ .Values.volumes.name }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.hostPath.path }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.hostPath.path }}
|
||||
- name: {{ .Values.volumes.entropyname }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.entropyhostPath.path }}
|
||||
|
||||
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
|
@ -20,7 +20,7 @@ volumeMounts:
|
||||
volumes:
|
||||
name: latd-shared-workspace
|
||||
hostPath:
|
||||
path: /workspace/localdisk
|
||||
path: /localdisk
|
||||
entropyname: entropydevice
|
||||
entropyhostPath:
|
||||
path: /dev/urandom
|
||||
|
@ -2,6 +2,6 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Values.volumes.configmapname }}
|
||||
name: {{ .Values.volumes.envsetup.configMapName }}
|
||||
data:
|
||||
{{ (.Files.Glob "configmap/stx*").AsConfig | indent 2 }}
|
||||
|
@ -31,30 +31,30 @@ spec:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
tty: true
|
||||
volumeMounts:
|
||||
- name: {{ .Values.volumeMounts.name }}
|
||||
mountPath: {{ .Values.volumeMounts.mountPath}}
|
||||
- name: {{ .Values.volumeMounts.envsetupname }}
|
||||
mountPath: {{ .Values.volumeMounts.envsetupmountPath}}
|
||||
- name: {{ .Values.volumeMounts.mirrorName }}
|
||||
mountPath: {{ .Values.volumeMounts.mirrorMountPath }}
|
||||
- name: {{ .Values.volumeMounts.workspace.name }}
|
||||
mountPath: {{ .Values.volumeMounts.workspace.mountPath}}
|
||||
- name: {{ .Values.volumeMounts.envsetup.name }}
|
||||
mountPath: {{ .Values.volumeMounts.envsetup.mountPath }}
|
||||
- name: {{ .Values.volumeMounts.mirror.name }}
|
||||
mountPath: {{ .Values.volumeMounts.mirror.mountPath }}
|
||||
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
|
||||
volumes:
|
||||
- name: {{ .Values.volumes.name }}
|
||||
- name: {{ .Values.volumes.workspace.name }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.hostPath.path }}
|
||||
- name: {{ .Values.volumes.envsetupname }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.workspace.hostPath.path }}
|
||||
- name: {{ .Values.volumes.envsetup.name }}
|
||||
configMap:
|
||||
name: {{ .Values.volumes.configmapname }}
|
||||
- name: {{ .Values.volumes.mirrorName }}
|
||||
name: {{ .Values.volumes.envsetup.configMapName }}
|
||||
- name: {{ .Values.volumes.mirror.name }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.mirrorHostPath.path }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.mirror.hostPath.path }}
|
||||
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
|
@ -12,22 +12,28 @@ image:
|
||||
tag: "v0.1.0"
|
||||
|
||||
volumeMounts:
|
||||
name: shared-workspace
|
||||
mountPath: /localdisk
|
||||
envsetupname: env-setting
|
||||
envsetupmountPath: /usr/local/bin/stx
|
||||
mirrorName: mirror
|
||||
mirrorMountPath: /import/mirrors/starlingx
|
||||
workspace:
|
||||
name: shared-workspace
|
||||
mountPath: /localdisk
|
||||
envsetup:
|
||||
name: env-setting
|
||||
mountPath: /usr/local/bin/stx
|
||||
mirror:
|
||||
name: mirror
|
||||
mountPath: /import/mirrors/starlingx
|
||||
|
||||
volumes:
|
||||
name: shared-workspace
|
||||
hostPath:
|
||||
path: /workspace/localdisk
|
||||
envsetupname: env-setting
|
||||
configmapname: pkgbuilder
|
||||
mirrorName: mirror
|
||||
mirrorHostPath:
|
||||
path: /workspace/mirrors/starlingx
|
||||
workspace:
|
||||
name: shared-workspace
|
||||
hostPath:
|
||||
path: /localdisk
|
||||
envsetup:
|
||||
name: env-setting
|
||||
configMapName: pkgbuilder
|
||||
mirror:
|
||||
name: mirror
|
||||
hostPath:
|
||||
path: /mirrors/starlingx
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
|
@ -31,7 +31,7 @@ spec:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
tty: true
|
||||
volumeMounts:
|
||||
@ -53,13 +53,13 @@ spec:
|
||||
name: {{ .Values.volumes.configmapname }}
|
||||
- name: {{ .Values.volumes.storagename }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.storagehostPath.path }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.storagehostPath.path }}
|
||||
- name: {{ .Values.volumes.pgsqlname }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.pgsqlhostPath.path }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.pgsqlhostPath.path }}
|
||||
- name: {{ .Values.volumes.containersname }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.containershostPath.path }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.containershostPath.path }}
|
||||
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
|
@ -26,13 +26,13 @@ volumes:
|
||||
configmapname: pulp
|
||||
storagename: pulp-storage
|
||||
storagehostPath:
|
||||
path: /workspace/pulp/pulp-storage
|
||||
path: /pulp/pulp-storage
|
||||
pgsqlname: pulp-pgsql
|
||||
pgsqlhostPath:
|
||||
path: /workspace/pulp/pulp-pgsql
|
||||
path: /pulp/pulp-pgsql
|
||||
containersname: pulp-containers
|
||||
containershostPath:
|
||||
path: /workspace/pulp/pulp-containers
|
||||
path: /pulp/pulp-containers
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
|
@ -2,6 +2,6 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Values.volumes.configmapname }}
|
||||
name: {{ .Values.volumes.userSetting.configMapName }}
|
||||
data:
|
||||
{{ (.Files.Glob "configmap/stx*").AsConfig | indent 2 }}
|
||||
|
@ -31,30 +31,30 @@ spec:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.global.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
tty: true
|
||||
volumeMounts:
|
||||
- name: {{ .Values.volumeMounts.name }}
|
||||
mountPath: {{ .Values.volumeMounts.mountPath}}
|
||||
- name: {{ .Values.volumeMounts.usersetupname }}
|
||||
mountPath: {{ .Values.volumeMounts.usersetupmountPath}}
|
||||
- name: {{ .Values.volumeMounts.mirrorName }}
|
||||
mountPath: {{ .Values.volumeMounts.mirrorMountPath }}
|
||||
- name: {{ .Values.volumeMounts.workspace.name }}
|
||||
mountPath: {{ .Values.volumeMounts.workspace.mountPath}}
|
||||
- name: {{ .Values.volumeMounts.userSetting.name }}
|
||||
mountPath: {{ .Values.volumeMounts.userSetting.mountPath}}
|
||||
- name: {{ .Values.volumeMounts.mirror.name }}
|
||||
mountPath: {{ .Values.volumeMounts.mirror.mountPath }}
|
||||
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
|
||||
volumes:
|
||||
- name: {{ .Values.volumes.name }}
|
||||
- name: {{ .Values.volumes.workspace.name }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.hostPath.path }}
|
||||
- name: {{ .Values.volumes.usersetupname }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.workspace.hostPath.path }}
|
||||
- name: {{ .Values.volumes.userSetting.name }}
|
||||
configMap:
|
||||
name: {{ .Values.volumes.configmapname }}
|
||||
- name: {{ .Values.volumes.mirrorName }}
|
||||
name: {{ .Values.volumes.userSetting.configMapName }}
|
||||
- name: {{ .Values.volumes.mirror.name }}
|
||||
hostPath:
|
||||
path: {{ .Values.volumes.mirrorHostPath.path }}
|
||||
path: {{ .Values.global.hostDir }}{{ .Values.volumes.mirror.hostPath.path }}
|
||||
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
|
@ -8,26 +8,41 @@ replicaCount: 1
|
||||
image:
|
||||
repository: stx-builder
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: "v0.1.0"
|
||||
|
||||
global:
|
||||
# This is the host source directory, all the other directories are under it
|
||||
# On minikube this is the /workspace, on K8s we can customize to any host directory
|
||||
# In the future we should move to PV/PVCs
|
||||
hostDir: /workspace
|
||||
image:
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
# Setting as global so all dependencies can use the same TAG
|
||||
# Not sure if this is the best way to handle this
|
||||
tag: "v.0.1.0"
|
||||
|
||||
volumeMounts:
|
||||
name: shared-workspace
|
||||
mountPath: /localdisk
|
||||
usersetupname: user-setting
|
||||
usersetupmountPath: /usr/local/bin/stx
|
||||
mirrorName: mirror
|
||||
mirrorMountPath: /import/mirrors/starlingx
|
||||
workspace:
|
||||
name: shared-workspace
|
||||
mountPath: /localdisk
|
||||
userSetting:
|
||||
name: user-setting
|
||||
mountPath: /usr/local/bin/stx
|
||||
mirror:
|
||||
name: mirror
|
||||
mountPath: /import/mirrors/starlingx
|
||||
|
||||
volumes:
|
||||
name: shared-workspace
|
||||
hostPath:
|
||||
path: /workspace/localdisk
|
||||
usersetupname: user-setting
|
||||
configmapname: builder
|
||||
mirrorName: mirror
|
||||
mirrorHostPath:
|
||||
path: /workspace/mirrors/starlingx
|
||||
workspace:
|
||||
name: shared-workspace
|
||||
hostPath:
|
||||
path: /localdisk
|
||||
userSetting:
|
||||
name: user-setting
|
||||
configMapName: builder
|
||||
mirror:
|
||||
name: mirror
|
||||
hostPath:
|
||||
path: /mirrors/starlingx
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
|
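Tying the chart changes together: every hostPath is now prefixed with
global.hostDir, and 'stx control start' overrides both the image tag and that
prefix on the kubernetes platform. Approximately what the control module
issues and how the defaults render; the project, user and paths below are
placeholders:

    # Helm release installed by 'stx control start' on k8s (sketch, values illustrative):
    helm -n "$STX_K8S_NAMESPACE" install stx stx/stx-build-tools-chart/stx-builder \
        --set global.image.tag="${USER}-${PROJECT}-v0.1.0" \
        --set global.hostDir="$STX_BUILD_HOME"

    # Default rendering on minikube: global.hostDir=/workspace, so the builder's shared
    # workspace mounts /workspace/localdisk; with the override below it becomes
    # $STX_BUILD_HOME/localdisk on the k8s host.
    helm template stx stx/stx-build-tools-chart/stx-builder \
        --set global.hostDir=/localdisk/designer/jdoe/myproject | grep -B1 -A2 hostPath: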
@ -37,12 +37,12 @@ To start a fresh source tree:
    repo init -u https://opendev.org/starlingx/manifest.git -m default.xml
    repo sync

To download the sources & 3rd-party to local mirror:
    downloader -b -s

To build all packages:
    build-pkgs -a | build-pkgs -p <packageA,packageB...>

To fill local binary repo:
    debdownloader <path binary package list>

To make image:
    build-image [ -t std|rt ]