
The LAT deploy directory is owned by root because the LAT container's default user is root. In older versions the deploy directory's permissions were rwxrwxrwx because, when secure boot was enabled, LAT changed them from rwxr-xr-x to rwxrwxrwx before writing the initramfs sig files into the deploy dir.

The secure boot enhancement commits change the default secure boot switch EFI_SECURE_BOOT to "disable". They also remove all the hard-coded signing steps in LAT, including the permission change, because they are no longer needed; the signing is now done in the LAT hook functions. However, signing the iso (especially the signing done in the jenkins script) still needs to write its sig file into the deploy dir.

Rather than touching LAT again, change the owner of the deploy directory to the builder's default user in build-image.

Test Plan:
Pass: build-image
Pass: check that the user/group of /localdisk/deploy and of the files/directories under it are correct.

Story: 2008846
Task: 46366

Signed-off-by: Li Zhou <li.zhou@windriver.com>
Change-Id: I6de16e43447116a7bbff08d820336d147725d628
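In build-image the fix amounts to a single chown step, run after latd has finished and before the ISO is signed with the developer key, as it appears in the script below:

    os.system('sudo chown -R ${USER}: ' + LAT_ROOT + '/deploy')

With LAT_ROOT set to '/localdisk', this resolves to /localdisk/deploy, the directory checked in the test plan.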
#!/usr/bin/python3

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2021-2022 Wind River Systems,Inc

import argparse
import discovery
import getopt
import logging
import os
import re
import repo_manage
import shutil
import signal
import subprocess
import sys
import time
import utils
import yaml

STX_DEFAULT_DISTRO = discovery.STX_DEFAULT_DISTRO
ALL_LAYERS = discovery.get_all_layers(distro=STX_DEFAULT_DISTRO)
ALL_BUILD_TYPES = discovery.get_all_build_types(distro=STX_DEFAULT_DISTRO)

LAT_ROOT = '/localdisk'
REPO_ALL = 'deb-merge-all'
REPO_BINARY = 'deb-local-binary'
REPO_BUILD = 'deb-local-build'
DEB_CONFIG_DIR = 'stx-tools/debian-mirror-tools/config/'
PKG_LIST_DIR = os.path.join(os.environ.get('MY_REPO_ROOT_DIR'), DEB_CONFIG_DIR)
CERT_FILE = 'cgcs-root/public-keys/TiBoot.crt'
CERT_PATH = os.path.join(os.environ.get('MY_REPO_ROOT_DIR'), CERT_FILE)
img_pkgs = []
kernel_type = 'std'
stx_std_kernel = 'linux-image-5.10.0-6-amd64-unsigned'
stx_rt_kernel = 'linux-rt-image-5.10.0-6-rt-amd64-unsigned'
WAIT_TIME_BEFORE_CHECKING_LOG = 2
DEFAULT_TIME_WAIT_LOG = 15

logger = logging.getLogger('build-image')
utils.set_logger(logger)


def merge_local_repos(repomgr):
    logger.debug('Calls repo manager to create/update the snapshot %s which is merged from local repositories', REPO_ALL)
    # REPO_BUILD has higher priority than REPO_BINARY when repomgr selects packages
    try:
        pubname = repomgr.merge(REPO_ALL, ','.join([REPO_BUILD, REPO_BINARY]))
    except Exception as e:
        logger.error(str(e))
        logger.error('Exception when repo_manager creates/updates snapshot %s', REPO_ALL)
        return False
    if pubname:
        logger.debug('repo manager successfully created/updated snapshot %s', REPO_ALL)
    else:
        logger.debug('repo manager failed to create/update snapshot %s', REPO_ALL)
        return False
    return True


def update_debootstrap_mirror(img_yaml):
    repomgr_url = os.environ.get('REPOMGR_DEPLOY_URL')
    if not repomgr_url:
        logger.error('REPOMGR_DEPLOY_URL is not in the current sys ENV')
        return False

    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        if not yaml_doc['debootstrap-mirror']:
            logger.warning("There is no debootstrap-mirror in %s", img_yaml)
        else:
            mirror = yaml_doc['debootstrap-mirror']
            if mirror == REPO_ALL:
                yaml_doc['debootstrap-mirror'] = os.path.join(repomgr_url, REPO_ALL)
            else:
                yaml_doc['debootstrap-mirror'] = os.environ.get('DEBIAN_SNAPSHOT')
        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
        logger.debug('Updating %s, setting debootstrap_mirror to %s', img_yaml, yaml_doc['debootstrap-mirror'])
        return True
    except IOError as e:
        logger.error(str(e))
        logger.debug('Failed to update the debootstrap-mirror in %s', img_yaml)
        return False


def update_ostree_osname(img_yaml):

    ostree_osname = os.environ.get('OSTREE_OSNAME')
    if ostree_osname is None:
        return False

    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
        yaml_doc['ostree']['ostree_osname'] = ostree_osname
        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    except IOError as e:
        logger.error(str(e))
        return False

    logger.debug(' '.join(['Update', img_yaml, 'to update the ostree_osname']))
    return True


def change_default_kernel(img_yaml, ktype):

    rt_kernel = std_kernel = None
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
            multi_kernels = yaml_doc["multiple-kernels"].split(" ")
            default_kernel = yaml_doc["default-kernel"]
            if len(multi_kernels) == 1:
                return False
            for kernel in multi_kernels:
                if re.search("-rt-", kernel):
                    rt_kernel = kernel
                else:
                    std_kernel = kernel
            if ktype == "rt":
                if re.search("-rt-", default_kernel):
                    return True
                elif rt_kernel is not None:
                    yaml_doc["default-kernel"] = rt_kernel
                else:
                    logger.error(f"No rt kernel is found in {multi_kernels}")
                    return False
            elif ktype == "std":
                if not re.search("-rt-", default_kernel):
                    return True
                elif std_kernel is not None:
                    yaml_doc["default-kernel"] = std_kernel
                else:
                    logger.error(f"No std kernel is found in {multi_kernels}")
                    return False

            logger.debug(f'Set default kernel as {yaml_doc["default-kernel"]}')
            try:
                with open(img_yaml, 'w') as f:
                    yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
            except IOError as e:
                logger.error(str(e))
                return False

    except IOError as e:
        logger.error(str(e))
        return False

    return True


def replace_in_yaml(dst_yaml, field, field_type, src_str, dst_str):
    logger.debug("Start to replace %s in field %s of yaml %s", src_str, field, dst_yaml)

    try:
        with open(dst_yaml) as f:
            main_doc = yaml.safe_load(f)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to open the yaml file %s", dst_yaml)
        return False
    else:
        if field_type == 'yaml_string':
            string_orig = main_doc[field]
            if not string_orig:
                logger.error("Failed to find the field %s", field)
                return False
            if not string_orig == src_str:
                logger.error("Found field %s, but the value %s does not match target %s", field, string_orig, src_str)
                return False
            main_doc[field] = dst_str
            logger.debug("Successfully updated the field %s with %s", field, dst_str)
        elif field_type == 'yaml_list':
            list_new = []
            list_orig = main_doc[field]
            if not list_orig:
                logger.error("Failed to find the field %s", field)
                return False
            for item in list_orig:
                list_new.append(item.replace(src_str, dst_str))
            main_doc[field] = list_new
            logger.debug("Successfully updated the value %s of field %s with %s", src_str, field, dst_str)
        elif field_type == 'yaml_list_suffix':
            list_new = []
            list_orig = main_doc[field]
            if not list_orig:
                logger.error("Failed to find the field %s", field)
                return False
            for item in list_orig:
                if src_str in item:
                    if '=' in item:
                        logger.error("Package version is defined, can't be appended with suffix %s", dst_str)
                        return False
                    list_new.append(item.strip() + dst_str)
                else:
                    list_new.append(item)
            main_doc[field] = list_new
            logger.debug("Successfully updated %s in field %s with %s suffix", src_str, field, dst_str)

    try:
        with open(dst_yaml, 'w') as f:
            yaml.safe_dump(main_doc, f, default_flow_style=False, sort_keys=False)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to write to %s", dst_yaml)
        return False
    logger.info("Successfully updated %s", dst_yaml)
    return True


def update_rt_kernel_in_main_yaml(main_yaml):
    return replace_in_yaml(main_yaml, 'rootfs-pre-scripts', 'yaml_list', stx_std_kernel, stx_rt_kernel)


def update_rt_kernel_in_initramfs_yaml(initramfs_yaml):
    ret = False

    # Update the rt kernel module package names with the '-rt' suffix
    for layer in ALL_LAYERS:
        pkg_dirs = discovery.package_dir_list(distro=STX_DEFAULT_DISTRO, layer=layer, build_type='rt')
        if not pkg_dirs:
            continue
        for pkg_dir in pkg_dirs:
            pkg_name = discovery.package_dir_to_package_name(pkg_dir, STX_DEFAULT_DISTRO)
            if pkg_name and pkg_name != 'linux-rt':
                if replace_in_yaml(initramfs_yaml, 'packages', 'yaml_list_suffix', pkg_name, '-rt'):
                    logger.debug("RT Initramfs: Updated %s with rt suffix", pkg_name)
                else:
                    logger.debug("RT Initramfs: Failed to update %s with rt suffix", pkg_name)
                    return ret
    ret = replace_in_yaml(initramfs_yaml, 'packages', 'yaml_list', stx_std_kernel, stx_rt_kernel)
    return ret


def include_initramfs(img_yaml, ramfs_yaml_path):
    if not os.path.exists(img_yaml):
        logger.error("LAT yaml file %s does not exist", img_yaml)
        return False
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
            yaml_doc['system'][0]['contains'][0] = ramfs_yaml_path

        with open(img_yaml, 'w') as f:
            yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)
    except Exception as e:
        logger.error(str(e))
        logger.error("Failed to add %s to %s", ramfs_yaml_path, img_yaml)
        return False

    logger.debug("Successfully included %s in %s", ramfs_yaml_path, img_yaml)
    return True


def feed_lat_src_repos(img_yaml, repo_url):
    if not os.path.exists(img_yaml):
        logger.error(' '.join(['LAT yaml file', img_yaml, 'does not exist']))
        return False

    with open(img_yaml) as f:
        yaml_doc = yaml.safe_load(f)
        yaml_doc['package_feeds'].extend(repo_url)
        yaml_doc['package_feeds'] = list(set(yaml_doc['package_feeds']))
        yaml_doc['package_feeds'].sort()

    with open(img_yaml, 'w') as f:
        yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)

    logger.debug(' '.join(['Update', img_yaml, 'to feed repos']))
    return True


def add_lat_packages(img_yaml, packages):
    if not os.path.exists(img_yaml):
        logger.error(' '.join(['LAT yaml file', img_yaml, 'does not exist']))
        return False

    with open(img_yaml) as f:
        yaml_doc = yaml.safe_load(f)
        yaml_doc['packages'].extend(packages)

        for build_type in ALL_BUILD_TYPES:
            pkgs = discovery.package_iso_list(distro=STX_DEFAULT_DISTRO, layer="all", build_type=build_type)
            yaml_doc['packages'].extend(pkgs)

        yaml_doc['packages'] = list(set(yaml_doc['packages']))
        yaml_doc['packages'].sort()

    with open(img_yaml, 'w') as f:
        yaml.safe_dump(yaml_doc, f, default_flow_style=False, sort_keys=False)

    logger.debug(' '.join(['Update', img_yaml, 'to add packages']))
    return True


def check_base_os_binaries(repomgr):
    base_bins_list = os.path.join(os.environ.get('MY_REPO_ROOT_DIR'),
                                  'cgcs-root/build-tools/stx/debian-image.inc')
    if not os.path.exists(base_bins_list):
        logger.error(' '.join(['Base OS packages list', base_bins_list,
                               'does not exist']))
        return False

    results = verify_pkgs_in_repo(repomgr, REPO_BINARY, base_bins_list)
    if results:
        logger.error("====OS binaries checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====OS binaries missing end====\n")
        return False
    logger.info("====All OS binary packages are ready ====\n")
    return True


def check_stx_binaries(repomgr, btype='std'):
    stx_bins_list = ''.join([PKG_LIST_DIR, '/debian/distro/os-', btype,
                             '.lst'])
    if not os.path.exists(stx_bins_list):
        logger.warning(' '.join(['STX binary packages list', stx_bins_list,
                                 'does not exist']))
        # Assume no such list here means ok
        return True

    results = verify_pkgs_in_repo(repomgr, REPO_BINARY, stx_bins_list)
    if results:
        logger.error("====STX binaries checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====STX binaries missing end====\n")
        return False
    logger.info("====All STX binary packages are ready ====\n")
    return True


def check_stx_patched(repomgr, btype='std'):
    stx_patched_list = ''.join([PKG_LIST_DIR, '/debian/distro/stx-', btype,
                                '.lst'])
    if not os.path.exists(stx_patched_list):
        logger.warning(' '.join(['STX patched packages list', stx_patched_list,
                                 'does not exist']))
        return False

    results = verify_pkgs_in_repo(repomgr, REPO_BUILD, stx_patched_list)
    if results:
        logger.error("====STX patched packages checking fail:")
        for deb in results:
            logger.error(deb)
        logger.error("====STX patched packages missing end====\n")
        return False
    logger.info("====All STX patched packages are ready ====\n")
    return True


def verify_pkgs_in_repo(repomgr, repo_name, pkg_list_path):
    failed_pkgs = []
    with open(pkg_list_path, 'r') as flist:
        lines = list(line for line in (lpkg.strip() for lpkg in flist) if line)
        for pkg in lines:
            pkg = pkg.strip()
            if pkg.startswith('#'):
                continue
            pname_parts = pkg.split()
            name = pname_parts[0]
            if len(pname_parts) > 1:
                version = pname_parts[1]
                pkg_name = ''.join([name, '_', version])
                if repomgr.search_pkg(repo_name, name, version):
                    img_pkgs.append(''.join([name, '=', version]))
                    logger.debug(''.join(['Found package:name=', name,
                                          ' version=', version]))
                else:
                    logger.debug(' '.join([pkg_name,
                                           'is missing in local binary repo']))
                    failed_pkgs.append(pkg_name)
            else:
                if repomgr.search_pkg(repo_name, name, None, True):
                    img_pkgs.append(name)
                    logger.debug(''.join(['Found package with name:', name]))
                else:
                    failed_pkgs.append(name)

    return failed_pkgs


def stop_latd():
    os.system("latc stop")
    time.sleep(2)

    cmd = 'latc status'
    try:
        status = subprocess.check_output(cmd, shell=True).decode()
    except Exception as e:
        logger.error(str(e))
    else:
        if status:
            if 'idle' in status:
                logger.info("Successfully stopped latd")
                return
    logger.info("Failed to stop latd, you may have to log in to pkgbuilder and kill it manually")


def user_signal_handler(signum, frame):
    stop_latd()
    sys.exit(1)


def user_register_signals():
    signal.signal(signal.SIGINT, user_signal_handler)
    signal.signal(signal.SIGHUP, user_signal_handler)
    signal.signal(signal.SIGTERM, user_signal_handler)
    signal.signal(signal.SIGPIPE, user_signal_handler)


def sign_iso_dev(img_yaml):
    '''
    Sign the .iso file with the developer key
    img_yaml: lat.yaml path
    '''
    logger.info("Trying to sign iso image with developer key")
    key_path = os.path.join(os.environ.get('MY_REPO'), 'build-tools/signing/dev-private-key.pem')
    deploy_dir = '/localdisk/deploy'
    try:
        with open(img_yaml) as f:
            yaml_doc = yaml.safe_load(f)
    except IOError as e:
        logger.error(str(e))
        # Without the yaml there is no image name to sign, so propagate the error
        raise

    if yaml_doc['name'] and yaml_doc['machine']:
        iso_name = yaml_doc['name'] + '-' + yaml_doc['machine'] + '-cd'
    else:
        # default image name
        iso_name = 'starlingx-intel-x86-64-cd'
    # openssl dgst -sha256 -sign ${KEY_PATH} -binary -out ${DEPLOY_DIR}/${ISO_NOEXT}.sig $DEPLOY_DIR/starlingx-intel-x86-64-cd.iso
    sign_cmd = f'sudo openssl dgst -sha256 -sign {key_path} -binary -out {deploy_dir}/{iso_name}.sig {deploy_dir}/{iso_name}.iso'
    ret = subprocess.call(sign_cmd, shell=True)
    if ret == 0:
        logger.info("Image signed %s", os.path.join(deploy_dir, iso_name + '.iso'))
    else:
        raise Exception("Error while signing the image")
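

# Main flow: verify that the required binary and patched packages are present
# in the local repos, generate the LAT yaml configs under LAT_ROOT, start the
# build through latc/latd and follow its log until the image is deployed, then
# hand ownership of the deploy directory back to the builder user and sign the
# resulting ISO with the developer key.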
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="build-image helper")
    kernel_types = parser.add_mutually_exclusive_group()
    kernel_types.add_argument('--std', help="build standard image",
                              action='store_true')
    kernel_types.add_argument('--rt', help="build rt image",
                              action='store_true')
    parser.add_argument('-c', '--clean', help="(DEPRECATED) Start a fresh image build",
                        default=True, action='store_true')
    parser.add_argument('-k', '--keep', help="Keep the current environment " +
                        "(ostree, deploy), mainly used for patching",
                        default=False, action='store_true')
    args = parser.parse_args()
    if args.rt:
        kernel_type = 'rt'
    else:
        kernel_type = 'std'

    user_register_signals()

    rmg_logger = logging.getLogger('repo_manager')
    utils.set_logger(rmg_logger)
    repo_manager = repo_manage.RepoMgr('aptly', os.environ.get('REPOMGR_URL'),
                                       '/tmp/', os.environ.get('REPOMGR_ORIGIN'),
                                       rmg_logger)
    repo_manager.upload_pkg(REPO_BUILD, None)
    repo_manager.upload_pkg(REPO_BINARY, None)

    logger.info("\n")
    logger.info("=====Build Image start ......")
    logger.info("checking OS binary packages ......")
    base_bins_ready = check_base_os_binaries(repo_manager)

    logger.info("\nchecking STX binary packages ......")
    stx_bins_ready = check_stx_binaries(repo_manager, "std")

    logger.info("\nchecking STX patched packages ......")
    stx_patched_ready = check_stx_patched(repo_manager, "std")

    if not base_bins_ready or not stx_bins_ready or not stx_patched_ready:
        logger.error("Failed to prepare the packages needed to build the image")
        sys.exit(1)
    base_yaml = os.path.join(PKG_LIST_DIR, 'debian/common/base-bullseye.yaml')
    base_initramfs_yaml = os.path.join(PKG_LIST_DIR, 'debian/common/base-initramfs-bullseye.yaml')
    os.environ["WORKSPACE_DIR"] = LAT_ROOT
    lat_yaml = os.path.join(LAT_ROOT, "lat.yaml")
    lat_initramfs_yaml = os.path.join(LAT_ROOT, "lat-initramfs.yaml")

    for yaml_file in (base_yaml, base_initramfs_yaml):
        if not os.path.exists(yaml_file):
            logger.error(' '.join(['Base yaml file', yaml_file, 'does not exist']))
            sys.exit(1)

    if not os.path.exists(LAT_ROOT):
        os.makedirs(LAT_ROOT)

    try:
        shutil.copyfile(base_yaml, lat_yaml)
        shutil.copyfile(base_initramfs_yaml, lat_initramfs_yaml)
    except IOError as e:
        logger.error(str(e))
        logger.error('Failed to copy yaml files to %s', LAT_ROOT)
        sys.exit(1)

    include_initramfs(lat_yaml, lat_initramfs_yaml)

    if merge_local_repos(repo_manager):
        if update_debootstrap_mirror(lat_yaml):
            logger.debug("Debootstrap switches to mirror %s in %s", REPO_ALL, lat_yaml)
        if update_debootstrap_mirror(lat_initramfs_yaml):
            logger.debug("Debootstrap switches to mirror %s in %s", REPO_ALL, lat_initramfs_yaml)

    binary_repo_url = ''.join(['deb ',
                               os.environ.get('REPOMGR_DEPLOY_URL'),
                               REPO_BINARY, ' bullseye main'])
    build_repo_url = ''.join(['deb ',
                              os.environ.get('REPOMGR_DEPLOY_URL'),
                              REPO_BUILD, ' bullseye main'])

    for yaml_file in (lat_yaml, lat_initramfs_yaml):
        if not feed_lat_src_repos(yaml_file, [binary_repo_url, build_repo_url]):
            logger.error(' '.join(['Failed to set local repos to', yaml_file]))
            sys.exit(1)
        else:
            logger.info(' '.join(['Successfully set local repos to', yaml_file]))

    update_ostree_osname(lat_yaml)

    if not change_default_kernel(lat_yaml, kernel_type):
        logger.error("Failed to change the default boot kernel")
        sys.exit(1)

    ret = 1
    if not add_lat_packages(lat_yaml, img_pkgs):
        logger.error("Failed to add packages into image YAML config")
        sys.exit(ret)

    if not args.keep:
        os.system('sudo rm -rf ' + LAT_ROOT + '/workdir')
        os.system('sudo rm -rf ' + LAT_ROOT + '/sub_workdir')
        os.system('sudo rm -rf ' + LAT_ROOT + '/deploy')
    # Prepare the boot cert
    os.system('sudo mkdir ' + LAT_ROOT + '/CERTS > /dev/null 2>&1')
    os.system('sudo cp ' + CERT_PATH + ' ' + LAT_ROOT + '/CERTS/')

    os.system(' '.join(['latc --file=' + lat_yaml, 'build']))
    # Sleep here to wait for the log file to be created and fed by latd
    # Note that latd does not write to the log right from its start
    time.sleep(WAIT_TIME_BEFORE_CHECKING_LOG)
    lat_log = os.path.join(LAT_ROOT, "log/log.appsdk")
    time_to_wait = DEFAULT_TIME_WAIT_LOG
    time_counter = 0
    while not os.path.exists(lat_log):
        time.sleep(1)
        time_counter += 1
        if time_counter > time_to_wait:
            break
    if not os.path.exists(lat_log):
        logger.info('The wait for %s has timed out, please wait a moment,' % lat_log)
        logger.info('then run: tail -f %s to check the process.' % lat_log)
        sys.exit(ret)
    else:
        log_printer = subprocess.Popen("tail -f " + lat_log,
                                       stdout=subprocess.PIPE, shell=True,
                                       universal_newlines=True)
        while log_printer.poll() is None:
            line = log_printer.stdout.readline()
            line = line.strip()
            if line:
                print(line)
                if "ERROR: " in line:
                    logger.info("Failed to build image, check the log %s", lat_log)
                    break
                if "DEBUG: Deploy ovmf.qcow2" in line:
                    logger.info("build-image successfully done, check the output in %s", LAT_ROOT)
                    ret = 0
                    break
    # stop latd
    stop_latd()

    # latd runs as root in its container, so hand the deploy directory back to the builder user
    os.system('sudo chown -R ${USER}: ' + LAT_ROOT + '/deploy')
    # Sign iso with developer key
    if ret == 0:
        sign_iso_dev(lat_yaml)

    sys.exit(ret)