mirror of
https://github.com/truenas/scale-build.git
synced 2025-12-20 02:49:28 +00:00
Add ability to create update image
This commit is contained in:
@@ -6,7 +6,7 @@ from scale_build.exceptions import CallError
|
||||
from .cache import check_basechroot_changed, create_basehash, save_build_cache, validate_basecache
|
||||
from .cleanup import remove_boostrap_directory
|
||||
from .logger import get_logger
|
||||
from .utils import APT_PREFERENCES, BUILDER_DIR, CACHE_DIR, CHROOT_BASEDIR, get_manifest, has_low_ram, run, TMPFS
|
||||
from .utils import APT_PREFERENCES, BUILDER_DIR, CACHE_DIR, CHROOT_BASEDIR, get_manifest, run
|
||||
|
||||
|
||||
def make_bootstrapdir(bootstrapdir_type):
|
||||
@@ -28,10 +28,6 @@ def _make_bootstrapdir_impl(bootstrapdir_type):
|
||||
deopts = ''
|
||||
cache_name = 'package'
|
||||
|
||||
# TODO: Commenting out tmpfs logic, let's see if we can just get rid of it
|
||||
# if not has_low_ram() or bootstrapdir_type == 'update':
|
||||
# run(['mount', '-t', 'tmpfs', '-o', 'size=12G', 'tmpfs', TMPFS], **run_args)
|
||||
|
||||
# Check if we should invalidate the base cache
|
||||
if validate_basecache(cache_name):
|
||||
logger.debug('Basechroot cache is intact and does not need to be changed')
|
||||
|
||||
@@ -1,7 +1,4 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from scale_build.utils.variables import LOG_DIR
|
||||
from scale_build.utils.logger import get_logger as _get_logger
|
||||
|
||||
|
||||
def get_log_file_name(bootstrap_dir_type):
|
||||
@@ -14,9 +11,4 @@ def get_log_file_name(bootstrap_dir_type):
|
||||
|
||||
|
||||
def get_logger(bootstrap_dir_type, mode='a+'):
    """Return a file-backed logger for this bootstrap dir type.

    Delegates to the shared ``scale_build.utils.logger.get_logger`` helper;
    the old hand-rolled logger construction (diff residue left above the
    final return, making the delegation unreachable) has been removed.

    :param bootstrap_dir_type: bootstrap flavor, used in the logger name and log file name
    :param mode: file mode for the underlying log file handler
    """
    return _get_logger(f'bootstrap_dir_{bootstrap_dir_type}', get_log_file_name(bootstrap_dir_type), mode)
|
||||
|
||||
@@ -33,8 +33,12 @@ APT_PREFERENCES = textwrap.dedent('''
|
||||
''')
|
||||
|
||||
|
||||
def normalize_cache_type(cache_type):
    """Map the CLI cache type to its canonical name ('cd' -> 'cdrom', anything else -> 'package')."""
    return 'cdrom' if cache_type == 'cd' else 'package'


def get_cache_filename(cache_type):
    """Return the basechroot squashfs cache filename for *cache_type*.

    The type is normalized first; diff residue had left the old
    un-normalized return line in place before the correct one.
    """
    return f'basechroot-{normalize_cache_type(cache_type)}.squashfs'
|
||||
|
||||
|
||||
def get_cache_hash_filename(cache_type):
|
||||
|
||||
0
scale_build/image/__init__.py
Normal file
0
scale_build/image/__init__.py
Normal file
32
scale_build/image/bootstrap.py
Normal file
32
scale_build/image/bootstrap.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from scale_build.bootstrap.cache import restore_basecache
|
||||
from scale_build.utils.run import run
|
||||
from scale_build.utils.variables import CHROOT_BASEDIR, PKG_DIR, TMPFS
|
||||
|
||||
from .utils import PACKAGE_PATH
|
||||
|
||||
|
||||
def setup_chroot_basedir(basecache_type, logger=None):
    """Recreate CHROOT_BASEDIR from the cached basechroot and mount its support filesystems.

    :param basecache_type: which basechroot cache to restore (e.g. 'update')
    :param logger: optional logger passed through to ``run``
    """
    # Start from a clean tree; restore_basecache repopulates it below.
    shutil.rmtree(CHROOT_BASEDIR, ignore_errors=True)
    os.makedirs(TMPFS, exist_ok=True)
    # NOTE(review): 12G tmpfs size is hard-coded — confirm it is enough for the rootfs.
    # (Dropped the useless f-prefix on the literal 'size=12G'.)
    run(
        ['mount', '-t', 'tmpfs', '-o', 'size=12G', 'tmpfs', TMPFS],
        logger=logger
    )
    restore_basecache(basecache_type, CHROOT_BASEDIR, logger)
    run(['mount', 'proc', os.path.join(CHROOT_BASEDIR, 'proc'), '-t', 'proc'], logger=logger)
    run(['mount', 'sysfs', os.path.join(CHROOT_BASEDIR, 'sys'), '-t', 'sysfs'], logger=logger)
    # Bind-mount the host package dir so the chroot can install locally built packages.
    os.makedirs(PACKAGE_PATH, exist_ok=True)
    run(['mount', '--bind', PKG_DIR, PACKAGE_PATH], logger=logger)
|
||||
|
||||
|
||||
def umount_chroot_basedir():
    """Best-effort unmount of everything ``setup_chroot_basedir`` mounted.

    Runs each umount with ``check=False``: this function is also used as
    pre-setup cleanup when some (or all) of the mount points may not be
    mounted, and ``run`` defaults to ``check=True`` which would raise on
    the first failing umount and abort the cleanup.
    """
    for command in (
        ['umount', '-f', PACKAGE_PATH],
        ['umount', '-f', os.path.join(CHROOT_BASEDIR, 'proc')],
        ['umount', '-f', os.path.join(CHROOT_BASEDIR, 'sys')],
        ['umount', '-f', TMPFS],
    ):
        run(command, check=False)
|
||||
5
scale_build/image/logger.py
Normal file
5
scale_build/image/logger.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from scale_build.utils.logger import get_logger as _get_logger
|
||||
|
||||
|
||||
def get_logger(filename, mode='a+'):
    """Return a logger named *filename* writing to '<filename>.log' in the log dir.

    Bug fix: the log file name f-string had no placeholder, so every
    image-stage logger wrote to the same literal file instead of one
    file per *filename*.
    """
    return _get_logger(filename, f'{filename}.log', mode)
|
||||
61
scale_build/image/manifest.py
Normal file
61
scale_build/image/manifest.py
Normal file
@@ -0,0 +1,61 @@
|
||||
from datetime import datetime
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
from scale_build.utils.variables import CHROOT_BASEDIR, RELEASE_DIR, UPDATE_DIR
|
||||
|
||||
|
||||
UPDATE_FILE = os.path.join(RELEASE_DIR, 'TrueNAS-SCALE.update')
|
||||
UPDATE_FILE_HASH = f'{UPDATE_FILE}.sha256'
|
||||
|
||||
|
||||
def _file_sha1(path):
    """Return the hex SHA1 of *path*, streamed in chunks.

    Stdlib replacement for shelling out to ``sha1sum`` once per file;
    produces the identical hex digest.
    """
    import hashlib  # function-local so the module import block stays untouched
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            digest.update(chunk)
    return digest.hexdigest()


def build_manifest():
    """Write UPDATE_DIR/manifest.json describing the freshly built rootfs.

    Collects the version string from the chroot, an estimated installed
    size, per-file SHA1 checksums of everything under UPDATE_DIR, and the
    kernel version derived from the chroot's /boot/vmlinuz-* file.
    """
    with open(os.path.join(CHROOT_BASEDIR, 'etc/version')) as f:
        version = f.read().strip()

    # `du` reports the on-disk size of the rootfs; pad by 10% for install headroom.
    size = int(int(subprocess.run(
        ['du', '--block-size', '1', '-d', '0', '-x', CHROOT_BASEDIR],
        check=True, stdout=subprocess.PIPE, encoding='utf-8', errors='ignore',
    ).stdout.split()[0]) * 1.1)

    # Ship the installer package alongside the rootfs inside the update image.
    shutil.copytree(
        os.path.join(os.path.dirname(__file__), '../truenas_install'),
        os.path.join(UPDATE_DIR, 'truenas_install'),
    )

    # Checksum every file that ends up inside the outer update image.
    checksums = {}
    for root, dirs, files in os.walk(UPDATE_DIR):
        for file in files:
            abspath = os.path.join(root, file)
            checksums[os.path.relpath(abspath, UPDATE_DIR)] = _file_sha1(abspath)

    with open(os.path.join(UPDATE_DIR, 'manifest.json'), "w") as f:
        f.write(json.dumps({
            # NOTE(review): utcnow() yields a naive timestamp; kept as-is for
            # backward-compatible manifest format.
            'date': datetime.utcnow().isoformat(),
            'version': version,
            'size': size,
            'checksums': checksums,
            # 'vmlinuz-<ver>' basename -> '<ver>'
            'kernel_version': glob.glob(
                os.path.join(CHROOT_BASEDIR, 'boot/vmlinuz-*')
            )[0].split('/')[-1][len('vmlinuz-'):],
        }))
|
||||
|
||||
|
||||
def build_update_manifest(update_file_checksum):
    """Write RELEASE_DIR/manifest.json describing the packaged update file.

    :param update_file_checksum: sha256 line recorded for the outer update image
    """
    # Pull version/date from the inner manifest produced by build_manifest().
    with open(os.path.join(UPDATE_DIR, 'manifest.json')) as src:
        inner_manifest = json.load(src)

    release_manifest = {
        'filename': os.path.basename(UPDATE_FILE),
        'version': inner_manifest['version'],
        'date': inner_manifest['date'],
        'changelog': '',
        'checksum': update_file_checksum,
    }
    with open(os.path.join(RELEASE_DIR, 'manifest.json'), 'w') as dst:
        json.dump(release_manifest, dst)
|
||||
146
scale_build/image/update.py
Normal file
146
scale_build/image/update.py
Normal file
@@ -0,0 +1,146 @@
|
||||
import glob
|
||||
import itertools
|
||||
import logging
|
||||
import os
|
||||
import textwrap
|
||||
import shutil
|
||||
|
||||
from scale_build.utils.manifest import get_manifest
|
||||
from scale_build.utils.run import run
|
||||
from scale_build.utils.variables import CHROOT_BASEDIR, CONF_SOURCES, RELEASE_DIR, UPDATE_DIR
|
||||
|
||||
from .logger import get_logger
|
||||
from .manifest import build_manifest, build_update_manifest, UPDATE_FILE, UPDATE_FILE_HASH
|
||||
from .utils import run_in_chroot
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def build_rootfs_image():
    """Build the nested squashfs update image, its manifests, and its checksum.

    We build a nested squashfs image: the outer image can be RO-mounted
    during update to read a MANIFEST and verify signatures of the real
    rootfs inner image, so nothing has to be extracted to disk to verify.
    """
    # Drop stale update artifacts from previous runs. Consistency fix: use the
    # shared RELEASE_DIR constant instead of a duplicated './tmp/release' path.
    for f in glob.glob(os.path.join(RELEASE_DIR, '*.update*')):
        os.unlink(f)

    shutil.rmtree(UPDATE_DIR, ignore_errors=True)
    os.makedirs(RELEASE_DIR, exist_ok=True)
    os.makedirs(UPDATE_DIR, exist_ok=True)

    build_logger = get_logger('rootfs-image', 'w')
    # Create the inner image. Bug fix: the compression option is '-comp xz';
    # without the leading dash mksquashfs would treat 'comp' and 'xz' as two
    # extra source paths instead of selecting xz compression.
    run(
        ['mksquashfs', CHROOT_BASEDIR, os.path.join(UPDATE_DIR, 'rootfs.squashfs'), '-comp', 'xz'],
        logger=build_logger
    )
    # Build any MANIFEST information
    build_manifest()

    # Sign the image (if enabled)
    # TODO: Add this please

    # Create the outer image now. '-noD' skips data-block compression — the
    # inner rootfs image is already xz-compressed.
    run(['mksquashfs', UPDATE_DIR, UPDATE_FILE, '-noD'], logger=build_logger)
    update_hash = run(['sha256sum', UPDATE_FILE]).stdout.decode(errors='ignore').strip()
    with open(UPDATE_FILE_HASH, 'w') as f:
        f.write(update_hash)

    build_update_manifest(update_hash)
|
||||
|
||||
|
||||
def install_rootfs_packages():
    """Install every manifest-listed package into the rootfs chroot, then prune and finalize it."""
    rootfs_logger = get_logger('rootfs-packages', 'w')

    # Tell dpkg to skip fsync-heavy safe I/O inside the throwaway chroot.
    dpkg_cfg_dir = os.path.join(CHROOT_BASEDIR, 'etc/dpkg/dpkg.cfg.d')
    os.makedirs(dpkg_cfg_dir, exist_ok=True)
    with open(os.path.join(dpkg_cfg_dir, 'force-unsafe-io'), 'w') as f:
        f.write('force-unsafe-io')

    run_in_chroot('apt update', rootfs_logger)

    manifest = get_manifest()
    packages = list(manifest['base-packages'])
    packages.extend(entry['package'] for entry in manifest['additional-packages'])
    for package in packages:
        run_in_chroot(f'apt install -V -y {package}', rootfs_logger, f'Failed apt install {package}')

    # Do any custom rootfs setup
    custom_rootfs_setup(rootfs_logger)

    # Do any pruning of rootfs
    clean_rootfs(rootfs_logger)

    # Copy the default sources.list file
    shutil.copy(CONF_SOURCES, os.path.join(CHROOT_BASEDIR, 'etc/apt/sources.list'))

    run_in_chroot('depmod', rootfs_logger, check=False)
|
||||
|
||||
|
||||
def custom_rootfs_setup(rootfs_logger):
    """Apply custom mangling to the built rootfs image.

    :param rootfs_logger: logger receiving the chroot command output
    """
    # If we are upgrading a FreeBSD installation on USB, there is no opportunity
    # to run truenas-initrd.py, so we have to assume the worst.
    # If the rootfs image is used in a Linux installation, initrd will be
    # re-generated with the proper configuration, so the initrd we make now is
    # only used on the first boot after a FreeBSD upgrade.
    with open(os.path.join(CHROOT_BASEDIR, 'etc/default/zfs'), 'a') as f:
        f.write('ZFS_INITRD_POST_MODPROBE_SLEEP=15')

    run_in_chroot('update-initramfs -k all -u', logger=rootfs_logger)

    # Generate native systemd unit files for SysV services that lack ones to
    # prevent systemd-sysv-generator warnings.
    tmp_systemd = os.path.join(CHROOT_BASEDIR, 'tmp/systemd')
    os.makedirs(tmp_systemd)
    run_in_chroot(
        # (Dropped the useless f-prefix on this fixed command string.)
        '/usr/lib/systemd/system-generators/systemd-sysv-generator /tmp/systemd /tmp/systemd /tmp/systemd',
        rootfs_logger
    )
    # Make the generated units enable-able under multi-user.target.
    for unit_file in filter(lambda f: f.endswith('.service'), os.listdir(tmp_systemd)):
        with open(os.path.join(tmp_systemd, unit_file), 'a') as f:
            f.write(textwrap.dedent('''
                [Install]
                WantedBy=multi-user.target
            '''))

    # Bug fix: os.listdir returns bare names; the previous filter called
    # os.path.isfile/islink on those names (resolved against the build CWD),
    # so the predicate never matched and nothing was ever removed. Join the
    # directory path before testing.
    wants_dir = os.path.join(tmp_systemd, 'multi-user.target.wants')
    for name in os.listdir(wants_dir):
        unit_path = os.path.join(wants_dir, name)
        if os.path.isfile(unit_path) and not os.path.islink(unit_path) and name != 'rrdcached.service':
            os.unlink(unit_path)

    # Consistency fix: route rsync output through the stage logger like every
    # other chroot command in this function.
    run_in_chroot('rsync -av /tmp/systemd/ /usr/lib/systemd/system/', rootfs_logger)
    shutil.rmtree(tmp_systemd, ignore_errors=True)
|
||||
|
||||
|
||||
def clean_rootfs(rootfs_logger):
    """Prune manifest-listed packages and build leftovers from the rootfs chroot.

    :param rootfs_logger: logger receiving the chroot command output
    """
    to_remove = get_manifest()['base-prune']
    run_in_chroot(
        f'apt remove -y {" ".join(to_remove)}', rootfs_logger, f'Failed removing {", ".join(to_remove)!r} packages.'
    )

    # Remove any temp build depends.
    # Typo fix: the failure message read 'Failed atp autoremove'; also dropped
    # the useless f-prefix on this constant string.
    run_in_chroot('apt autoremove -y', rootfs_logger, 'Failed apt autoremove')

    # We install the nvidia-kernel-dkms package which causes a modprobe file to be written
    # (i.e /etc/modprobe.d/nvidia.conf). This file tries to modprobe all the associated
    # nvidia drivers at boot whether or not your system has an nvidia card installed.
    # For all truenas certified and truenas enterprise hardware, we do not include nvidia GPUS.
    # So to prevent a bunch of systemd "Failed" messages to be barfed to the console during boot,
    # we remove this file because the linux kernel dynamically loads the modules based on whether
    # or not you have the actual hardware installed in the system.
    nvidia_conf = os.path.join(CHROOT_BASEDIR, 'etc/modprobe.d/nvidia.conf')
    if os.path.exists(nvidia_conf):
        os.unlink(nvidia_conf)

    # Clear doc/cache trees but recreate the (empty) directories apt expects.
    for path in (
        os.path.join(CHROOT_BASEDIR, 'usr/share/doc'),
        os.path.join(CHROOT_BASEDIR, 'var/cache/apt'),
        os.path.join(CHROOT_BASEDIR, 'var/lib/apt/lists'),
    ):
        shutil.rmtree(path, ignore_errors=True)
        os.makedirs(path, exist_ok=True)
|
||||
16
scale_build/image/utils.py
Normal file
16
scale_build/image/utils.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import os
|
||||
|
||||
from scale_build.exceptions import CallError
|
||||
from scale_build.utils.environment import APT_ENV
|
||||
from scale_build.utils.run import run
|
||||
from scale_build.utils.variables import CHROOT_BASEDIR
|
||||
|
||||
|
||||
# Mount point inside the chroot where the host package dir is bind-mounted.
PACKAGE_PATH = os.path.join(CHROOT_BASEDIR, 'packages')


def run_in_chroot(command, logger=None, exception_message=None, **kwargs):
    """Run *command* via /bin/bash inside CHROOT_BASEDIR, raising CallError on failure.

    NOTE(review): *command* is interpolated into a double-quoted ``bash -c``
    string, so a command containing double quotes would break the quoting —
    verify all callers pass fixed, quote-free strings.

    :param command: shell command line to execute inside the chroot
    :param logger: optional logger passed through to ``run``
    :param exception_message: message for the CallError raised on failure
    :param kwargs: forwarded verbatim to ``run`` (e.g. check=False)
    """
    chroot_command = f'chroot {CHROOT_BASEDIR} /bin/bash -c "{command}"'
    environment = {**APT_ENV, **os.environ}
    return run(
        chroot_command, shell=True, logger=logger,
        exception=CallError, exception_msg=exception_message, env=environment, **kwargs
    )
|
||||
@@ -6,6 +6,7 @@ from scale_build.checkout import checkout_sources
|
||||
from scale_build.epoch import check_epoch
|
||||
from scale_build.package import build_packages
|
||||
from scale_build.preflight import preflight_check
|
||||
from scale_build.update_image import build_update_image
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -26,6 +27,7 @@ def main():
|
||||
|
||||
subparsers.add_parser('checkout', help='Checkout TrueNAS Scale repositories')
|
||||
subparsers.add_parser('packages', help='Build TrueNAS Scale packages')
|
||||
subparsers.add_parser('update', help='Create TrueNAS Scale update image')
|
||||
|
||||
args = parser.parse_args()
|
||||
if args.action == 'checkout':
|
||||
@@ -34,5 +36,7 @@ def main():
|
||||
elif args.action == 'packages':
|
||||
check_epoch()
|
||||
build_packages()
|
||||
elif args.action == 'update':
|
||||
build_update_image()
|
||||
else:
|
||||
parser.print_help()
|
||||
|
||||
@@ -4,8 +4,8 @@ import os
|
||||
import shutil
|
||||
|
||||
from datetime import datetime
|
||||
from distutils.dir_util import copy_tree
|
||||
from scale_build.exceptions import CallError
|
||||
from scale_build.utils.environment import APT_ENV
|
||||
from scale_build.utils.run import run
|
||||
from scale_build.utils.variables import PKG_DIR
|
||||
|
||||
@@ -18,14 +18,9 @@ class BuildPackageMixin:
|
||||
f'chroot {self.dpkg_overlay} /bin/bash -c "{command}"', shell=True, logger=self.logger,
|
||||
exception=exception, exception_msg=exception_message, env={
|
||||
**os.environ,
|
||||
# When logging in as 'su root' the /sbin dirs get dropped out of PATH
|
||||
'PATH': f'{os.environ["PATH"]}:/sbin:/usr/sbin:/usr/local/sbin',
|
||||
'LC_ALL': 'C', # Makes some perl scripts happy during package builds
|
||||
'LANG': 'C',
|
||||
'DEB_BUILD_OPTIONS': f'parallel={os.cpu_count()}', # Passed along to WAF for parallel build,
|
||||
**APT_ENV,
|
||||
'CONFIG_DEBUG_INFO': 'N', # Build kernel with debug symbols
|
||||
'CONFIG_LOCALVERSION': '+truenas',
|
||||
'DEBIAN_FRONTEND': 'noninteractive', # Never go full interactive on any packages
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
34
scale_build/update_image.py
Normal file
34
scale_build/update_image.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from scale_build.bootstrap.configure import make_bootstrapdir
|
||||
from scale_build.image.bootstrap import setup_chroot_basedir, umount_chroot_basedir
|
||||
from scale_build.image.logger import get_logger
|
||||
from scale_build.image.manifest import UPDATE_FILE
|
||||
from scale_build.image.update import install_rootfs_packages, build_rootfs_image
|
||||
from scale_build.utils.variables import CHROOT_BASEDIR, LOG_DIR, RELEASE_DIR
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def build_update_image():
    """Drive the full update-image build: bootstrap, package install, image creation."""
    os.makedirs(RELEASE_DIR, exist_ok=True)

    # Clean up any leftover mounts/chroot from a previous (possibly failed) run.
    umount_chroot_basedir()
    shutil.rmtree(CHROOT_BASEDIR, ignore_errors=True)
    os.makedirs(CHROOT_BASEDIR)
    logger.debug('Bootstrapping TrueNAS rootfs [UPDATE] (%s/rootfs-bootstrap.log)', LOG_DIR)
    make_bootstrapdir('update')

    logger.debug('Installing TrueNAS rootfs package [UPDATE] (%s/rootfs-package.log)', LOG_DIR)
    setup_chroot_basedir('update', get_logger('rootfs-bootstrap'))
    install_rootfs_packages()
    # Unmount before imaging so the squashfs doesn't capture proc/sys/package mounts.
    umount_chroot_basedir()

    logger.debug('Building TrueNAS rootfs image [UPDATE] (%s/rootfs-image.log)', LOG_DIR)
    build_rootfs_image()
    shutil.rmtree(CHROOT_BASEDIR, ignore_errors=True)

    logger.debug('Success! Update image created at: %s', UPDATE_FILE)
|
||||
11
scale_build/utils/environment.py
Normal file
11
scale_build/utils/environment.py
Normal file
@@ -0,0 +1,11 @@
|
||||
import os
|
||||
|
||||
|
||||
# Environment variables overlaid onto apt/dpkg invocations inside build chroots.
APT_ENV = {
    # When logging in as 'su root' the /sbin dirs get dropped out of PATH
    'PATH': f'{os.environ["PATH"]}:/sbin:/usr/sbin:/usr/local/sbin',
    'LC_ALL': 'C',  # Makes some perl scripts happy during package builds
    'LANG': 'C',
    'DEB_BUILD_OPTIONS': f'parallel={os.cpu_count()}',  # Passed along to WAF for parallel builds
    'DEBIAN_FRONTEND': 'noninteractive',  # Never go full interactive on any packages
}
|
||||
13
scale_build/utils/logger.py
Normal file
13
scale_build/utils/logger.py
Normal file
@@ -0,0 +1,13 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from .variables import LOG_DIR
|
||||
|
||||
|
||||
def get_logger(logger_name, logger_path, mode='a+'):
    """Return an isolated DEBUG logger writing to LOG_DIR/*logger_path*.

    The logger does not propagate to the root logger, and any handlers
    attached by a previous call are discarded so repeated calls keep
    exactly one file handler.

    :param logger_name: name registered with the logging module
    :param logger_path: log file name, created under LOG_DIR
    :param mode: file mode for the FileHandler
    """
    log = logging.getLogger(logger_name)
    log.propagate = False
    log.setLevel('DEBUG')
    file_handler = logging.FileHandler(os.path.join(LOG_DIR, logger_path), mode)
    log.handlers = [file_handler]
    return log
|
||||
@@ -7,7 +7,6 @@ def run(*args, **kwargs):
|
||||
args = tuple(args[0])
|
||||
kwargs.setdefault('stdout', subprocess.PIPE)
|
||||
kwargs.setdefault('stderr', subprocess.PIPE)
|
||||
stdout = stderr = ''
|
||||
exception = kwargs.pop('exception', None)
|
||||
exception_message = kwargs.pop('exception_msg', None)
|
||||
check = kwargs.pop('check', True)
|
||||
@@ -27,7 +26,7 @@ def run(*args, **kwargs):
|
||||
cp = subprocess.CompletedProcess(args, proc.returncode, stdout=stdout, stderr=stderr)
|
||||
if check:
|
||||
if cp.returncode and exception and exception_message:
|
||||
raise exception(exception_message)
|
||||
raise exception(f'{exception_message} ({stderr.decode(errors="ignore")}')
|
||||
else:
|
||||
cp.check_returncode()
|
||||
return cp
|
||||
|
||||
@@ -6,6 +6,7 @@ TMPFS = './tmp/tmpfs'
|
||||
# Build tree layout (paths are relative to the repository root).
CACHE_DIR = './tmp/cache'  # cached basechroot squashfs images live here
CHROOT_BASEDIR = os.path.join(TMPFS, 'chroot')  # chroot used for bootstrap / rootfs package installs
CHROOT_OVERLAY = os.path.join(TMPFS, 'chroot-overlay')
CONF_SOURCES = os.path.join(BUILDER_DIR, 'conf/sources.list')  # default apt sources copied into the rootfs
DPKG_OVERLAY = './tmp/dpkg-overlay'
GIT_MANIFEST_PATH = './logs/GITMANIFEST'
GIT_LOG_PATH = './logs/git-checkout.log'
||||
@@ -16,9 +17,11 @@ PARALLEL_BUILDS = int(os.environ.get('PARALLEL_BUILDS') or 4)
|
||||
PKG_DEBUG = bool(os.environ.get('PKG_DEBUG'))  # any non-empty PKG_DEBUG env value enables debug
PKG_DIR = './tmp/pkgdir'  # built .deb packages; bind-mounted into the chroot
PKG_LOG_DIR = os.path.join(LOG_DIR, 'packages')
RELEASE_DIR = './tmp/release'  # final update image + manifests land here
REQUIRED_RAM = 16  # GB
SOURCES_DIR = './sources'
TMP_DIR = './tmp'
UPDATE_DIR = os.path.join(TMP_DIR, 'update')  # staging dir for the nested update image contents
WORKDIR_OVERLAY = os.path.join(TMPFS, 'workdir-overlay')
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user