cmdlib.sh: drop runvm_with_cache_snapshot() #3970

Merged · 4 commits · Nov 23, 2024
Changes from all commits
2 changes: 1 addition & 1 deletion .cci.jenkinsfile
@@ -30,7 +30,7 @@ pod(image: imageName + ":latest", kvm: true, cpu: "${cpuCount}", memory: "${memo
     kola(cosaDir: "/srv", addExtTests: ["${env.WORKSPACE}/ci/run-kola-self-tests"])
 
     stage("Build Metal") {
-        cosaParallelCmds(cosaDir: "/srv", commands: ["metal", "metal4k"])
+        utils.cosaCmd(cosaDir: "/srv", args: "osbuild metal metal4k")
     }
 
     stage("Build Live Images") {
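
With runvm_with_cache_snapshot() gone, the pipeline can no longer build metal and metal4k in parallel against a snapshotted cache, so this stage now issues a single serial cosa call. A rough shell equivalent of the new utils.cosaCmd step, assuming the standard /srv working directory (a sketch; the wrapper's exact behavior lives in the CI utils library):

    # Run both builds serially in one cosa invocation (sketch)
    cd /srv
    cosa osbuild metal metal4k
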
13 changes: 2 additions & 11 deletions src/cmd-osbuild
@@ -347,7 +347,6 @@ main() {
     done
 
     tobuild=()
-    building_qemu=0
     for platform in "${platforms[@]}"; do
         # check if the image already exists in the meta.json
         meta_img=$(meta_key "images.${platform}.path")
@@ -357,7 +356,6 @@ main() {
         else
             tobuild+=("${platform}")
             echo "Will build $platform"
-            [ "${platform}" == 'qemu' ] && building_qemu=1
         fi
     done
     if [ ${#tobuild[@]} == "0" ]; then
@@ -366,17 +364,10 @@ main() {
     fi
     platforms=("${tobuild[@]}")
 
-    # Run OSBuild now to build the platforms that were requested. In the jenkins
-    # pipelines we build the qemu image first and that operation will do a lot of
-    # the same work required for later artifacts (metal, metal4k, etc) so we want
-    # the cached output from that run to persist. For now, later artifacts get built
-    # in parallel, so we need to be able to access the cache by multiple processes,
-    # so for those we'll set `snapshot=on` so that each will get their own disk image.
-    # This is OK because we don't checkpoint (cache) any of those stages.
-    [ "${building_qemu}" == '1' ] && snapshot="off" || snapshot="on"
+    # Run OSBuild now to build the platforms that were requested.
     runvm_osbuild_config_json="$(generate_runvm_osbuild_config)"
    outdir=$(mktemp -p "${tmp_builddir}" -d)
-    runvm_with_cache_snapshot "$snapshot" -- /usr/lib/coreos-assembler/runvm-osbuild \
+    runvm_with_cache -- /usr/lib/coreos-assembler/runvm-osbuild \
         --config "${runvm_osbuild_config_json}" \
         --mpp "/usr/lib/coreos-assembler/osbuild-manifests/coreos.osbuild.${basearch}.mpp.yaml" \
         --outdir "${outdir}" \
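
The skip logic above consults meta.json so already-built images are not rebuilt. A minimal sketch of that check, assuming meta_key is a thin jq lookup into the build's meta.json (helper name from the diff; the jq form and paths here are illustrative):

    # Hypothetical stand-in for: meta_key "images.qemu.path"
    meta_img=$(jq -r '.images.qemu.path // empty' builds/latest/x86_64/meta.json)
    if [ -n "${meta_img}" ]; then
        echo "Image qemu already exists, skipping"
    else
        echo "Will build qemu"
    fi
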
7 changes: 1 addition & 6 deletions src/cmd-supermin-run
@@ -6,7 +6,7 @@ dn=$(dirname "$0")
 . "${dn}"/cmdlib.sh
 
 print_help() {
-    echo "Usage: cosa supermin-run [--cache|--snapshot] COMMAND [ARG...]" >&2
+    echo "Usage: cosa supermin-run [--cache] COMMAND [ARG...]" >&2
 }
 
 if [ $# = 0 ]; then
@@ -21,11 +21,6 @@ case "$1" in
         set -- runvm_with_cache -- "$@"
         need_cache_dir=1
         ;;
-    --snapshot)
-        shift
-        set -- runvm_with_cache_snapshot on -- "$@"
-        need_cache_dir=1
-        ;;
     --*)
         echo "unrecognized option: $1"
         print_help
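
After this change, cosa supermin-run keeps a single cache mode: --cache boots the supermin VM with the writable cache disk attached, and the throwaway --snapshot variant is gone. A hypothetical invocation (the command run inside the VM and the mount path are illustrative):

    # Inspect the shared cache from inside the build VM; writes now always persist.
    cosa supermin-run --cache ls -l /srv/cache
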
41 changes: 3 additions & 38 deletions src/cmdlib.sh
@@ -574,38 +574,8 @@ runcompose_tree() {
     fi
 }
 
-runcompose_extensions() {
-    local outputdir=$1; shift
-    local workdir=${workdir:-$(pwd)}
-    local repo=${tmprepo:-${workdir}/tmp/repo}
-
-    rm -f "${changed_stamp}"
-    # shellcheck disable=SC2086
-    set - ${COSA_RPMOSTREE_GDB:-} rpm-ostree compose extensions --repo="${repo}" \
-        --touch-if-changed "${changed_stamp}" --cachedir="${workdir}"/cache \
-        ${COSA_RPMOSTREE_ARGS:-} "$@" --output-dir "$outputdir"
-
-    echo "Running: $*"
-
-    # this is the heart of the privs vs no privs dual path
-    if has_privileges; then
-        # we hardcode a umask of 0022 here to make sure that composes are run
-        # with a consistent value, regardless of the environment
-        (umask 0022 && sudo -E "$@")
-        sudo chown -R -h "${USER}":"${USER}" "${outputdir}"
-    else
-        # Use a snapshot version of the cache qcow2 to allow multiple users
-        # of the cache at the same time. This is needed because the extensions
-        # and other artifacts are built in parallel.
-        local snapshot='on'
-        runvm_with_cache_snapshot "${snapshot}" -- "$@"
-    fi
-}
-
-# Run with cache disk with optional snapshot=on, which means no changes get written back to
-# the cache disk. `runvm_with_cache_snapshot on` will set snapshotting to on.
-runvm_with_cache_snapshot() {
-    local snapshot=$1; shift
+# Run with cache disk.
+runvm_with_cache() {
     local cache_size=${RUNVM_CACHE_SIZE:-30G}
     # "cache2" has an explicit label so we can find it in qemu easily
     if [ ! -f "${workdir}"/cache/cache2.qcow2 ]; then
@@ -618,16 +588,11 @@ runvm_with_cache_snapshot() {
     fi
     # And remove the old one
     rm -vf "${workdir}"/cache/cache.qcow2
-    cache_args+=("-drive" "if=none,id=cache,discard=unmap,snapshot=${snapshot},file=${workdir}/cache/cache2.qcow2" \
+    cache_args+=("-drive" "if=none,id=cache,discard=unmap,file=${workdir}/cache/cache2.qcow2" \
        "-device" "virtio-blk,drive=cache")
     runvm "${cache_args[@]}" "$@"
 }
 
-runvm_with_cache() {
-    local snapshot='off'
-    runvm_with_cache_snapshot $snapshot "$@"
-}
-
 # Strips out the digest field from lockfiles since they subtly conflict with
 # various workflows.
 strip_out_lockfile_digests() {
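
The snapshot parameter removed here mapped directly onto qemu's per-drive snapshot option: with snapshot=on, guest writes land in a temporary overlay that is discarded on exit, which is what let parallel VMs share one cache image without corrupting it. A minimal sketch of the two drive configurations as standalone qemu invocations (machine flags and file paths illustrative):

    # Persistent mode (the only one left): guest writes go into cache2.qcow2.
    qemu-system-x86_64 -m 2048 -nographic \
        -drive if=none,id=cache,discard=unmap,file=cache2.qcow2 \
        -device virtio-blk,drive=cache

    # Dropped mode: snapshot=on redirects writes to a temporary overlay,
    # thrown away when the VM exits, so the backing file is never modified.
    qemu-system-x86_64 -m 2048 -nographic \
        -drive if=none,id=cache,discard=unmap,snapshot=on,file=cache2.qcow2 \
        -device virtio-blk,drive=cache
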