diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..1feb126
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,25 @@
+on:
+  pull_request:
+    types: [opened, reopened, synchronize]
+    paths:
+      - '**.sh'
+      - '.github/workflows/**'
+      - 'build-container/**'
+      - 'packages/**'
+name: Build
+
+permissions:
+  contents: read
+
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4.2.1
+      - name: Build builder container
+        run: docker build -t archzfs-builder build-container
+      - name: Run builder container
+        run: docker run --privileged --rm -v "$(pwd):/src" archzfs-builder
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..5f50105
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,52 @@
+on:
+  push:
+    branches: [master]
+    paths:
+      - '**.sh'
+      - '.github/workflows/**'
+      - 'build-container/**'
+      - 'packages/**'
+  schedule:
+    - cron: "4 2 * * *"
+
+name: Release
+
+concurrency:
+  group: release
+  cancel-in-progress: true
+
+permissions:
+  contents: write
+
+jobs:
+  release:
+    name: Release
+    runs-on: ubuntu-latest
+    environment: Release
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4.2.1
+      - name: Build builder container
+        run: docker build -t archzfs-builder build-container
+      - name: Run builder container
+        env:
+          GPG_KEY_DATA: "${{ secrets.GPG_KEY_DATA }}"
+          GPG_KEY_ID: "${{ vars.GPG_KEY_ID }}"
+        run: docker run -e GPG_KEY_DATA -e GPG_KEY_ID --privileged --rm -v "$(pwd):/src" archzfs-builder
+      - name: Release mainline
+        uses: ncipollo/release-action@v1.14.0
+        with:
+          name: experimental
+          tag: experimental
+          commit: ${{ github.sha }}
+          artifacts: ./repo/*
+          allowUpdates: true
+          artifactErrorsFailBuild: true
+          omitBody: true
+          omitBodyDuringUpdate: true
+          removeArtifacts: true
+      - uses: rickstaa/action-create-tag@v1.7.2
+        with:
+          tag: experimental
+          force_push_tag: true
diff --git a/.gitignore b/.gitignore
index e21b4bb..522eaf4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,8 @@
 *.log
 archiso/out/
 archiso/work/
+/repo
+/repo-tmp
 
 # TEMPORARY
 packages/linux/*-headers/
diff --git a/build-container/Dockerfile b/build-container/Dockerfile
new file mode 100644
index 0000000..1560b4c
--- /dev/null
+++ b/build-container/Dockerfile
@@ -0,0 +1,40 @@
+FROM archlinux:base-devel
+
+# The following is from https://github.com/archzfs/archzfs-ci/blob/master/worker/Dockerfile
+RUN pacman -Syu --noconfirm --needed python-pipx python-twisted python-future git wget systemd-sysvcompat openresolv vi
+
+# add buildbot user and give passwordless sudo access (needed for archzfs build scripts)
+RUN groupadd -r buildbot && \
+    useradd -m -g buildbot buildbot && \
+    mkdir /worker && \
+    chown buildbot:buildbot /worker && \
+    echo "buildbot ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+# aur prep
+RUN useradd aur && \
+    echo "aur ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+# clean-chroot-manager
+USER aur
+RUN mkdir -p /tmp/ccm-install && \
+    cd /tmp/ccm-install && \
+    wget https://aur.archlinux.org/cgit/aur.git/snapshot/clean-chroot-manager.tar.gz && \
+    tar -xvf clean-chroot-manager.tar.gz && \
+    cd clean-chroot-manager && \
+    makepkg -si --noconfirm && \
+    cd /tmp && \
+    rm -rfv /tmp/ccm-install
+
+USER root
+COPY ccm.conf /home/buildbot/.config/clean-chroot-manager.conf
+RUN ccm64 p && \
+    chown -R buildbot:buildbot /home/buildbot && \
+    mkdir -p /scratch/.buildroot
+
+COPY entrypoint.sh /entrypoint.sh
+COPY systemd-nspawn-wrapper /usr/local/bin/systemd-nspawn
+RUN systemd-machine-id-setup
+
+USER buildbot
+VOLUME /src
+ENTRYPOINT [ "/entrypoint.sh" ]
diff --git a/build-container/ccm.conf b/build-container/ccm.conf
new file mode 100644
index 0000000..f806c35
--- /dev/null
+++ b/build-container/ccm.conf
@@ -0,0 +1,71 @@
+# Fully qualified path for build root.
+# This should not use a variable like $HOME. If your machine has lots
+# of memory, consider locating this to tmpfs to avoid usage to the disk and
+# to minimize access times but know that unless you copy the contents to
+# physical media, it will not survive a reboot. See the manpage for tips.
+CHROOTPATH64="/scratch/.buildroot"
+
+# Number of threads makepkg in the clean chroot will use when building.
+# The typical rule is physical cores + 1.
+THREADS=9
+
+# Optionally uncomment and define a custom pacman.conf and/or a custom
+# makepkg.conf for the buildroot using a fully qualified path below.
+# Leaving these two undefined to use the system files.
+
+#CUSTOM_PACMAN_CONF='/usr/share/devtools/pacman.conf.d/extra-testing.conf'
+#CUSTOM_MAKEPKG_CONF='/usr/share/devtools/makepkg.conf.d/x86_64.conf'
+
+# Optionally uncomment and define a custom location and name for the local chroot
+# package repo.
+#REPO="/src/repo"
+REPO_NAME="chroot_local"
+
+# Optionally uncomment to pass the --nocheck flag to the build which will skip
+# the check function in the PKGBUILD is it is present.
+#NOCHECK=1
+
+# Optionally define the format of compression for compiled packages. Leave this
+# undefined to use the Arch default.
+PKGEXT=
+
+# If set, the value defined will be used in the buildroot's packages.
+PACKAGER="ArchZFS Project (https://github.com/archzfs)"
+
+# Set this variable to anything if you want to run namcap on the built package.
+RUNNAMCAP=
+
+# Set this to anything if you want makepkg to build using distcc for faster
+# compilation. You must have distcc nodes properly configured on volunteers
+# you define below. It does NOT need to be running on the native environment.
+#
+# Alternatively, you can invoke ccm with the 'cd' flag to create the chroot
+# with distcc enabled rather than editing this value.
+#
+# For more on distcc, see: https://wiki.archlinux.org/index.php/Distcc
+RUNDISTCC=
+
+# This is only needed for users planning to build with distcc. Take care to
+# include the localhost plus all volunteers you define below. As a rule of thumb,
+# set to about twice the total number of available server CPUs. See the distcc
+# man page for more info.
+DISTCC_THREADS=
+
+# Define all machines in the distcc cluster below using the distcc syntax of:
+# "hostname/threads" and remember to list localhost/n first, followed by your
+# volunteer nodes listed in decreasing order of CPU power. Additional supported
+# options are passed through, see the manpage for distcc.
+#
+# In my experience, one sees best results using twice the number of physical
+# cores on the volunteer machines. In the example below foo is a quad and bar
+# is a dual.
+#DISTCC_HOSTS="localhost/9 foo/8 bar/4"
+
+# To build with ccache in the buildroot, uncomment and define the directory where
+# ccache will store its data below. If you're using a custom makepkg.conf
+# (see below), you MUST enable ccache that file in addition to defining the path
+# below. For more info about ccache, see: https://wiki.archlinux.org/index.php/ccache
+#
+#CCACHE_DIR="/scratch/.ccache"
+
+CUSTOM_PACMAN_CONF="/usr/share/devtools/pacman.conf.d/extra.conf"
diff --git a/build-container/entrypoint.sh b/build-container/entrypoint.sh
new file mode 100755
index 0000000..1766e00
--- /dev/null
+++ b/build-container/entrypoint.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+set -e
+
+if [ ! -z "${GPG_KEY_DATA-}" ]; then
+    if [ -z "${GPG_KEY_ID-}" ]; then
+        echo 'GPG_KEY_ID is not set, but GPG_KEY_DATA is set. Please set GPG_KEY_ID to the key ID of the key.'
+        exit 1
+    fi
+    gpg --import /dev/stdin <<<"${GPG_KEY_DATA}"
+fi
+
+# Only set -x here so we can't accidently print the GPG key up there
+set -x
+
+sudo chown -R buildbot:buildbot /src
+cd /src
+
+sed -i "/^THREADS=/s/9/$(nproc)/" ~/.config/clean-chroot-manager.conf
+sudo ccm64 d || true
+
+sudo bash build.sh -d -u all update
+
+build() {
+    sudo bash build.sh -d "$1" make
+}
+
+build utils
+
+build std
+build lts
+build hardened
+build zen
+build dkms
+
+# Not implemented, yet, as documented in archzfs-ci
+# sudo bash test.sh ...
+
+rm -rf /src/repo
+mkdir -p /src/repo
+cp -v /scratch/.buildroot/root/repo/*.pkg.tar* /src/repo/
+
+cd /src/repo
+# Ensure we do not have any stray signatures around
+rm -fv *.sig
+
+if [ ! -z "${GPG_KEY_ID-}" ]; then
+    # We use find here as that allows us to exclude .sig files, which do not need to be passed to repo-add or signed again
+    find . -type f -iname '*.pkg.tar*' -not -iname '*.sig' -print -exec gpg --batch --yes --detach-sign --use-agent -u "${GPG_KEY_ID}" {} \;
+    find . -type f -iname '*.pkg.tar*' -not -iname '*.sig' -print0 | xargs -0 repo-add -k "${GPG_KEY_ID}" -s -v archzfs.db.tar.xz
+else
+    repo-add archzfs.db.tar.xz *.pkg.tar*
+fi
+cd /src
diff --git a/build-container/systemd-nspawn-wrapper b/build-container/systemd-nspawn-wrapper
new file mode 100755
index 0000000..50549e3
--- /dev/null
+++ b/build-container/systemd-nspawn-wrapper
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec /usr/bin/systemd-nspawn --keep-unit "$@"
diff --git a/conf.sh b/conf.sh
index 392e649..f8b99f2 100644
--- a/conf.sh
+++ b/conf.sh
@@ -30,4 +30,4 @@ gpg_sign_key='0EE7A126'
 chroot_path="/repo/chroot/x86_64/$(whoami)"
 
 # Used to run mkaurball and mksrcinfo with lower privledges
-makepkg_nonpriv_user="jan"
+makepkg_nonpriv_user="buildbot"
diff --git a/mirror.sh b/mirror.sh
new file mode 100755
index 0000000..812e633
--- /dev/null
+++ b/mirror.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -euo pipefail
+
+# Dependencies: jq, curl, bash, rsync
+# Config settings
+OUTDIR='./repo'
+TMPDIR='./repo-tmp'
+# End of config settings
+
+rm -rf "${TMPDIR}"
+mkdir -p "${OUTDIR}" "${TMPDIR}"
+
+# TODO: Update to use the upstream repository
+RELEASE_INFO="$(curl -s https://api.github.com/repos/archzfs/archzfs/releases/experimental)"
+
+readarray -t FILE_INFO < <(echo "${RELEASE_INFO}" | jq '.assets | map(.browser_download_url + "|" + .updated_at) | join("\n")' -r)
+
+for info in "${FILE_INFO[@]}"; do
+    url="${info%|*}"
+    tmp_filename="${TMPDIR}/$(basename "${url}")"
+    filename="${OUTDIR}/$(basename "${url}")"
+
+    updated_at_str="${info#*|}"
+    updated_at="$(date '+%s' --date "${updated_at_str}")"
+
+    # Calculate old file modified time
+    current_at='0'
+    if [ -f "${filename}" ]; then
+        current_at="$(date '+%s' -r "${filename}")"
+    fi
+
+    # Either download or copy pre-existing file
+    if [ "${updated_at}" -ne "${current_at}" ]; then
+        echo "Downloading ${filename}"
+        curl -L -o "${tmp_filename}" "${url}"
+        touch -h -d "${updated_at_str}" "${tmp_filename}"
+    else
+        echo "Skipping ${filename}"
+        cp -p "${filename}" "${tmp_filename}"
+    fi
+done
+
+rsync --delete -av "${TMPDIR}/" "${OUTDIR}/"