initscript refactor and test

Init scripts have been refactored to improve readability. The most
important aspect of this refactor is that automated testing has been
implemented using Qemu. Eventually we will get to the point of
automatically building new archzfs packages as soon as a new kernel is
released to the Arch testing repo; after that, the tests will be used to
determine whether the archzfs packages can be pushed out to the AUR and
archzfs.com.

Closes #22
Closes #31
Closes #38
Closes #48
extramodules
Jesus Alvarez 9 years ago
parent a4458a3e50
commit 2393d1bbec
  1. 1    .gitignore
  2. 14   TODO.rst
  3. 15   build.sh
  4. 6    conf.sh
  5. 83   lib.sh
  6. 1    repo.sh
  7. 56   src/zfs-utils/zfs-utils.initcpio.hook
  8. 2    src/zfs-utils/zfs-utils.initcpio.install
  9. 4    testing/.gitignore
  10. 162  testing/README.rst
  11. 6    testing/files/poweroff.timer
  12. 244  testing/test.sh
  13. 25   testing/tests/archzfs-qemu-base/base.sh
  14. 32   testing/tests/archzfs-qemu-base/packer.json
  15. 67   testing/tests/archzfs-qemu-base/setup.sh
  16. 19   testing/tests/archzfs-qemu-iso-test-00-default/pacman.sh
  17. 11   testing/tests/archzfs-qemu-lts-test-00-zol-tests/runner.sh
  18. 52   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/README.rst
  19. 16   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso.sh
  20. 17   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/README.rst
  21. 0    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/fstab
  22. 1    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/hostname
  23. 1    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/locale.conf
  24. 0    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/machine-id
  25. 26   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/systemd/scripts/choose-mirror
  26. 10   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/systemd/system/choose-mirror.service
  27. 8    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/systemd/system/etc-pacman.d-gnupg.mount
  28. 3    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/systemd/system/getty@tty1.service.d/autologin.conf
  29. 15   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/systemd/system/pacman-init.service
  30. 1    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/etc/udev/rules.d/81-dhcpcd.rules
  31. 34   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/root/.automated_script.sh
  32. 1    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/root/.zlogin
  33. 23   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/root/customize_airootfs.sh
  34. 3    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/airootfs/root/install.txt
  35. 277  testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/build.sh
  36. 5    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/efiboot/loader/entries/archiso-x86_64.conf
  37. 2    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/efiboot/loader/loader.conf
  38. 6    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/isolinux/isolinux.cfg
  39. 2    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/mkinitcpio.conf
  40. 66   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/packages
  41. 92   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/pacman.conf
  42. BIN  testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/syslinux/splash.png
  43. 11   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/archiso/syslinux/syslinux.cfg
  44. 5    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/boot.sh
  45. 36   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/chroot.sh
  46. 14   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/conf.sh
  47. 84   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/fs.sh
  48. 12   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/hooks.sh
  49. 41   testing/tests/archzfs-qemu-lts-test-01-root-bootfs/pacman.sh
  50. 8    testing/tests/archzfs-qemu-lts-test-01-root-bootfs/syslinux.cfg

1
.gitignore vendored

@ -1,4 +1,3 @@
*!
*/*.tar.*
*.log
testing/

@ -0,0 +1,14 @@
============
Things To Do
============
* Sun May 29 09:37 2016: auto-increment version numbers when building test packages (see the sketch below)
New version for a kernel change
Pkgrel increase in case of a build by the user
* Sun May 29 03:32 2016: Add flag to disable signing in build.sh for testing purposes
Signing should be done in repo.sh when adding the packages to the archzfs repo
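A rough sketch of what the pkgrel bump mentioned above could look like, assuming the per-kernel package layout used by build.sh (packages/<kernel_name>/<pkg>/PKGBUILD); the helper name is hypothetical and is not part of build.sh yet:
.. code:: bash

    # Hypothetical helper: bump pkgrel for a package under packages/<kernel_name>/<pkg>
    bump_pkgrel() {
        local pkgbuild="${script_dir}/packages/${kernel_name}/${1}/PKGBUILD"
        # Read the current pkgrel and rewrite it incremented by one
        local cur
        cur=$(grep -oP '^pkgrel=\K[0-9]+' "${pkgbuild}")
        sed -i "s/^pkgrel=.*/pkgrel=$((cur + 1))/" "${pkgbuild}"
    }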

@ -226,7 +226,7 @@ generate_package_files() {
build_packages() {
for pkg in "${pkg_list[@]}"; do
msg "Building ${pkg}..."
run_cmd "cd \"${script_dir}/packages/${kernel_name}/${pkg}\" && sudo ~/bin/ccm64 s && mksrcinfo"
run_cmd "cd \"${script_dir}/packages/${kernel_name}/${pkg}\" && ~/bin/ccm64 s && mksrcinfo"
if [[ ${run_cmd_return} -ne 0 ]]; then
error "A problem occurred building the package"
exit 1
@ -241,6 +241,12 @@ build_packages() {
generate_mode_list
if [[ ${EUID} -ne 0 ]]; then
error "This script must be run as root."
exit 1;
fi
if [[ $# -lt 1 ]]; then
usage
fi
@ -306,17 +312,17 @@ if have_command "update_sums"; then
run_cmd_show_and_capture_output "sha256sum ${script_dir}/src/zfs-utils/zfs-utils.initcpio.hook"
azsha2=$(echo ${run_cmd_output} | awk '{ print $1 }')
run_cmd "sed -e 's/^zfs_initcpio_hook_hash.*/zfs_initcpio_hook_hash=\"${azsha1}\"/g' -i ${script_dir}/conf.sh"
run_cmd "sed -e 's/^zfs_initcpio_hook_hash.*/zfs_initcpio_hook_hash=\"${azsha2}\"/g' -i ${script_dir}/conf.sh"
run_cmd_show_and_capture_output "sha256sum ${script_dir}/src/zfs-utils/zfs-utils.initcpio.install"
azsha3=$(echo ${run_cmd_output} | awk '{ print $1 }')
run_cmd "sed -e 's/^zfs_initcpio_install_hash.*/zfs_initcpio_install_hash=\"${azsha1}\"/g' -i ${script_dir}/conf.sh"
run_cmd "sed -e 's/^zfs_initcpio_install_hash.*/zfs_initcpio_install_hash=\"${azsha3}\"/g' -i ${script_dir}/conf.sh"
fi
if have_command "update_chroot"; then
msg "Updating the x86_64 clean chroot..."
run_cmd "sudo ~/bin/ccm64 u"
run_cmd "~/bin/ccm64 u"
fi
@ -337,7 +343,6 @@ for func in "${update_funcs[@]}"; do
fi
if have_command "make"; then
build_packages
sign_packages
build_sources
fi
if have_command "sources"; then

@ -5,16 +5,16 @@ zol_version="0.6.5.7"
zfs_src_hash="4a9e271bb9a6af8d564e4d5800e4fff36224f1697b923a7253659bdda80dc590"
spl_src_hash="dc8690e407183eeb7a6af0e7692d6e0a1cd323d51dd1aa492522c421b1924ea0"
zfs_bash_completion_hash="b60214f70ffffb62ffe489cbfabd2e069d14ed2a391fac0e36f914238394b540"
zfs_initcpio_install_hash="8190b69853d9670c6aaf1d14c674598a14c58f8ec359e249a1c3010c0b39d074"
zfs_initcpio_hook_hash="67a96169d36853d8f18ee5a2443ecfcd2461a20f9109f4b281bee3945d83518a"
zfs_initcpio_install_hash="dd8901295349da729e23ec5d1da57d49d31f3d4ea2f9ab20398469e9e371c504"
zfs_initcpio_hook_hash="5f749dbe3b853c5b569d5050b50226b53961cf1fa2cfc5cea0ecc3df75885d2f"
# Notification address
email="jeezusjr@gmail.com"
# Repository path and name
repo_name="archzfs"
repo_name_test="archzfs-testing"
repo_basepath="/data/pacman/repo"
repo_name_test="archzfs-testing"
# SSH login address (can use ssh config Hosts)
remote_login="webfaction"

@ -6,8 +6,10 @@ shopt -s nullglob
dry_run=0
debug_flag=0
mode=""
test_mode=""
kernel_name="" # set by generate_mode_list
mode_list=() # set by generate_mode_list
test_commands_list=() # set by generate_test_commands_list
update_funcs=() # set by generate_mode_list
commands=()
@ -397,6 +399,47 @@ check_mode() {
exit 155
}
check_test_mode() {
# $1 the mode to check for
debug "check_test_mode: checking for mode in '$1'"
for m in "${mode_list[@]}"; do
debug "check_test_mode: on '${m}'"
local moden=$(echo ${m} | cut -f2 -d:)
# debug "moden: ${moden}"
if [[ "${moden}" == "$1" ]]; then
if [[ ${mode} != "" ]]; then
error "Already have mode '${moden}', only one mode can be used at a time!"
usage
exit 155
fi
mode="$1"
kernel_name=$(echo ${m} | cut -f1 -d:)
return
fi
done
debug "check_test_mode: checking for test mode in '$1'"
for m in "${test_commands_list[@]}"; do
debug "check_test_mode: on '${m}'"
local moden=$(echo ${m})
if [[ ${moden} =~ $1 ]]; then
debug "Found match! moden: ${moden} \$1: $1"
if [[ ${test_mode} != "" ]]; then
error "Already have test mode '${moden}', only one test mode can be used at a time!"
usage
exit 155
fi
test_mode="${moden}"
return
fi
done
error "Unrecognized argument '$1'"
usage
exit 155
}
have_command() {
# $1: The command to check for
# returns 0 if true, and 1 for false
@ -413,6 +456,22 @@ have_command() {
}
have_test_command() {
# $1: The command to check for
# returns 0 if true, and 1 for false
debug "have_test_command: checking '$1'"
for cmd in "${test_commands_list[@]}"; do
# debug "have_test_command: loop '$cmd'"
if [[ ${cmd} == $1 ]]; then
debug "have_test_command: '$1' is defined"
return 0
fi
done
debug "have_test_command: '$1' is not defined"
return 1
}
check_debug() {
# args must be defined in the source script that loads lib.sh!
# Returns 0 if debug argument is defined and 1 if not
@ -426,14 +485,32 @@ check_debug() {
generate_mode_list() {
for m in $(ls ${script_dir}/src/kernels); do
mn=$(source ${script_dir}/src/kernels/${m}; echo ${mode_name})
md=$(source ${script_dir}/src/kernels/${m}; echo ${mode_desc})
# $1: The path where the kernel things can be found, must have trailing slash
path="$1"
if [[ ${path} == "" ]]; then
path="${script_dir}/src/kernels"
fi
for m in $(ls ${path}); do
mn=$(source ${path}/${m}; echo ${mode_name})
md=$(source ${path}/${m}; echo ${mode_desc})
mode_list+=("${m%.*}:${mn}:${md}")
done
}
generate_test_commands_list() {
# $1: The path where the kernel things can be found, must have trailing slash
path="$1"
if [[ ${path} == "" ]]; then
path="${script_dir}"
fi
debug "generate_test_commands_list: path == ${path}"
for m in $(find ${path} -type d -iname "*archzfs-qemu-*-test-*"); do
test_commands_list+=("${m}")
done
}
get_kernel_update_funcs() {
for kernel in $(ls ${script_dir}/src/kernels); do
if [[ ${kernel%.*} != ${kernel_name} ]]; then

@ -65,7 +65,6 @@ for (( a = 0; a < $#; a++ )); do
if [[ ${args[$a]} == "azfs" ]]; then
repo_name="archzfs"
elif [[ ${args[$a]} == "test" ]]; then
# TODO: NOT IMPLMENTED YET
repo_name="archzfs-testing"
elif [[ ${args[$a]} == "-n" ]]; then
dry_run=1

@ -20,13 +20,12 @@ zfs_get_bootfs () {
}
zfs_mount_handler () {
if [ "$ZFS_DATASET" = "bootfs" ] ; then
if [ "${ZFS_DATASET}" = "bootfs" ] ; then
if ! zfs_get_bootfs ; then
# Lets import everything and try again
zpool import $ZPOOL_IMPORT_FLAGS -N -a $ZPOOL_FORCE
zpool import ${ZPOOL_IMPORT_FLAGS} -N -a ${ZPOOL_FORCE}
if ! zfs_get_bootfs ; then
echo "ZFS: Cannot find bootfs."
return 1
die "ZFS: Cannot find bootfs."
fi
fi
fi
@ -34,42 +33,41 @@ zfs_mount_handler () {
local pool="${ZFS_DATASET%%/*}"
local rwopt_exp=${rwopt:-ro}
if ! "zpool" list -H $pool 2>&1 > /dev/null ; then
if [ "$rwopt_exp" != "rw" ]; then
msg "ZFS: Importing pool $pool readonly."
ZPOOL_IMPORT_FLAGS="$ZPOOL_IMPORT_FLAGS -o readonly=on"
if ! zpool list -H ${pool} 2>&1 > /dev/null ; then
if [ "${rwopt_exp}" != "rw" ]; then
msg "ZFS: Importing pool ${pool} readonly."
ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -o readonly=on"
else
msg "ZFS: Importing pool $pool."
msg "ZFS: Importing pool ${pool}."
fi
if ! "zpool" import $ZPOOL_IMPORT_FLAGS -N $pool $ZPOOL_FORCE ; then
echo "ZFS: Unable to import pool $pool."
return 1
if ! zpool import ${ZPOOL_IMPORT_FLAGS} -N ${pool} ${ZPOOL_FORCE} ; then
die "ZFS: Unable to import pool ${pool}."
fi
fi
local node=$1
local tab_file="$node/etc/fstab"
local zfs_datasets=$(zfs list -H -o name -t filesystem -r "$ZFS_DATASET")
local zfs_datasets=$(zfs list -H -o name -t filesystem -r "${ZFS_DATASET}")
# Mount the root, and any child datasets
for dataset in $zfs_datasets; do
mountpoint=$(zfs get -H -o value mountpoint "$dataset")
case $mountpoint in
for dataset in ${zfs_datasets}; do
mountpoint=$(zfs get -H -o value mountpoint "${dataset}")
case ${mountpoint} in
"none")
# skip this line/dataset.
;;
"legacy")
if [ -f "$tab_file" ]; then
if findmnt -snero source -F "$tab_file" -S "$dataset" > /dev/null 2>&1; then
opt=$(findmnt -snero options -F "$tab_file" -S "$dataset")
mnt=$(findmnt -snero target -F "$tab_file" -S "$dataset")
mount -t zfs -o "$opt" "$dataset" "$node$mnt"
if [ -f "${tab_file}" ]; then
if findmnt -snero source -F "${tab_file}" -S "${dataset}" > /dev/null 2>&1; then
opt=$(findmnt -snero options -F "${tab_file}" -S "${dataset}")
mnt=$(findmnt -snero target -F "${tab_file}" -S "${dataset}")
mount -t zfs -o "${opt}" "${dataset}" "${node}${mnt}"
fi
fi
;;
*)
mount -t zfs -o "zfsutil,$rwopt_exp" "$dataset" "$node$mountpoint"
mount -t zfs -o "zfsutil,${rwopt_exp}" "${dataset}" "${node}${mountpoint}"
;;
esac
done
@ -77,19 +75,19 @@ zfs_mount_handler () {
run_hook() {
# Force import the pools, useful if the pool has not properly been exported using 'zpool export <pool>'
[[ $zfs_force == 1 ]] && ZPOOL_FORCE='-f'
[[ ${zfs_force} == 1 ]] && ZPOOL_FORCE='-f'
# Add import directory to import command flags
[[ "$zfs_import_dir" != "" ]] && ZPOOL_IMPORT_FLAGS="$ZPOOL_IMPORT_FLAGS -d $zfs_import_dir"
[[ "${zfs_import_dir}" != "" ]] && ZPOOL_IMPORT_FLAGS="${ZPOOL_IMPORT_FLAGS} -d ${zfs_import_dir}"
# Wait 15 seconds for ZFS devices to show up
[[ "${zfs_wait}" == "" ]] && ZFS_WAIT="15" || ZFS_WAIT="${zfs_wait}"
if [ "$root" = 'zfs' ]; then
if [[ "${root}" == 'zfs' ]]; then
mount_handler='zfs_mount_handler'
fi
case $zfs in
case ${zfs} in
"")
# skip this line/dataset
;;
@ -98,20 +96,20 @@ run_hook() {
mount_handler="zfs_mount_handler"
;;
*)
ZFS_DATASET=$zfs
ZFS_DATASET=${zfs}
mount_handler="zfs_mount_handler"
;;
esac
# Allow up to n seconds for zfs device to show up
for i in $(seq 1 "${ZFS_WAIT}"); do
[ -c "/dev/zfs" ] && break
[[ -c "/dev/zfs" ]] && break
sleep 1
done
}
run_latehook () {
zpool import -N -a $ZPOOL_FORCE
zpool import -N -a ${ZPOOL_FORCE}
}
# vim:set ts=4 sw=4 ft=sh et:

@ -43,7 +43,7 @@ build() {
add_runscript
# allow mount(8) to "autodetect" ZFS
echo 'zfs' >>"$BUILDROOT/etc/filesystems"
echo 'zfs' >>"${BUILDROOT}/etc/filesystems"
[[ -f /etc/zfs/zpool.cache ]] && add_file "/etc/zfs/zpool.cache"
[[ -f /etc/modprobe.d/zfs.conf ]] && add_file "/etc/modprobe.d/zfs.conf"

@ -0,0 +1,4 @@
*.qcow2
packer_work/
out/
work/

@ -0,0 +1,162 @@
=====================
archzfs testing guide
=====================
:Modified: Sun Sep 04 10:04 2016
--------
Overview
--------
* Hosted at archzfs.com
archzfs.com for the project webpage (webfaction)
archzfs.com/repo for the repo (webfaction)
build.archzfs.com for jenkins
deploy.archzfs.com custom webpage for deploying valid builds (local server)
* Builder hardware: Intel Xeon v3 with 16GB of ECC RAM @ home in a DMZ
* Build a qemu base image using packer
* Provision a test environment with script, perform regression tests
Regression test suite (http://zfsonlinux.org/zfs-regression-suite.html)
Test booting into a zfs root filesystem
* MAYBE: deploy.archzfs.com for pushing packages to AUR and the archzfs package repo
2fa login
Shows complete list of changes from zfsonlinux git
Shows all log output from builders and tests
One button deploy
------------
Requirements
------------
To run the test automation, the following items are required (an install sketch follows the list):
* Reflector
For selecting a fast mirror.
* nfs (pacman package cache)
Be a good netizen and only download binaries once.
* packer
Used to build the base image from the latest archiso. Install from AUR.
* sshpass
To allow automated ssh logins.
* ksh
From AUR, needed for zfs-test
* python2.6
From AUR, needed for zfs-test
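A minimal install sketch for the items above, assuming the repo/AUR split described in the list and an AUR helper such as yaourt; "packer-io" matches the binary name invoked by test.sh, while the AUR package names for ksh and python 2.6 are assumptions:
.. code:: console

    # pacman -S reflector nfs-utils sshpass qemu
    $ yaourt -S packer-io ksh python26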
----------------------
Build and test process
----------------------
Stage 1
+++++++
1. Build the packages using the normal build process, but without signing (see the sketch after this list).
Build on the local machine and copy the packages to the test environment.
The ccm64 command will need to be run without root privileges.
#. Use packer to create a test instance with the zfs packages installed
#. Perform regression tests
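A hypothetical Stage 1 invocation is sketched below; the mode and command names come from build.sh's usage output and src/kernels/ and are assumptions here, and the flag to disable signing is still a TODO item:
.. code:: console

    # ./build.sh -d lts make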
Stage 2
+++++++
1. Use packer to build zfs root filesystem test instances
packer configurations for:
a. zfs single root filesystem
#. zfs with storage pool as root filesystem
#. zfs root with legacy mounts
---------------------------------------
Packer/KVM build/test environment setup
---------------------------------------
The goal of this article is to set up a qemu-based testing environment for the
archzfs project.
This guide provides details on setting up VMs for multiple zfs usage
scenarios.
-------------
Helpful links
-------------
* http://blog.falconindy.com/articles/build-a-virtual-army.html
--------
Packages
--------
1. qemu
----------
Qemu Setup
----------
1. Check kvm compatibility
.. code:: bash
$ lscpu | grep Virtualization
#. Load kernel modules
.. code:: bash
# modprobe -a kvm tun virtio
#. Install qemu
.. code:: bash
# pacman -Sy qemu
nfs
+++
::
/var/cache/pacman/pkg 127.0.0.1(rw,async,no_root_squash,no_subtree_check,insecure)
qemu sends packets from 127.0.0.1:44730 to 127.0.0.1:2049 for mounting.
The insecure option allows connections from source ports above 1024.
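For reference, the guest side of this export is mounted over QEMU's user-mode network, where 10.0.2.2 is the host; this is the same mount the test scripts perform later:
.. code:: bash

    # Mount the host's pacman package cache inside the guest (10.0.2.2 = host in QEMU user networking)
    mount -t nfs4 -o rsize=32768,wsize=32768,timeo=3 10.0.2.2:/var/cache/pacman/pkg /var/cache/pacman/pkg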
-----
Notes
-----
- Sun Apr 19 19:45 2015: Found more tests at https://github.com/behlendorf/xfstests
Requires additional pools
- Sun Apr 19 19:51 2015: ztest slides http://blog.delphix.com/csiden/files/2012/01/ZFS_Backward_Compatability_Testing.pdf
- Sun Apr 19 20:05 2015: What I am trying to do is described here: https://github.com/zfsonlinux/zfs/issues/1534

@ -0,0 +1,6 @@
[Unit]
Description=Delayed poweroff
[Timer]
OnActiveSec=1
Unit=poweroff.target

@ -0,0 +1,244 @@
#!/bin/bash
args=("$@")
script_name=$(basename $0)
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if ! source ${script_dir}/../lib.sh; then
echo "!! ERROR !! -- Could not load lib.sh!"
exit 155
fi
source_safe "${script_dir}/../conf.sh"
ssh_cmd="/usr/sbin/ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=3 -p 2222"
ssh_pass="sshpass -p azfstest"
ssh="${ssh_pass} ${ssh_cmd}"
test_pkg_workdir="archzfs"
export packer_work_dir="${script_dir}/files/packer_work"
export base_image_output_dir="${script_dir}/files"
init_archiso_vars() {
if [[ ! -d ${test_mode}/archiso ]]; then
export archiso_baseurl="http://mirrors.kernel.org/archlinux/iso/latest"
debug "archiso_baseurl=${archiso_baseurl}"
export archiso_iso_name=$(curl -s ${archiso_baseurl}/ | grep -o "\".*dual.iso\"" | tr -d '"')
export archiso_sha=$(curl -s ${archiso_baseurl}/sha1sums.txt | grep ${archiso_iso_name} | awk '{ print $1 }')
export archiso_url="${packer_work_dir}/${archiso_iso_name}"
else
export archiso_iso_name=$(find files/packer_work/ -iname "archlinux*.iso" | xargs basename)
export archiso_sha=$(sha1sum ${packer_work_dir}/${archiso_iso_name} | awk '{ print $1 }')
export archiso_url="${packer_work_dir}/${archiso_iso_name}"
fi
debug "archiso_iso_name=${archiso_iso_name}"
debug "archiso_sha=${archiso_sha}"
debug "archiso_url=${archiso_url}"
}
gen_base_image_name() {
export base_image_basename="$(basename ${test_mode})-archiso-${archiso_iso_name:10:-9}"
debug "base_image_basename=${base_image_basename}"
run_cmd_output=$(find ${script_dir} -iname "*$(basename ${test_mode})-*" -printf "%P\\n" | sort -r | head -n 1)
if [[ ${run_cmd_output} == "" ]]; then
export base_image_name="${base_image_basename}-build-$(date +%Y.%m.%d).qcow2"
else
export base_image_name="${run_cmd_output}"
fi
export base_image_path="${script_dir}/${base_image_name}"
export work_image_randname="${base_image_name%.qcow2}_${RANDOM}.qcow2"
}
usage() {
echo "${script_name} - A test script for archzfs"
echo
echo "Usage: ${script_name} [options] [mode] [command [command option] [...]"
echo
echo "Options:"
echo
echo " -h: Show help information."
echo " -n: Dryrun; Output commands, but don't do anything."
echo " -d: Show debug info."
echo " -R: Re-use existing archzfs test packages."
echo
# echo "Modes:"
# echo
# for ml in "${mode_list[@]}"; do
# mn=$(echo ${ml} | cut -f2 -d:)
# md=$(echo ${ml} | cut -f3 -d:)
# echo -e " ${mn} ${md}"
# done
echo
echo "Commands:"
echo
for ml in "${test_commands_list[@]}"; do
mn=$(basename ${ml})
echo -e " ${mn#archzfs-qemu-}"
done
exit 155
}
generate_test_commands_list
debug_print_array "test_commands_list" "${test_commands_list[@]}"
# generate_mode_list "${script_dir}/../src/kernels"
for (( a = 0; a < $#; a++ )); do
if [[ ${args[$a]} == "-R" ]]; then
commands+=("reuse")
elif [[ ${args[$a]} == "-n" ]]; then
dry_run=1
elif [[ ${args[$a]} == "-d" ]]; then
debug_flag=1
elif [[ ${args[$a]} == "-h" ]]; then
usage
else
check_test_mode "${args[$a]}"
debug "have mode '${mode}'"
debug "have test mode '${test_mode}'"
fi
done
if [[ $# -lt 1 ]]; then
usage
fi
if [[ ${test_mode} == "" ]]; then
echo
error "A test command must be selected!"
usage
fi
# Check for internet (thanks Comcast!)
if [[ $(ping -w 1 -c 1 8.8.8.8 &> /dev/null; echo $?) != 0 ]]; then
error "Could not reach google dns server! (No internet?)"
exit 1;
fi
if [[ ${EUID} -ne 0 ]]; then
error "This script must be run as root."
exit 1;
fi
if [[ "${test_mode}" != "" ]]; then
msg "Building arch base image"
if [[ -d "${packer_work_dir}/output-qemu" ]]; then
msg2 "Deleting '${packer_work_dir}/output-qemu' because it should not exist"
run_cmd "rm -rf ${packer_work_dir}/output-qemu"
fi
if [[ ! -d "${packer_work_dir}" ]]; then
msg2 "Creating '${packer_work_dir}' because it does not exist"
run_cmd "mkdir ${packer_work_dir}"
fi
if [[ ! -f "${packer_work_dir}/mirrorlist" ]]; then
msg2 "Generating pacman mirrorlist"
run_cmd "/usr/bin/reflector -c US -l 5 -f 5 --sort rate 2>&1 > ${packer_work_dir}/mirrorlist"
fi
msg2 "Using packer to build the base image ..."
gen_base_image_name
# Base files
run_cmd "check_symlink '${script_dir}/tests/archzfs-qemu-base/packer.json' '${packer_work_dir}/packer.json'"
run_cmd "check_symlink '${script_dir}/tests/archzfs-qemu-base/base.sh' '${packer_work_dir}/base.sh'"
run_cmd "check_symlink '${script_dir}/tests/archzfs-qemu-base/setup.sh' '${packer_work_dir}/setup.sh'"
run_cmd "check_symlink '${script_dir}/../lib.sh' '${packer_work_dir}/lib.sh'"
run_cmd "check_symlink '${script_dir}/../conf.sh' '${packer_work_dir}/archzfs-conf.sh'"
run_cmd "check_symlink '${script_dir}/files/poweroff.timer' '${packer_work_dir}/poweroff.timer'"
# run_cmd "check_symlink '${repo_basepath}/${repo_name_test}' '${packer_work_dir}/${repo_name_test}'"
# Test files
run_cmd "check_symlink '${test_mode}/archiso.sh' '${packer_work_dir}/test-archiso.sh'"
run_cmd "check_symlink '${test_mode}/boot.sh' '${packer_work_dir}/test-boot.sh'"
run_cmd "check_symlink '${test_mode}/chroot.sh' '${packer_work_dir}/test-chroot.sh'"
run_cmd "check_symlink '${test_mode}/conf.sh' '${packer_work_dir}/test-conf.sh'"
run_cmd "check_symlink '${test_mode}/fs.sh' '${packer_work_dir}/test-fs.sh'"
run_cmd "check_symlink '${test_mode}/hooks.sh' '${packer_work_dir}/test-hooks.sh'"
run_cmd "check_symlink '${test_mode}/pacman.sh' '${packer_work_dir}/test-pacman.sh'"
run_cmd "check_symlink '${test_mode}/syslinux.cfg' '${packer_work_dir}/syslinux.cfg'"
# Make it easy to get the files into the archiso environment
run_cmd "tar --exclude='*.iso' --exclude=packer_cache --exclude=b.tar -C ${packer_work_dir} -cvhf ${packer_work_dir}/b.tar ."
msg "Building the archiso if required"
source_safe "${test_mode}/conf.sh" && source_safe "${test_mode}/archiso.sh" && test_build_archiso
init_archiso_vars
# Uncomment to enable packer debug
export PACKER_LOG=1
export PACKER_CACHE_DIR="${packer_work_dir}/packer_cache"
# run_cmd "cd ${packer_work_dir} && packer-io build -debug packer.json"
run_cmd "cd ${packer_work_dir} && packer-io build packer.json"
# msg "Moving the compiled base image"
# run_cmd "mv -f ${base_image_output_dir}/output-qemu/packer-qemu ${base_image_path}"
fi
# if have_command "test"; then
# msg "Testing package target '${mode}'"
# if ! have_command "reuse"; then
# msg2 "Building test packages"
# build_test_packages
# fi
# msg2 "Copying test packages"
# copy_latest_packages
# msg2 "Cloning ${base_image_path}"
# run_cmd "cp ${base_image_path} ${work_image_randname}"
# msg "Booting VM clone..."
# cmd="qemu-system-x86_64 -enable-kvm "
# cmd+="-m 4096 -smp 2 -redir tcp:2222::22 -drive "
# cmd+="file=${work_image_randname},if=virtio"
# run_cmd "${cmd}" &
# if [[ -z "${debug_flag}" ]]; then
# msg "Waiting for SSH..."
# while :; do
# run_cmd "${ssh} root@localhost echo &> /dev/null"
# if [[ ${run_cmd_return} -eq 0 ]]; then
# break
# fi
# done
# fi
# msg2 "Copying the latest packages to the VM"
# copy_latest_packages
# run_cmd "rsync -vrthP -e '${ssh}' archzfs/x64/ root@localhost:"
# run_cmd "${ssh} root@localhost pacman -U --noconfirm '*.pkg.tar.xz'"
# # msg2 "Cloning ZFS test suite"
# # run_cmd "${ssh} root@localhost git clone https://github.com/zfsonlinux/zfs-test.git /usr/src/zfs-test"
# # run_cmd "${ssh} root@localhost chown -R zfs-tests: /usr/src/zfs-test/"
# # msg2 "Building ZFS test suite"
# # run_cmd "${ssh} root@localhost 'cd /usr/src/zfs-test && ./autogen.sh && ./configure'"
# # run_cmd "${ssh} root@localhost 'cd /usr/src/zfs-test && ./autogen.sh && ./configure && make test'"
# # msg2 "Cause I'm housin"
# # run_cmd "${ssh} root@localhost systemctl poweroff &> /dev/null"
# # wait
# fi

@ -0,0 +1,25 @@
#!/bin/bash
cat <<-EOF > "${arch_target_dir}/usr/bin/base.sh"
echo '${fqdn}' > /etc/hostname
/usr/bin/ln -s /usr/share/zoneinfo/${timezone} /etc/localtime
echo 'KEYMAP=${keymap}' > /etc/vconsole.conf
/usr/bin/sed -i 's/#${language}/${language}/' /etc/locale.gen
/usr/bin/locale-gen
/usr/bin/sed -i 's/filesystems/zfs filesystems/' '/etc/mkinitcpio.conf'
/usr/bin/mkinitcpio -p linux-lts
/usr/bin/usermod --password ${password} root
# https://wiki.archlinux.org/index.php/Network_Configuration#Device_names
/usr/bin/ln -s /dev/null /etc/udev/rules.d/80-net-setup-link.rules
/usr/bin/ln -s '/usr/lib/systemd/system/dhcpcd@.service' '/etc/systemd/system/multi-user.target.wants/dhcpcd@eth0.service'
# Configure ssh
sed -e '/^#PermitRootLogin prohibit-password$/c PermitRootLogin yes' \
-e '/^#UseDNS no$/c UseDNS no' \
-i /etc/ssh/sshd_config
/usr/bin/systemctl enable sshd.service
EOF

@ -0,0 +1,32 @@
{
"variables": {
"http_dir": "{{env `packer_work_dir`}}",
"iso_url": "{{env `archiso_url`}}",
"iso_checksum": "{{env `archiso_sha`}}",
"iso_checksum_type": "sha1",
"base_image_name": "{{env `base_image_basename`}}"
},
"builders": [
{
"type": "qemu",
"iso_url": "{{user `iso_url`}}",
"iso_checksum": "{{user `iso_checksum`}}",
"iso_checksum_type": "{{user `iso_checksum_type`}}",
"http_directory": "{{user `http_dir`}}",
"boot_wait": "5s",
"headless": false,
"boot_command": [
"<enter><wait10>",
"curl -O http://{{.HTTPIP}}:{{.HTTPPort}}/b.tar && tar -xvf b.tar<enter><wait>",
"bash ./setup.sh<enter>"
],
"disk_size": 122880,
"ssh_username": "root",
"ssh_password": "azfstest",
"ssh_wait_timeout": "7200s",
"shutdown_command": "systemctl start poweroff.timer",
"vm_name": "{{user `base_image_name`}}",
"output_directory": "{{user `base_image_output_dir`}}"
}
]
}

@ -0,0 +1,67 @@
#!/usr/bin/env bash
export script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if ! source ${script_dir}/lib.sh; then
echo "!! ERROR !! -- Could not load lib.sh!"
exit 155
fi
# source_safe "${script_dir}/archzfs-conf.sh"
source_safe "${script_dir}/test-archiso.sh"
source_safe "${script_dir}/test-boot.sh"
source_safe "${script_dir}/test-chroot.sh"
source_safe "${script_dir}/test-conf.sh"
source_safe "${script_dir}/test-fs.sh"
source_safe "${script_dir}/test-hooks.sh"
source_safe "${script_dir}/test-pacman.sh"
export debug_flag=1
export dry_run=0
# Install nfs mount points to the archiso environment
test_fs_config_nfs
# Install the archzfs repo to the archiso environment
test_pacman_config
# Install the zfs root filesystem for the test
test_fs_config_root_preinstall
# Install base packages into the chroot
test_pacman_pacstrap
# Install nfs mount points to the arch chroot environment
test_fs_config_nfs "/mnt/ROOT"
# Configure pacman for the arch chroot environment
test_pacman_config "/mnt/ROOT"
# Finish installing arch in the chroot environment
test_chroot_setup "/mnt/ROOT"
# Install the boot loader!
test_bootloader_install
# Filesystem things to do after installation
test_fs_config_root_postinstall
exit 0
# Reboot!
setup_exit

@ -0,0 +1,19 @@
test_pacman_config() {
msg "Setting archiso pacman mirror"
# /usr/bin/cp mirrorlist /etc/pacman.d/mirrorlist
# setup pacman repositories in the archiso
# msg "Installing local pacman package repositories"
# test_pacman_config /etc/pacman.conf
# dirmngr < /dev/null
# pacman-key -r 0EE7A126
# if [[ $? -ne 0 ]]; then
# exit 1
# fi
# pacman-key --lsign-key 0EE7A126
# pacman -Sy archzfs-archiso-linux
# modprobe zfs
}

@ -0,0 +1,11 @@
# From old arch-config.sh
# # zfs-test configuration
# # /usr/bin/groupadd zfs-tests
# # /usr/bin/useradd --comment 'ZFS Test User' -d /var/tmp/test_results --create-home --gid users --groups zfs-tests zfs-tests
# # sudoers.d is the right way, but the zfs test suite checks /etc/sudoers...
# echo 'Defaults env_keep += "SSH_AUTH_SOCK"' > /etc/sudoers.d/10_zfs_test
# echo 'zfs-tests ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers.d/10_zfs_test
# /usr/bin/chmod 0440 /etc/sudoers.d/10_zfs_test

@ -0,0 +1,52 @@
=========================================
Test archzfs-qemu-lts-test-01-root-bootfs
=========================================
Tests all the steps required for archzfs-linux-lts to be used as a boot filesystem.
--------
Overview
--------
Builds a custom archiso with the linux-lts kernel, which packer uses to create a Qemu base image. Syslinux is used as the
boot loader.
---
How
---
1. The archzfs-linux-lts packages are built for the linux-lts kernel and added to a package repository named "archzfs-testing".
#. The archzfs-testing repo is shared over NFS.
#. A custom archiso is built that boots into the linux-lts kernel. See `Archiso customization`_
#. The test files are compressed into a tar archive.
#. Packer is used to build a qemu base image using the custom archiso.
#. `setup.sh` is run in the archiso to install Arch on ZFS.
#. After installation of Arch on ZFS, the VM is rebooted and packer finalizes the base image.
#. The qemu base image created by packer is booted; if the boot succeeds, the test is considered passed (see the example invocation below).
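A hypothetical invocation of this test from the testing/ directory; the command name is the test directory name with the archzfs-qemu- prefix stripped, as listed by test.sh's usage output, and test.sh must be run as root:
.. code:: console

    # ./test.sh -d lts-test-01-root-bootfs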
---------------------
Archiso customization
---------------------
At the time of putting this test together (2016.09.03), there was no stable ZFSonLinux release that supported kernel 4.7, yet
the archiso release at the time shipped with kernel 4.7. In order to install Arch on ZFS for testing, I needed an archiso with
the linux-lts kernel. Thus, the archzfs-archiso was born!
The archiso is built by `test.sh` and used by packer to create a Qemu base image with ZFS as the root filesystem for testing.
The stock archiso ships with a number of features that are not needed for testing, so they have been stripped out or modified. This
includes:
* Booting straight into the linux-lts kernel to speed up the testing cycle.
* Dropping i686 support, since ZFS does not support arch-i686.
* Removing iPXE, which is not needed.
The archiso sources are copied from `/usr/share/archiso/configs/releng` after installation of the "archiso" package. The
modified code is contained in the `testing/archiso-linux-lts` directory of this project.
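To see exactly what was changed relative to the stock releng profile, a comparison along these lines works (a sketch using the paths described above, not something the test scripts run):
.. code:: console

    $ diff -ru /usr/share/archiso/configs/releng testing/archiso-linux-lts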

@ -0,0 +1,16 @@
#
# Used to configure custom built archiso
#
# We need an archiso with the lts kernel used by default
test_build_archiso() {
msg "Building archiso"
cd ${test_root_dir}/archiso/ &> /dev/null
# if [[ -d ${packer_work_dir}/out ]] && [[ $(ls -1 | wc -l) -gt 0 ]]; then
# run_cmd "rm -rf ${test_root_dir}/archiso/out/archlinux*"
# fi
# run_cmd "./build.sh -v"
msg2 "Coping archiso to packer_work_dir"
run_cmd "cp ${test_root_dir}/archiso/out/archlinux* ${packer_work_dir} && rm -rf ${test_root_dir}/archiso/work"
cd - &> /dev/null
}

@ -0,0 +1,17 @@
===================
Archzfs Archiso LTS
===================
Used to create custom archiso with LTS kernel. Used only for testing archzfs. Supports only the x86_64 architecture.
How to use
.. code:: console
# ./build.sh -v
To test with qemu:
.. code:: console
# qemu-system-x86_64 -enable-kvm -m 4096 -smp 2 -drive file=./out/archlinux-2016.09.04.iso,if=virtio,media=disk,format=raw

@ -0,0 +1,26 @@
#!/bin/bash
get_cmdline() {
local param
for param in $(< /proc/cmdline); do
case "${param}" in
$1=*) echo "${param##*=}";
return 0
;;
esac
done
}
mirror=$(get_cmdline mirror)
[[ $mirror = auto ]] && mirror=$(get_cmdline archiso_http_srv)
[[ $mirror ]] || exit 0
mv /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.orig
cat >/etc/pacman.d/mirrorlist << EOF
#
# Arch Linux repository mirrorlist
# Generated by archiso
#
Server = ${mirror%%/}/\$repo/os/\$arch
EOF

@ -0,0 +1,10 @@
[Unit]
Description=Choose mirror from the kernel command line
ConditionKernelCommandLine=mirror
[Service]
Type=oneshot
ExecStart=/etc/systemd/scripts/choose-mirror
[Install]
WantedBy=multi-user.target

@ -0,0 +1,8 @@
[Unit]
Description=Temporary /etc/pacman.d/gnupg directory
[Mount]
What=tmpfs
Where=/etc/pacman.d/gnupg
Type=tmpfs
Options=mode=0755

@ -0,0 +1,3 @@
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin root --noclear %I 38400 linux

@ -0,0 +1,15 @@
[Unit]
Description=Initializes Pacman keyring
Wants=haveged.service
After=haveged.service
Requires=etc-pacman.d-gnupg.mount
After=etc-pacman.d-gnupg.mount
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/usr/bin/pacman-key --init
ExecStart=/usr/bin/pacman-key --populate archlinux
[Install]
WantedBy=multi-user.target

@ -0,0 +1 @@
ACTION=="add", SUBSYSTEM=="net", ENV{INTERFACE}=="en*|eth*", ENV{SYSTEMD_WANTS}="dhcpcd@$name.service"

@ -0,0 +1,34 @@
#!/bin/bash
script_cmdline ()
{
local param
for param in $(< /proc/cmdline); do
case "${param}" in
script=*) echo "${param#*=}" ; return 0 ;;
esac
done
}
automated_script ()
{
local script rt
script="$(script_cmdline)"
if [[ -n "${script}" && ! -x /tmp/startup_script ]]; then
if [[ "${script}" =~ ^http:// || "${script}" =~ ^ftp:// ]]; then
wget "${script}" --retry-connrefused -q -O /tmp/startup_script >/dev/null
rt=$?
else
cp "${script}" /tmp/startup_script
rt=$?
fi
if [[ ${rt} -eq 0 ]]; then
chmod +x /tmp/startup_script
/tmp/startup_script
fi
fi
}
if [[ $(tty) == "/dev/tty1" ]]; then
automated_script
fi

@ -0,0 +1,23 @@
#!/bin/bash
set -e -u
sed -i 's/#\(en_US\.UTF-8\)/\1/' /etc/locale.gen
locale-gen
ln -sf /usr/share/zoneinfo/UTC /etc/localtime
usermod -s /usr/bin/zsh root
cp -aT /etc/skel/ /root/
chmod 700 /root
sed -i 's/#\(PermitRootLogin \).\+/\1yes/' /etc/ssh/sshd_config
sed -i "s/#Server/Server/g" /etc/pacman.d/mirrorlist
sed -i 's/#\(Storage=\)auto/\1volatile/' /etc/systemd/journald.conf
sed -i 's/#\(HandleSuspendKey=\)suspend/\1ignore/' /etc/systemd/logind.conf
sed -i 's/#\(HandleHibernateKey=\)hibernate/\1ignore/' /etc/systemd/logind.conf
sed -i 's/#\(HandleLidSwitch=\)suspend/\1ignore/' /etc/systemd/logind.conf
systemctl enable pacman-init.service choose-mirror.service
systemctl set-default multi-user.target

@ -0,0 +1,3 @@
View this installation guide online at
https://wiki.archlinux.org/index.php/Installation_Guide

@ -0,0 +1,277 @@
#!/bin/bash
#
# Custom archiso build script for archzfs lts testing
#
# Supports only x86_64 architecture
#
set -e -u
iso_name=archlinux
iso_label="ARCHLTS_$(date +%Y%m)"
iso_version=$(date +%Y.%m.%d)
install_dir=arch
work_dir=work
out_dir=out
gpg_key=
arch=$(uname -m)
verbose=""
script_path=$(readlink -f ${0%/*})
_usage ()
{
echo "usage ${0} [options]"
echo
echo " General options:"
echo " -N <iso_name> Set an iso filename (prefix)"
echo " Default: ${iso_name}"
echo " -V <iso_version> Set an iso version (in filename)"
echo " Default: ${iso_version}"
echo " -L <iso_label> Set an iso label (disk label)"
echo " Default: ${iso_label}"
echo " -D <install_dir> Set an install_dir (directory inside iso)"
echo " Default: ${install_dir}"
echo " -w <work_dir> Set the working directory"
echo " Default: ${work_dir}"
echo " -o <out_dir> Set the output directory"
echo " Default: ${out_dir}"
echo " -v Enable verbose output"
echo " -h This help message"
exit ${1}
}
# Helper function to run make_*() only one time per architecture.
run_once() {
if [[ ! -e ${work_dir}/build.${1}_${arch} ]]; then
$1
touch ${work_dir}/build.${1}_${arch}
fi
}
# Setup custom pacman.conf with current cache directories.
make_pacman_conf() {
local _cache_dirs
_cache_dirs=($(pacman -v 2>&1 | grep '^Cache Dirs:' | sed 's/Cache Dirs:\s*//g'))
sed -r "s|^#?\\s*CacheDir.+|CacheDir = $(echo -n ${_cache_dirs[@]})|g" ${script_path}/pacman.conf > ${work_dir}/pacman.conf
}
# Base installation, plus needed packages (airootfs)
make_basefs() {
setarch ${arch} mkarchiso ${verbose} -w "${work_dir}/${arch}" -C "${work_dir}/pacman.conf" -D "${install_dir}" init
setarch ${arch} mkarchiso ${verbose} -w "${work_dir}/${arch}" -C "${work_dir}/pacman.conf" -D "${install_dir}" -p "haveged intel-ucode mkinitcpio-nfs-utils nbd zsh" install
}
# Additional packages (airootfs)
make_packages() {
setarch ${arch} mkarchiso ${verbose} -w "${work_dir}/${arch}" -C "${work_dir}/pacman.conf" -D "${install_dir}" -p "$(grep -h -v ^# ${script_path}/packages)" install
}
# Needed packages for x86_64 EFI boot
make_packages_efi() {
setarch ${arch} mkarchiso ${verbose} -w "${work_dir}/${arch}" -C "${work_dir}/pacman.conf" -D "${install_dir}" -p "efitools" install
}
# Copy mkinitcpio archiso hooks and build initramfs (airootfs)
make_setup_mkinitcpio() {
local _hook
mkdir -p ${work_dir}/${arch}/airootfs/etc/initcpio/hooks
mkdir -p ${work_dir}/${arch}/airootfs/etc/initcpio/install
for _hook in archiso archiso_shutdown archiso_loop_mnt; do
cp /usr/lib/initcpio/hooks/${_hook} ${work_dir}/${arch}/airootfs/etc/initcpio/hooks
cp /usr/lib/initcpio/install/${_hook} ${work_dir}/${arch}/airootfs/etc/initcpio/install
done
sed -i "s|/usr/lib/initcpio/|/etc/initcpio/|g" ${work_dir}/${arch}/airootfs/etc/initcpio/install/archiso_shutdown
cp /usr/lib/initcpio/install/archiso_kms ${work_dir}/${arch}/airootfs/etc/initcpio/install
cp /usr/lib/initcpio/archiso_shutdown ${work_dir}/${arch}/airootfs/etc/initcpio
cp ${script_path}/mkinitcpio.conf ${work_dir}/${arch}/airootfs/etc/mkinitcpio-archiso.conf
gnupg_fd=
if [[ ${gpg_key} ]]; then
gpg --export ${gpg_key} >${work_dir}/gpgkey
exec 17<>${work_dir}/gpgkey
fi
ARCHISO_GNUPG_FD=${gpg_key:+17} setarch ${arch} mkarchiso ${verbose} -w "${work_dir}/${arch}" -C "${work_dir}/pacman.conf" -D "${install_dir}" -r 'mkinitcpio -c /etc/mkinitcpio-archiso.conf -k /boot/vmlinuz-linux-lts -g /boot/archiso.img' run
if [[ ${gpg_key} ]]; then
exec 17<&-
fi
}
# Customize installation (airootfs)
make_customize_airootfs() {
cp -af ${script_path}/airootfs ${work_dir}/${arch}
curl -o ${work_dir}/${arch}/airootfs/etc/pacman.d/mirrorlist 'https://www.archlinux.org/mirrorlist/?country=all&protocol=http&use_mirror_status=on'
lynx -dump -nolist 'https://wiki.archlinux.org/index.php/Installation_Guide?action=render' >> ${work_dir}/${arch}/airootfs/root/install.txt
setarch ${arch} mkarchiso ${verbose} -w "${work_dir}/${arch}" -C "${work_dir}/pacman.conf" -D "${install_dir}" -r '/root/customize_airootfs.sh' run
rm ${work_dir}/${arch}/airootfs/root/customize_airootfs.sh
}
# Prepare kernel/initramfs ${install_dir}/boot/
make_boot() {
mkdir -p ${work_dir}/iso/${install_dir}/boot/${arch}
cp ${work_dir}/${arch}/airootfs/boot/archiso.img ${work_dir}/iso/${install_dir}/boot/${arch}/archiso.img
cp ${work_dir}/${arch}/airootfs/boot/vmlinuz-linux-lts ${work_dir}/iso/${install_dir}/boot/${arch}/vmlinuz
}
# Add other additional/extra files to ${install_dir}/boot/
make_boot_extra() {
# cp ${work_dir}/${arch}/airootfs/boot/memtest86+/memtest.bin ${work_dir}/iso/${install_dir}/boot/memtest
cp ${work_dir}/${arch}/airootfs/usr/share/licenses/common/GPL2/license.txt ${work_dir}/iso/${install_dir}/boot/memtest.COPYING
cp ${work_dir}/${arch}/airootfs/boot/intel-ucode.img ${work_dir}/iso/${install_dir}/boot/intel_ucode.img
cp ${work_dir}/${arch}/airootfs/usr/share/licenses/intel-ucode/LICENSE ${work_dir}/iso/${install_dir}/boot/intel_ucode.LICENSE
}
# Prepare /${install_dir}/boot/syslinux
make_syslinux() {
mkdir -p ${work_dir}/iso/${install_dir}/boot/syslinux
for _cfg in ${script_path}/syslinux/*.cfg; do
sed "s|%ARCHISO_LABEL%|${iso_label}|g;
s|%INSTALL_DIR%|${install_dir}|g" ${_cfg} > ${work_dir}/iso/${install_dir}/boot/syslinux/${_cfg##*/}
done
cp ${script_path}/syslinux/splash.png ${work_dir}/iso/${install_dir}/boot/syslinux
cp ${work_dir}/${arch}/airootfs/usr/lib/syslinux/bios/*.c32 ${work_dir}/iso/${install_dir}/boot/syslinux
cp ${work_dir}/${arch}/airootfs/usr/lib/syslinux/bios/lpxelinux.0 ${work_dir}/iso/${install_dir}/boot/syslinux
cp ${work_dir}/${arch}/airootfs/usr/lib/syslinux/bios/memdisk ${work_dir}/iso/${install_dir}/boot/syslinux
mkdir -p ${work_dir}/iso/${install_dir}/boot/syslinux/hdt
gzip -c -9 ${work_dir}/${arch}/airootfs/usr/share/hwdata/pci.ids > ${work_dir}/iso/${install_dir}/boot/syslinux/hdt/pciids.gz
gzip -c -9 ${work_dir}/${arch}/airootfs/usr/lib/modules/*-ARCH/modules.alias > ${work_dir}/iso/${install_dir}/boot/syslinux/hdt/modalias.gz
}
# Prepare /isolinux
make_isolinux() {
mkdir -p ${work_dir}/iso/isolinux
sed "s|%INSTALL_DIR%|${install_dir}|g" ${script_path}/isolinux/isolinux.cfg > ${work_dir}/iso/isolinux/isolinux.cfg
cp ${work_dir}/${arch}/airootfs/usr/lib/syslinux/bios/isolinux.bin ${work_dir}/iso/isolinux/
cp ${work_dir}/${arch}/airootfs/usr/lib/syslinux/bios/isohdpfx.bin ${work_dir}/iso/isolinux/
cp ${work_dir}/${arch}/airootfs/usr/lib/syslinux/bios/ldlinux.c32 ${work_dir}/iso/isolinux/
}
# Prepare /EFI
make_efi() {
mkdir -p ${work_dir}/iso/EFI/boot
cp ${work_dir}/x86_64/airootfs/usr/share/efitools/efi/PreLoader.efi ${work_dir}/iso/EFI/boot/bootx64.efi
cp ${work_dir}/x86_64/airootfs/usr/share/efitools/efi/HashTool.efi ${work_dir}/iso/EFI/boot/
cp ${work_dir}/x86_64/airootfs/usr/lib/systemd/boot/efi/systemd-bootx64.efi ${work_dir}/iso/EFI/boot/loader.efi
mkdir -p ${work_dir}/iso/loader/entries
cp ${script_path}/efiboot/loader/loader.conf ${work_dir}/iso/loader/
# cp ${script_path}/efiboot/loader/entries/uefi-shell-v2-x86_64.conf ${work_dir}/iso/loader/entries/
# cp ${script_path}/efiboot/loader/entries/uefi-shell-v1-x86_64.conf ${work_dir}/iso/loader/entries/
sed "s|%ARCHISO_LABEL%|${iso_label}|g;
s|%INSTALL_DIR%|${install_dir}|g" \
${script_path}/efiboot/loader/entries/archiso-x86_64.conf > ${work_dir}/iso/loader/entries/archiso.conf
# # EFI Shell 2.0 for UEFI 2.3+
# curl -o ${work_dir}/iso/EFI/shellx64_v2.efi https://raw.githubusercontent.com/tianocore/edk2/master/ShellBinPkg/UefiShell/X64/Shell.efi
# # EFI Shell 1.0 for non UEFI 2.3+
# curl -o ${work_dir}/iso/EFI/shellx64_v1.efi https://raw.githubusercontent.com/tianocore/edk2/master/EdkShellBinPkg/FullShell/X64/Shell_Full.efi
}
# Prepare efiboot.img::/EFI for "El Torito" EFI boot mode
# make_efiboot() {
# mkdir -p ${work_dir}/iso/EFI/archiso
# truncate -s 40M ${work_dir}/iso/EFI/archiso/efiboot.img
# mkfs.fat -n ARCHISO_EFI ${work_dir}/iso/EFI/archiso/efiboot.img
# mkdir -p ${work_dir}/efiboot
# mount ${work_dir}/iso/EFI/archiso/efiboot.img ${work_dir}/efiboot
# mkdir -p ${work_dir}/efiboot/EFI/archiso
# cp ${work_dir}/iso/${install_dir}/boot/x86_64/vmlinuz ${work_dir}/efiboot/EFI/archiso/vmlinuz.efi
# cp ${work_dir}/iso/${install_dir}/boot/x86_64/archiso.img ${work_dir}/efiboot/EFI/archiso/archiso.img
# cp ${work_dir}/iso/${install_dir}/boot/intel_ucode.img ${work_dir}/efiboot/EFI/archiso/intel_ucode.img
# mkdir -p ${work_dir}/efiboot/EFI/boot
# cp ${work_dir}/x86_64/airootfs/usr/share/efitools/efi/PreLoader.efi ${work_dir}/efiboot/EFI/boot/bootx64.efi
# cp ${work_dir}/x86_64/airootfs/usr/share/efitools/efi/HashTool.efi ${work_dir}/efiboot/EFI/boot/
# cp ${work_dir}/x86_64/airootfs/usr/lib/systemd/boot/efi/systemd-bootx64.efi ${work_dir}/efiboot/EFI/boot/loader.efi
# mkdir -p ${work_dir}/efiboot/loader/entries
# cp ${script_path}/efiboot/loader/loader.conf ${work_dir}/efiboot/loader/
# # cp ${script_path}/efiboot/loader/entries/uefi-shell-v2-x86_64.conf ${work_dir}/efiboot/loader/entries/
# # cp ${script_path}/efiboot/loader/entries/uefi-shell-v1-x86_64.conf ${work_dir}/efiboot/loader/entries/
# # sed "s|%ARCHISO_LABEL%|${iso_label}|g;
# # s|%INSTALL_DIR%|${install_dir}|g" \
# # ${script_path}/efiboot/loader/entries/archiso-x86_64-cd.conf > ${work_dir}/efiboot/loader/entries/archiso.conf
# # cp ${work_dir}/iso/EFI/shellx64_v2.efi ${work_dir}/efiboot/EFI/
# # cp ${work_dir}/iso/EFI/shellx64_v1.efi ${work_dir}/efiboot/EFI/
# umount -d ${work_dir}/efiboot
# }
# Build airootfs filesystem image
make_prepare() {
cp -a -l -f ${work_dir}/${arch}/airootfs ${work_dir}
setarch ${arch} mkarchiso ${verbose} -w "${work_dir}" -D "${install_dir}" pkglist
setarch ${arch} mkarchiso ${verbose} -w "${work_dir}" -D "${install_dir}" ${gpg_key:+-g ${gpg_key}} prepare
rm -rf ${work_dir}/airootfs
# rm -rf ${work_dir}/${arch}/airootfs (if low space, this helps)
}
# Build ISO
make_iso() {
mkarchiso ${verbose} -w "${work_dir}" -D "${install_dir}" -L "${iso_label}" -o "${out_dir}" iso "${iso_name}-${iso_version}.iso"
}
if [[ ${EUID} -ne 0 ]]; then
echo "This script must be run as root."
_usage 1
fi
if [[ ${arch} != x86_64 ]]; then
echo "This script needs to be run on x86_64"
_usage 1
fi
while getopts 'N:V:L:D:w:o:g:vh' arg; do
case "${arg}" in
N) iso_name="${OPTARG}" ;;
V) iso_version="${OPTARG}" ;;
L) iso_label="${OPTARG}" ;;
D) install_dir="${OPTARG}" ;;
w) work_dir="${OPTARG}" ;;
o) out_dir="${OPTARG}" ;;
g) gpg_key="${OPTARG}" ;;
v) verbose="-v" ;;
h) _usage 0 ;;
*)
echo "Invalid argument '${arg}'"
_usage 1
;;
esac
done
mkdir -p ${work_dir}
run_once make_pacman_conf
# Do all stuff for each airootfs
run_once make_basefs
run_once make_packages
run_once make_packages_efi
run_once make_setup_mkinitcpio
run_once make_customize_airootfs
run_once make_boot
# Do all stuff for "iso"
run_once make_boot_extra
run_once make_syslinux
run_once make_isolinux
run_once make_efi
# run_once make_efiboot
run_once make_prepare
run_once make_iso

@ -0,0 +1,5 @@
title Arch Linux archiso x86_64 UEFI USB
linux /%INSTALL_DIR%/boot/x86_64/vmlinuz
initrd /%INSTALL_DIR%/boot/intel_ucode.img
initrd /%INSTALL_DIR%/boot/x86_64/archiso.img
options archisobasedir=%INSTALL_DIR% archisolabel=%ARCHISO_LABEL%

@ -0,0 +1,6 @@
PATH /%INSTALL_DIR%/boot/syslinux/
DEFAULT loadconfig
LABEL loadconfig
CONFIG /%INSTALL_DIR%/boot/syslinux/syslinux.cfg
APPEND /%INSTALL_DIR%/

@ -0,0 +1,2 @@
HOOKS="base udev memdisk archiso_shutdown archiso archiso_loop_mnt archiso_kms block pcmcia filesystems keyboard"
COMPRESSION="xz"

@ -0,0 +1,66 @@
arch-install-scripts
b43-fwcutter
btrfs-progs
clonezilla
crda
darkhttpd
ddrescue
dhclient
dialog
dmraid
dnsmasq
dnsutils
dosfstools
elinks
ethtool
f2fs-tools
fsarchiver
gnu-netcat
gpm
gptfdisk
grml-zsh-config
grub
hdparm
ipw2100-fw
ipw2200-fw
irssi
lftp
linux-atm
lsscsi
mc
mtools
ndisc6
nfs-utils
nilfs-utils
nmap
ntfs-3g
ntp
openconnect
openssh
openvpn
partclone
parted
partimage
ppp
pptpclient
refind-efi
rfkill
rp-pppoe
rsync
sdparm
sg3_utils
smartmontools
speedtouch
sudo
tcpdump
testdisk
usb_modeswitch
vim-minimal
vpnc
wget
wireless_tools
wpa_actiond
wvdial
xl2tpd
zd1211-firmware
linux-lts

@ -0,0 +1,92 @@
#
# /etc/pacman.conf
#
# See the pacman.conf(5) manpage for option and repository directives
#
# GENERAL OPTIONS
#
[options]
# The following paths are commented out with their default values listed.
# If you wish to use different paths, uncomment and update the paths.
#RootDir = /
#DBPath = /var/lib/pacman/
#CacheDir = /var/cache/pacman/pkg/
#LogFile = /var/log/pacman.log
#GPGDir = /etc/pacman.d/gnupg/
HoldPkg = pacman glibc
#XferCommand = /usr/bin/curl -C - -f %u > %o
#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u
#CleanMethod = KeepInstalled
#UseDelta = 0.7
Architecture = auto
# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup
#IgnorePkg =
#IgnoreGroup =
#NoUpgrade =
#NoExtract =
# Misc options
#UseSyslog
#Color
#TotalDownload
# We cannot check disk space from within a chroot environment
#CheckSpace
#VerbosePkgLists
# By default, pacman accepts packages signed by keys that its local keyring
# trusts (see pacman-key and its man page), as well as unsigned packages.
SigLevel = Required DatabaseOptional
LocalFileSigLevel = Optional
#RemoteFileSigLevel = Required
# NOTE: You must run `pacman-key --init` before first using pacman; the local
# keyring can then be populated with the keys of all official Arch Linux
# packagers with `pacman-key --populate archlinux`.
#
# REPOSITORIES
# - can be defined here or included from another file
# - pacman will search repositories in the order defined here
# - local/custom mirrors can be added here or in separate files
# - repositories listed first will take precedence when packages
# have identical names, regardless of version number
# - URLs will have $repo replaced by the name of the current repo
# - URLs will have $arch replaced by the name of the architecture
#
# Repository entries are of the format:
# [repo-name]
# Server = ServerName
# Include = IncludePath
#
# The header [repo-name] is crucial - it must be present and
# uncommented to enable the repo.
#
# The testing repositories are disabled by default. To enable, uncomment the
# repo name header and Include lines. You can add preferred servers immediately
# after the header, and they will be used before the default mirrors.
#[testing]
#Include = /etc/pacman.d/mirrorlist
[core]
Include = /etc/pacman.d/mirrorlist
[extra]
Include = /etc/pacman.d/mirrorlist
#[community-testing]
#Include = /etc/pacman.d/mirrorlist
[community]
Include = /etc/pacman.d/mirrorlist
# An example of a custom package repository. See the pacman manpage for
# tips on creating your own repositories.
#[custom]
#SigLevel = Optional TrustAll
#Server = file:///home/custompkgs

Binary file not shown.


@ -0,0 +1,11 @@
DEFAULT arch64
LABEL arch64
TEXT HELP
Boot the Arch Linux (x86_64) live medium.
It allows you to install Arch Linux or perform system maintenance.
ENDTEXT
MENU LABEL Boot Arch Linux (x86_64)
LINUX boot/x86_64/vmlinuz
INITRD boot/intel_ucode.img,boot/x86_64/archiso.img
APPEND archisobasedir=%INSTALL_DIR% archisolabel=%ARCHISO_LABEL%

@ -0,0 +1,5 @@
test_bootloader_install() {
# Setup the boot loader
run_cmd "mkdir -p ${arch_target_dir}/boot/syslinux; cp -f /root/syslinux.cfg '${arch_target_dir}/boot/syslinux/syslinux.cfg'"
run_cmd "arch-chroot ${arch_target_dir} /usr/bin/syslinux-install_update -i -a -m"
}

@ -0,0 +1,36 @@
export fqdn='test.archzfs.test'
export keymap='us'
export language='en_US.UTF-8'
export password=$(/usr/bin/openssl passwd -crypt 'azfstest')
export timezone='UTC'
test_chroot_setup() {
# $1 arch-chroot target dir
msg "Setting up arch install..."
export arch_target_dir="${test_target_dir}"
if [[ -n $1 ]]; then
arch_target_dir="${1}"
fi
msg2 "Setting base image pacman mirror"
run_cmd "/usr/bin/cp /etc/pacman.d/mirrorlist ${arch_target_dir}/etc/pacman.d/mirrorlist"
msg2 "generating the filesystem table"
run_cmd "/usr/bin/genfstab -p ${arch_target_dir} >> '${arch_target_dir}/etc/fstab'"
msg2 "Create base.sh"
run_cmd "/usr/bin/install --mode=0755 /dev/null '${arch_target_dir}/usr/bin/base.sh'"
# http://comments.gmane.org/gmane.linux.arch.general/48739
msg2 "Adding workaround for shutdown race condition"
run_cmd "/usr/bin/install --mode=0644 poweroff.timer '${arch_target_dir}/etc/systemd/system/poweroff.timer'"
# Special filesystem configure script
source_safe /root/base.sh
msg2 "Entering chroot and configuring system"
run_cmd "/usr/bin/arch-chroot ${arch_target_dir} base.sh"
msg2 "Deleting base.sh"
rm ${arch_target_dir}/usr/bin/base.sh
}

@ -0,0 +1,14 @@
#!/bin/bash
export test_root_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
debug "test_root_dir='${test_root_dir}'"
export test_target_dir='/mnt'
export test_archzfs_repo_name="archzfs-testing"
# Additional packages to install in the archiso
export test_archiso_packages="archzfs-linux-lts"
# Additional packages to install after base and base-devel
export test_chroot_packages="gptfdisk openssh syslinux archzfs-linux-lts"

@ -0,0 +1,84 @@
test_fs_config_nfs() {
# $1 arch-chroot directory
# prefix="${test_target_dir}"
if [[ -n $1 ]]; then
prefix="${1}"
fi
msg "Create NFS mount points"
run_cmd "/usr/bin/mkdir -p ${prefix}/repo"
msg "Setting the package cache (nfs mount)"
run_cmd "mount -t nfs4 -o rsize=32768,wsize=32768,timeo=3 10.0.2.2:/var/cache/pacman/pkg ${prefix}/var/cache/pacman/pkg"
msg "Mounting the AUR package repo"
run_cmd "mount -t nfs4 -o rsize=32768,wsize=32768,timeo=3 10.0.2.2:/mnt/data/pacman/repo ${prefix}/repo"
}
test_fs_config_root_preinstall() {
msg "Configuring root filesystem!"
export disk='/dev/vda'
export root_partition="${disk}1"
msg2 "Clearing partition table on ${disk}"
run_cmd "sgdisk --zap ${disk}"
msg2 "Destroying magic strings and signatures on ${disk}"
run_cmd "dd if=/dev/zero of=${disk} bs=512 count=2048"
run_cmd "wipefs --all ${disk}"
# See http://www.rodsbooks.com/gdisk/sgdisk-walkthrough.html
# http://www.rodsbooks.com/gdisk/sgdisk.htm
msg2 "Creating boot partition on ${disk}"
run_cmd "sgdisk --new=1:0:512M --typecode=1:8300 ${disk}"
msg2 "Creating root partition on ${disk}"
run_cmd "sgdisk --new=2:0:0 --typecode=2:bf00 ${disk}"
msg2 "The disk"
run_cmd "sgdisk -p ${disk}"
msg2 "Creating root filesystem"
run_cmd "zpool create -m ${test_target_dir} -f zroot /dev/vda2"
run_cmd "zfs create -o mountpoint=none zroot/ROOT"
run_cmd "zfs create -o compression=lz4 -o mountpoint=${test_target_dir}/ROOT zroot/ROOT/default"
run_cmd "zfs create -o mountpoint=none zroot/data"
run_cmd "zfs create -o compression=lz4 -o mountpoint=${test_target_dir}/ROOT/home zroot/data/home"
msg2 "Mounting /home"
run_cmd "mount -t zfs -o default,noatime zroot/data/home ${test_target_dir}/ROOT/home"
msg2 "Create boot directory"
run_cmd "mkdir -p ${test_target_dir}/ROOT/boot"
msg2 "Creating /boot filesystem (ext4)"
run_cmd "mkfs.ext4 -F -m 0 -q -L boot /dev/vda1"
msg2 "Mounting boot filesystem"
run_cmd "mount -o noatime,errors=remount-ro /dev/vda1 ${test_target_dir}/ROOT/boot"
}
test_fs_config_root_postinstall() {
msg "Performing final filesystem operations"
msg2 "Unmounting boot partition"
run_cmd "umount ${test_target_dir}/ROOT/boot"
msg2 "Unmounting nfs partitions"
run_cmd "umount -a -t nfs4"
msg2 "Unmounting home partition"
run_cmd "umount ${test_target_dir}/ROOT/home"
msg2 "Setting flags and exporting ZFS root"
run_cmd "zfs umount -a"
run_cmd "zpool set bootfs=zroot/ROOT/default zroot"
run_cmd "zfs set mountpoint=none zroot"
run_cmd "zfs set mountpoint=/ zroot/ROOT/default"
run_cmd "zfs set mountpoint=/home zroot/data/home"
run_cmd "zfs set mountpoint=legacy zroot/data/home"
run_cmd "zpool export zroot"
}

@ -0,0 +1,12 @@
#!/bin/bash
setup_exit() {
msg "Installation complete!"
/usr/bin/sleep 10
/usr/bin/umount /mnt/repo
/usr/bin/umount /mnt/var/cache/pacman/pkg
/usr/bin/umount ${arch_target_dir}
/usr/bin/umount /var/cache/pacman/pkg
/usr/bin/umount /repo
/usr/bin/systemctl reboot
}

@ -0,0 +1,41 @@
# Requires the pacman cache and pacman package repos be mounted via NFS
test_pacman_config() {
# $1 arch-chroot target directory
arch_target_dir=""
arch_packages="${test_archiso_packages}"
if [[ -n $1 ]]; then
arch_target_dir="${1}"
arch_chroot="/usr/bin/arch-chroot ${1}"
fi
msg "Installing archzfs repo into chroot"
printf "\n%s\n%s\n" "[${test_archzfs_repo_name}]" "Server = file:///repo/\$repo/\$arch" >> ${arch_target_dir}/etc/pacman.conf
msg2 "Setting up gnupg"
run_cmd "${arch_chroot} dirmngr < /dev/null"
msg2 "Installing the signer key"
run_cmd "${arch_chroot} pacman-key -r 0EE7A126"
if [[ $? != 0 ]]; then
exit 1
fi
run_cmd "${arch_chroot} pacman-key --lsign-key 0EE7A126"
if [[ ! -n $1 ]]; then
msg2 "Installing test packages"
# Install the required packages in the image
run_cmd "${arch_chroot} pacman -Sy --noconfirm ${arch_packages}"
if [[ $? != 0 ]]; then
exit 1
fi
msg2 "Loading zfs modules"
run_cmd "modprobe zfs"
fi
}
test_pacman_pacstrap() {
msg "bootstrapping the base installation"
/usr/bin/pacstrap -c ${test_target_dir}/ROOT base base-devel ${test_chroot_packages}
}

@ -0,0 +1,8 @@
DEFAULT arch
TIMEOUT 0.1
LABEL arch
MENU LABEL Arch Linux
LINUX ../vmlinuz-linux-lts
APPEND zfs=zroot/ROOT/default rw
INITRD ../initramfs-linux-lts.img