pkgs.writeErofsLayers,profiles/ch-runner: MVE

$ nix-build -A examples.dummy.config.debug.closure.erofs.list | xargs cat | xargs du -h
749M    /nix/store/bzfv5x6lycq6hzhjv6d6vlk1q8fdg9di-base0.erofs
24M     /nix/store/hp41jfq36y0mmjrzqilyh3jfsvqic3kb-nixos.erofs

$ nix run -f . examples.dummy.config.uvms.cloud-hypervisor.runner
...
<<< Welcome to NixOS 25.11pre-git (x86_64) - ttyS0 >>>

nixos login:

The definition of the `pkgs` fixpoint was moved to pkgs/default.nix.
To support that, dirToAttrs was moved to lib/ and is now imported ad hoc where needed.
Else, Someone 2025-09-19 16:28:48 +03:00
parent 1828835a1d
commit 28d3f89ad4
12 changed files with 428 additions and 77 deletions
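
For orientation, the call shape of the relocated helper (a minimal sketch; the
directory contents of ./pkgs are hypothetical here, the helper itself is shown
verbatim in lib/dirToAttrs.nix below):

let
  pkgs = import <nixpkgs> { };
  inherit (pkgs) lib;
  dirToAttrs = import ./lib/dirToAttrs.nix { inherit lib; };
in
# For a hypothetical ./pkgs containing writeErofs.nix and writeErofsLayers.nix
# this evaluates to { writeErofs = <path>; writeErofsLayers = <path>; }:
# the first matching pattern names the entry, f turns it into a value.
dirToAttrs ./pkgs
  [ (path: fname: _type: lib.strings.removeSuffix ".nix" fname) ]
  (name: fpath: _type: fpath)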


@@ -1,26 +1,7 @@
let
pkgs = import <nixpkgs> { };
inherit (pkgs) lib;
dirToAttrs =
root: patterns: f:
lib.listToAttrs (
lib.concatMap (
dirent:
let
fname = dirent.name;
typ = dirent.value;
fpath = root + "/${fname}";
doMatch =
pat:
let
match = pat fpath fname typ;
value = f match fpath typ;
in
if match == null then [ ] else [ (lib.nameValuePair match value) ];
in
(lib.take 1 (lib.concatMap (doMatch) patterns))
) (lib.attrsToList (builtins.readDir root))
);
dirToAttrs = import ./lib/dirToAttrs.nix { inherit lib; };
in
{
examples =
@@ -35,23 +16,5 @@ in
name: fpath: _:
import <nixpkgs/nixos/lib/eval-config.nix> { modules = [ fpath ]; }
);
pkgs = lib.makeScope pkgs.newScope (
self:
dirToAttrs ./pkgs
[
(
path: fname: _:
lib.strings.removeSuffix ".nix" fname
)
]
(
name: fpath: typ:
if typ == "regular" then
self.callPackage fpath { }
else if typ == "directory" && builtins.pathExists (fpath + "/package.nix") then
self.callPackage (fpath + "/package.nix") { }
else
null
)
);
pkgs = pkgs.callPackage ./pkgs { };
}


@@ -2,21 +2,37 @@
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports = [
../profiles/all.nix
(modulesPath + "/profiles/minimal.nix")
];
system.stateVersion = "25.11";
# following microvm.nix:
boot.loader.grub.enable = false;
boot.initrd.systemd.enable = true;
fileSystems."/" = lib.mkDefault {
device = "rootfs"; # how does this work? does this assign a label to the tmpfs?
fsType = "tmpfs";
options = [ "size=10%,mode=0755" ];
options = [ "size=20%,mode=0755" ];
neededForBoot = true;
};
boot.initrd.systemd.emergencyAccess = true;
boot.initrd.systemd.settings.Manager.DefaultTimeoutStartSec = 15;
systemd.settings.Manager.DefaultTimeoutStartSec = 15;
networking.useNetworkd = true;
networking.nftables.enable = true;
uvms.cloud-hypervisor.enable = true;
users.mutableUsers = false;
users.users.root.password = "hacktheplanet!";
# TODO: cmdline, kernel, initrd, fileSystems
}

lib/dirToAttrs.nix (new file)

@@ -0,0 +1,20 @@
{ lib }:
root: patterns: f:
lib.listToAttrs (
lib.concatMap (
dirent:
let
fname = dirent.name;
typ = dirent.value;
fpath = root + "/${fname}";
doMatch =
pat:
let
match = pat fpath fname typ;
value = f match fpath typ;
in
if match == null then [ ] else [ (lib.nameValuePair match value) ];
in
(lib.take 1 (lib.concatMap (doMatch) patterns))
) (lib.attrsToList (builtins.readDir root))
)

lib/mkIfMicrovmGuest.nix (new file)

@@ -0,0 +1,7 @@
{
lib,
options,
config,
}:
def:
lib.optionalAttrs (options ? "microvm"."guest"."enable") (lib.mkIf config.microvm.guest.enable def)

pkgs/default.nix (new file)

@@ -0,0 +1,23 @@
{ lib, newScope }:
let
dirToAttrs = import ../lib/dirToAttrs.nix { inherit lib; };
in
lib.makeScope newScope (
self:
dirToAttrs ./.
[
(
path: fname: _:
if fname == "default.nix" then null else lib.strings.removeSuffix ".nix" fname
)
]
(
name: fpath: typ:
if typ == "regular" then
self.callPackage fpath { }
else if typ == "directory" && builtins.pathExists (fpath + "/package.nix") then
self.callPackage (fpath + "/package.nix") { }
else
null
)
)


@@ -0,0 +1,84 @@
{
lib,
erofs-utils,
runCommand,
python3Minimal,
bubblewrap,
writeClosure,
}:
{
name ? "${label}.erofs",
label ? "image",
roots ? [ ],
rootsExclude ? [ ],
erofsArgs ? [
"--mount-point=/nix/store"
"-z"
"lz4hc,level=3"
"-T0"
"-L"
label
"--all-root"
],
...
}@attrs:
let
old = writeClosure rootsExclude;
new = writeClosure roots;
attrs' = removeAttrs attrs [ "name" "roots" "rootsExclude" "erofsArgs" ];
in
runCommand name
(
attrs'
// {
__structuredAttrs = true;
inherit name label;
unsafeDiscardReferences.out = true;
nativeBuildInputs = [
erofs-utils
bubblewrap
python3Minimal
]
++ attrs.nativeBuildInputs or [ ];
inherit old new erofsArgs;
}
)
''
mkdir -p store
python3 << EOF
import hashlib, json, os, subprocess
from uuid import UUID
def uuid_from_node(node: int = 0) -> UUID:
return UUID(fields=(*((0,) * 5), node))
out_path = os.environ.get("out")
default_uuid = uuid_from_node((2**48 - 1) & int.from_bytes(hashlib.sha256(out_path.encode("ascii")).digest(), "big"))
with open(os.environ.get("NIX_ATTRS_JSON_FILE"), mode="r") as f:
attrs = json.load(f)
name = attrs["name"]
with open(attrs["old"], mode="r") as f:
old_paths = set(ln.rstrip() for ln in f)
with open(attrs["new"], mode="r") as f:
new_paths = [ln.rstrip() for ln in f]
new_paths = [p for p in new_paths if p not in old_paths]
erofs_args = attrs.get("erofsArgs", [])
if not any(a.startswith("-U") for a in erofs_args):
erofs_args.extend(["-U", str(default_uuid)])
subprocess.run(
check=True,
env=os.environ,
args=[
"bwrap",
"--dev-bind", "/", "/",
"--chdir", os.getcwd(),
*[a for p in new_paths for a in ["--ro-bind", p, f"{os.getcwd()}/store/{os.path.basename(p)}"]],
"--",
"mkfs.erofs", *erofs_args, out_path, "store",
],
)
EOF
''
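
A usage sketch for this builder, consumed through the pkgs/ scope (the roots
chosen here are arbitrary placeholders; rootsExclude subtracts whatever a
lower layer already ships):

{ pkgs ? import <nixpkgs> { } }:
let
  ps = pkgs.callPackage ./pkgs { };
in
ps.writeErofs {
  label = "demo";
  # everything in the closure of roots, minus the closure of rootsExclude,
  # ends up under /nix/store inside demo.erofs
  roots = [ pkgs.hello ];
  rootsExclude = [ pkgs.bash ];
}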


@@ -0,0 +1,35 @@
{ lib, writeErofs }:
{
layers,
labelLower ? "base",
labelUpper ? "image",
}:
let
inherit (lib.lists) toList;
nLayers = builtins.length layers;
mkLabel = seq: if seq + 1 < nLayers then labelLower + toString seq else labelUpper;
f =
{
roots ? [ ],
layers ? [ ],
seq ? 0,
}:
newRoots: {
seq = seq + 1;
roots = roots ++ (toList newRoots);
layers = layers ++ [
(writeErofs ({
roots = toList newRoots;
rootsExclude = roots;
passthru = {
inherit seq;
label = mkLabel seq;
};
label = mkLabel seq;
}))
];
};
acc = lib.foldl f { } layers;
in
acc.layers
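
The layering wrapper is what produces the base0.erofs / nixos.erofs pair
measured at the top. A sketch with placeholder roots; each successive layer
excludes everything already covered by the layers before it:

{ pkgs ? import <nixpkgs> { } }:
let
  ps = pkgs.callPackage ./pkgs { };
in
# Two entries -> two images: seq 0 gets labelLower + index ("base0"),
# the last one gets labelUpper; each entry may be a single root or a list.
ps.writeErofsLayers {
  layers = [
    [ pkgs.busybox ]
    [ pkgs.hello ]
  ];
  labelUpper = "demo";
}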

profiles/ch-runner.nix (new file)

@@ -0,0 +1,114 @@
{
config,
lib,
pkgs,
...
}:
# The intent is not to stick to the microvm.nix-like static interface,
# but we begin by reproducing at least some of its functionality.
let
cfg = config.uvms.cloud-hypervisor;
inherit (config.debug.closure.erofs) layers;
in
{
options = {
uvms.cloud-hypervisor.enable = lib.mkEnableOption "configuring the guest (e.g. fileSystems) for cloud-hypervisor";
uvms.cloud-hypervisor.runner = lib.mkOption {
type = lib.types.package;
description = "A naive script for running this system in cloud-hypervisor";
};
uvms.cloud-hypervisor.extraArgv = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
};
uvms.cloud-hypervisor.argv = lib.mkOption {
type = lib.types.listOf lib.types.str;
};
uvms.cloud-hypervisor.extraCmdline = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
};
uvms.cloud-hypervisor.cmdline = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [
"earlyprintk=ttyS0"
"console=ttyS0"
"reboot=t"
"panic=-1"
"init=${config.system.build.toplevel}/init"
]
++ config.boot.kernelParams
++ config.uvms.cloud-hypervisor.extraCmdline;
};
};
config = lib.mkMerge [
{
uvms.cloud-hypervisor.argv = lib.mkBefore (
[
(lib.getExe pkgs.cloud-hypervisor)
"--cmdline=${lib.concatStringsSep " " cfg.cmdline}"
"--kernel=${config.boot.kernelPackages.kernel}/${pkgs.stdenv.hostPlatform.linux-kernel.target}"
"--initramfs=${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}"
"--vsock=cid=4,socket=CONNECT.sock"
"--api-socket=ch.sock"
"--serial=tty"
"--console=null"
"--watchdog"
"--seccomp=true"
]
++ cfg.extraArgv
);
uvms.cloud-hypervisor.runner = pkgs.writeShellScriptBin "run-${config.networking.hostName}" ''
set -euo pipefail
GUESTNAME=${config.networking.hostName}
args=(
${lib.concatMapStringsSep "\n" lib.escapeShellArg cfg.argv}
)
mkdir -p "$HOME/uvms/$GUESTNAME"
cd "$HOME/uvms/$GUESTNAME"
cleanup() {
rm -f "$HOME/uvms/$GUESTNAME"/{ch,CONNECT}.sock
}
# clear stale control sockets from a previous run before starting
cleanup
exec -a "uuvm/$GUESTNAME" "''${args[@]}"
'';
}
(lib.mkIf cfg.enable {
boot.initrd.availableKernelModules = [
"erofs"
"overlay"
"virtio_mmio"
"virtio_pci"
"virtio_blk"
# "9pnet_virtio"
# "9p"
"virtiofs"
];
boot.initrd.systemd.enable = true;
fileSystems = {
"/nix/store" = {
fsType = "overlay";
overlay.lowerdir = map (img: "/nix/.ro-stores/${toString img.seq}") layers;
neededForBoot = true;
};
}
// lib.listToAttrs (
map (
img:
lib.nameValuePair "/nix/.ro-stores/${toString img.seq}" {
device = "/dev/disk/by-label/${img.label}";
neededForBoot = true;
options = [ "x-systemd.device-timeout=5" ];
}
) layers
);
uvms.cloud-hypervisor.argv = [
"--memory=size=1536M,hotplug_size=1536M,hotplugged_size=512M,hotplug_method=virtio-mem,mergeable=on,shared=on"
"--cpus=boot=4"
"--disk"
]
++ map (img: "path=${img},readonly=true,id=${toString img.label}") layers;
})
];
}
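
With two layers (seq 0 labelled base0, seq 1 labelled after the host name,
e.g. nixos), the fileSystems generated above evaluate to roughly the
following (a sketch of the resulting values, not literal module output):

{
  "/nix/store" = {
    fsType = "overlay";
    overlay.lowerdir = [ "/nix/.ro-stores/0" "/nix/.ro-stores/1" ];
    neededForBoot = true;
  };
  "/nix/.ro-stores/0" = {
    device = "/dev/disk/by-label/base0";
    neededForBoot = true;
    options = [ "x-systemd.device-timeout=5" ];
  };
  "/nix/.ro-stores/1" = {
    device = "/dev/disk/by-label/nixos";
    neededForBoot = true;
    options = [ "x-systemd.device-timeout=5" ];
  };
}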


@@ -0,0 +1,85 @@
{
config,
lib,
pkgs,
modulesPath,
...
}:
let
cfg = config.debug.closure;
inherit (lib.types) package;
inherit (config.system.build) toplevel;
ps = pkgs.callPackage ../pkgs { };
inherit (ps) writeErofsLayers;
emptySystem = import (pkgs.path + "/nixos/lib/eval-config.nix") {
modules = [
(modulesPath + "/profiles/minimal.nix")
{
system.stateVersion = config.system.stateVersion;
fileSystems."/".fsType = "tmpfs";
boot.loader.grub.enable = false;
networking.hostName = "base";
networking.nftables.enable = true;
networking.useNetworkd = true;
systemd.network.enable = true;
}
];
};
in
{
options.debug.closure = {
info = lib.mkOption {
type = package;
description = "closureInfo { rootPaths = [ toplevel ]; }";
};
write = lib.mkOption {
type = package;
description = "writeClosure [ toplevel ]";
};
extraLayers = lib.mkOption {
type = lib.types.listOf package;
description = "Roots from which to generate `erofs.layers`";
defaultText = ''
[
(nixosSystem { modules = [ ]; }).config.system.build.toplevel
# implicitly appended: config.system.build.toplevel
]
'';
};
erofs.layers = lib.mkOption {
type = lib.types.listOf package;
description = "writeErofs [ ... ]";
readOnly = true;
};
erofs.list = lib.mkOption {
type = package;
description = "writeClosure [ (writeErofs [ ... ])... ]";
};
};
config.debug.closure = {
info = pkgs.closureInfo { rootPaths = [ toplevel ]; };
write = pkgs.writeClosure [ toplevel ];
erofs.list =
pkgs.runCommand "layers"
{
__structuredAttrs = true;
inherit (cfg.erofs) layers;
}
''
printf "%s\n" "''${layers[@]}" > "$out"
'';
extraLayers = lib.mkMerge [
(lib.mkBefore [ emptySystem.config.system.build.toplevel ])
# (lib.mkAfter [config.system.build.toplevel])
];
erofs.layers = writeErofsLayers {
layers = cfg.extraLayers ++ [
toplevel
];
labelUpper = config.networking.hostName;
};
};
}
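
A configuration can wedge further layers between the mkBefore'd empty base
system and the implicitly appended toplevel by extending extraLayers (a
sketch; any store path or derivation works as a layer root):

{ pkgs, ... }:
{
  # lands after the empty base system and before config.system.build.toplevel
  debug.closure.extraLayers = [ pkgs.python3 ];
}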


@@ -6,7 +6,7 @@
# zswap is said to be more reliable than zram
boot.kernelParams = lib.optionals (!config.zramSwap.enable) [ "zswap.enabled=1" ];
}
(lib.optionalAttrs (options ? microvm) {
(lib.optionalAttrs (options ? "microvm" && config.microvm.guest.enable) {
microvm = {
hypervisor = lib.mkDefault "cloud-hypervisor";
graphics.enable = lib.mkDefault true;


@@ -1,15 +1,20 @@
{
options,
config,
lib,
pkgs,
...
}:
let
mkIfGuest = import ../lib/mkIfMicrovmGuest.nix { inherit options config lib; };
in
{
imports = [
./vsock-connect-guest.nix
./uvms-users.nix
];
config = lib.optionalAttrs (options ? microvm) {
config = lib.mkMerge [
(mkIfGuest {
microvm = {
hypervisor = "cloud-hypervisor";
volumes = [
@@ -19,9 +24,6 @@
size = 1024;
}
];
cloud-hypervisor.extraArgs = [
"--api-socket=ch.sock"
];
};
swapDevices = [
{
@@ -29,19 +31,12 @@
size = 768;
}
];
})
{
boot.kernelParams = [ "zswap.enabled=1" ];
zramSwap.enable = false;
systemd.services."microvm@".serviceConfig.ExecStartPost =
pkgs.writeShellScript "microvm-fix-umask" ''
if [[ -e CONNECT.vsock ]] ; then
chmod g+r CONNECT.vsock
fi
'';
systemd.tmpfiles.settings."10-muvm" = {
"/var/lib/microvms/*/CONNECT.sock".z.mode = "660";
};
};
}
];
}


@@ -1,18 +1,27 @@
{ options, lib, ... }:
{
config =
lib.optionalAttrs (options ? "microvm") {
options,
config,
lib,
...
}:
let
mkIfGuest = import ../lib/mkIfMicrovmGuest.nix { inherit options config lib; };
in
{
config = lib.mkMerge [
(mkIfGuest {
microvm.cloud-hypervisor.extraArgs = [
"--vsock"
"cid=4,socket=CONNECT.sock"
];
}
// {
})
{
# Somehow, sshd calls to PAM with PAM_RHOST="UNKNOWN",
# prompting a slow DNS look-up each time...
#
# https://mastodon.acm.org/@nobody/115108458851355328
# https://github.com/linux-pam/linux-pam/issues/885#issuecomment-3030698895
networking.hosts."100::" = [ "UNKNOWN" ];
};
}
];
}