pkgs.uvms: init

...with basic optional persistence, and without having to rebuild
images for every app.

nix run -f . pkgs.uvms -- --persist-home librewolf alacritty --run librewolf --run alacritty
This commit is contained in:
Else Someone 2026-02-27 18:26:41 +02:00
parent 22b613d157
commit 384b45bdef
15 changed files with 1155 additions and 452 deletions

View file

@ -7,29 +7,11 @@
}: }:
let let
uvmsPkgs = pkgs.callPackage ../pkgs { }; uvmsPkgs = pkgs.callPackage ../pkgs { };
waylandSock = "/run/user/1000/wayland-1";
env = {
XDG_RUNTIME_DIR = "/run/user/1000";
WAYLAND_DISPLAY = "wayland-1";
MESA_LOADER_DRIVER_OVERRIDE = "zink";
# WAYLAND_DEBUG = "1";
# WAYLAND_DEBUG_PROXY = "1";
ELECTRON_OZONE_PLATFORM_HINT = "wayland";
MOZ_ENABLE_WAYLAND = "1";
QT_QPA_PLATFORM = "wayland"; # Qt Applications
GDK_BACKEND = "wayland"; # GTK Applications
XDG_SESSION_TYPE = "wayland"; # Electron Applications
SDL_VIDEODRIVER = "wayland";
CLUTTER_BACKEND = "wayland";
NIXOS_OZONE_WL = "1";
};
in in
{ {
imports = [ imports = [
../profiles/all.nix ../profiles/ch-runner.nix
../profiles/baseImage.nix
(modulesPath + "/profiles/minimal.nix") (modulesPath + "/profiles/minimal.nix")
]; ];
@ -41,103 +23,23 @@ in
_module.args.inputs = import ../npins; _module.args.inputs = import ../npins;
# boot.isContainer = true; # boot.isContainer = true;
# boot.initrd.enable = true;
boot.loader.grub.enable = false; boot.loader.grub.enable = false;
boot.initrd.systemd.enable = true; boot.initrd.systemd.enable = true;
services.logrotate.enable = false;
services.udisks2.enable = false;
system.tools.nixos-generate-config.enable = false;
# system.activationScripts.specialfs = lib.mkForce "";
systemd.coredump.enable = false;
# networking.firewall.enable = false;
powerManagement.enable = false;
boot.kexec.enable = false;
# console.enable = false;
# system.switch.enable = false;
# services.udev.packages = lib.mkDefault [ ];
services.resolved.enable = false;
systemd.services.generate-shutdown-ramfs.enable = lib.mkForce false;
systemd.services.systemd-remount-fs.enable = lib.mkForce false;
systemd.services.systemd-pstore.enable = lib.mkForce false;
systemd.services.lastlog2-import.enable = lib.mkForce false;
systemd.services.suid-sgid-wrappers.enable = lib.mkForce false;
fileSystems."/" = lib.mkDefault {
device = "rootfs"; # how does this work? does this assign a label to the tmpfs?
fsType = "tmpfs";
options = [ "size=20%,mode=0755" ];
neededForBoot = true;
};
boot.initrd.systemd.settings.Manager.DefaultTimeoutStartSec = 5; boot.initrd.systemd.settings.Manager.DefaultTimeoutStartSec = 5;
systemd.settings.Manager.DefaultTimeoutStopSec = 10; systemd.settings.Manager.DefaultTimeoutStopSec = 10;
networking.useNetworkd = true;
networking.nftables.enable = true;
uvms.cloud-hypervisor.enable = true; uvms.cloud-hypervisor.enable = true;
systemd.sysusers.enable = false;
services.userborn.enable = true; # nikstur it
users.mutableUsers = false;
users.groups.user = { };
users.users.user = {
isNormalUser = true;
password = "hacktheplanet!";
extraGroups = [
"video"
"render"
];
};
users.users.root.password = "hacktheplanet!";
systemd.services."suid-sgid-wrappers".serviceConfig = { systemd.services."suid-sgid-wrappers".serviceConfig = {
StandardOutput = "journal+console"; StandardOutput = "journal+console";
StandardError = "journal+console"; StandardError = "journal+console";
}; };
environment.variables = env;
systemd.globalEnvironment = env;
systemd.tmpfiles.settings."10-xdg" = {
${env.XDG_RUNTIME_DIR}.d = {
user = "user";
group = "user";
mode = "0755";
};
};
systemd.sockets."wayland-proxy" = {
listenStreams = [
waylandSock
];
socketConfig = {
SocketUser = "user";
SocketGroup = "user";
FileDescriptorName = "wayland";
};
wantedBy = [ "sockets.target" ];
partOf = [ "wayland-proxy.service" ];
};
systemd.services."wayland-proxy" = {
wantedBy = [ "default.target" ];
serviceConfig = {
User = "user";
Group = "user";
ExecStart = "${lib.getExe pkgs.wayland-proxy-virtwl} --virtio-gpu";
# ExecStart = "${lib.getExe uvmsPkgs.wl-cross-domain-proxy} --listen-fd --filter-global wp_presentation";
ExecStartPre = [
"+/run/current-system/sw/bin/chmod 0666 /dev/dri/card0 /dev/dri/renderD128"
];
StandardOutput = "journal+console";
StandardError = "journal+console";
Restart = "on-failure";
RestartSec = 5;
};
};
fonts.enableDefaultPackages = true;
systemd.services."terminal" = { systemd.services."terminal" = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "wayland-proxy.service" ]; wants = [ "wayland-proxy.service" ];
after = [ "wayland-proxy.service" ]; after = [ "wayland-proxy.service" ];
environment = env;
serviceConfig = { serviceConfig = {
User = "user"; User = "user";
WorkingDirectory = "/home/user"; WorkingDirectory = "/home/user";
@ -146,11 +48,6 @@ in
StandardError = "journal+console"; StandardError = "journal+console";
}; };
}; };
boot.kernelModules = [
"drm"
"virtio_gpu"
];
hardware.graphics.enable = true;
# TODO: cmdline, kernel, initrd, fileSystems # TODO: cmdline, kernel, initrd, fileSystems
} }

3
pkgs/baseImage.nix Normal file
View file

@ -0,0 +1,3 @@
# Entry point for the uvms base VM image: `nixos` is expected to be a
# function that evaluates a NixOS module into a full system; we point it
# at the baseImage profile shipped alongside this tree.
{ nixos }:
nixos ../profiles/baseImage.nix

View file

@ -0,0 +1,59 @@
{
  lib,
  cloud-hypervisor,
  fetchFromGitHub,
  rustPlatform,
  enableDebug ? true,
}:
let
  # Pinned Spectrum OS checkout, used purely as a source of patches for
  # cloud-hypervisor and its vendored vhost crates.
  spectrumSrc = builtins.fetchTree {
    url = "https://spectrum-os.org/git/spectrum";
    type = "git";
    rev = "0f3388f0191d9a03c7bf471c269a34a79f22018b";
  };
  vhostPatchDir = "${spectrumSrc}/pkgs/cloud-hypervisor/vhost";
  # Every *.patch file Spectrum carries for the vhost tree, in the sorted
  # order produced by attrNames.
  vhostPatchList = map (fname: "${vhostPatchDir}/${fname}") (
    builtins.filter (lib.hasSuffix ".patch") (
      builtins.attrNames (builtins.readDir vhostPatchDir)
    )
  );
in
cloud-hypervisor.overrideAttrs (
  finalAttrs: oldAttrs:
  {
    # Verbatim from spectrum: unpack the vhost sources next to the main
    # source tree so the local-path patch below can point at them.
    postUnpack = (oldAttrs.postUnpack or "") + ''
      unpackFile $vhost
      chmod -R +w vhost
    '';
    vhost = fetchFromGitHub {
      name = "vhost";
      owner = "rust-vmm";
      repo = "vhost";
      rev = "vhost-user-backend-v0.20.0";
      hash = "sha256-KK1+mwYQr7YkyGT9+51v7TJael9D0lle2JXfRoTqYq8=";
    };
    patches = (oldAttrs.patches or [ ]) ++ [
      "${spectrumSrc}/pkgs/cloud-hypervisor/0001-build-use-local-vhost.patch"
      "${spectrumSrc}/pkgs/cloud-hypervisor/0002-virtio-devices-add-a-GPU-device.patch"
    ];
    # Exposed as an env var so postPatch can iterate over it in the builder.
    vhostPatches = vhostPatchList;
    # Verbatim copy from spectrum: apply the vhost patches to the tree
    # unpacked by postUnpack above.
    postPatch = (oldAttrs.postPatch or "") + ''
      pushd ../vhost
      for patch in $vhostPatches; do
        echo applying patch $patch
        patch -p1 < $patch
      done
      popd
    '';
    # The vendored cargo deps depend on the patched sources, so the patch
    # list participates in the fixed-output hash.
    cargoDeps = rustPlatform.fetchCargoVendor {
      inherit (finalAttrs) patches;
      inherit (oldAttrs) src;
      hash = "sha256-wGtsyKDg1z1QK9mJ1Q43NSjoPbm3m81p++DoD8ipIUI=";
    };
  }
  // lib.optionalAttrs enableDebug {
    buildType = "debug";
    dontStrip = true;
  }
)

View file

@ -4,6 +4,14 @@ let
in in
lib.makeScope newScope ( lib.makeScope newScope (
self: self:
let
callPackage =
fun: overrides:
let
result = self.callPackage fun overrides;
in
result // { override = result.__originalOverride or result.override; };
in
dirToAttrs ./. dirToAttrs ./.
[ [
( (
@ -14,9 +22,9 @@ lib.makeScope newScope (
( (
name: fpath: typ: name: fpath: typ:
if typ == "regular" then if typ == "regular" then
self.callPackage fpath { } callPackage fpath { }
else if typ == "directory" && builtins.pathExists (fpath + "/package.nix") then else if typ == "directory" && builtins.pathExists (fpath + "/package.nix") then
self.callPackage (fpath + "/package.nix") { } callPackage (fpath + "/package.nix") { }
else else
null null
) )

View file

@ -6,8 +6,7 @@
let let
inherit (lib.kernel) yes no unset; inherit (lib.kernel) yes no unset;
inherit (lib) mkForce; inherit (lib) mkForce;
in result = linux_latest.override {
linux_latest.override {
structuredExtraConfig = { structuredExtraConfig = {
BASE_SMALL = yes; BASE_SMALL = yes;
DRM_VIRTIO_GPU = yes; DRM_VIRTIO_GPU = yes;
@ -20,15 +19,19 @@ linux_latest.override {
VIRTIO_BALLOON = yes; VIRTIO_BALLOON = yes;
VIRTIO_BLK = yes; VIRTIO_BLK = yes;
VIRTIO_CONSOLE = yes; VIRTIO_CONSOLE = yes;
VIRTIO_PCI = yes; VIRTIO_FS = yes;
VIRTIO_MMIO = yes; VIRTIO_MMIO = yes;
VIRTIO_PCI = yes;
VIRTIO = yes; VIRTIO = yes;
FUSE_FS = yes;
VSOCKETS = yes; VSOCKETS = yes;
NO_HZ_IDLE = mkForce yes; NO_HZ_IDLE = mkForce yes;
NO_HZ_FULL = mkForce unset; NO_HZ_FULL = mkForce unset;
HZ_1000 = unset; HZ_1000 = unset;
HZ_250 = yes; # NixOS default: 1000 HZ_250 = yes; # NixOS default: 1000
# LSM = "lockdown,yama,loadpin,safesetid,integrity,bpf";
EXT4_FS = yes; EXT4_FS = yes;
# EXT4_USE_FOR_EXT2 = yes; # EXT4_USE_FOR_EXT2 = yes;
XFS_FS = yes; XFS_FS = yes;
@ -83,4 +86,6 @@ linux_latest.override {
FRAMEBUFFER_CONSOLE_ROTATION = mkForce unset; FRAMEBUFFER_CONSOLE_ROTATION = mkForce unset;
RC_CORE = mkForce unset; RC_CORE = mkForce unset;
}; };
} };
in
result // { __originalOverride = result.override; }

39
pkgs/mkSystemdDropin.nix Normal file
View file

@ -0,0 +1,39 @@
{
lib,
runCommand,
writeShellScriptBin,
}:
{
name,
prefix ? "10-all-",
dirs ? [
"service"
"mount"
"socket"
"timer"
"target"
],
dropinText ? null,
extraCommands ? "",
...
}@args:
runCommand "${name}-dropin"
(
lib.removeAttrs args [
"name"
]
// {
inherit dirs dropinText extraCommands;
}
)
''
set -euo pipefail
root=$out/lib/systemd/system
for dir in $dirs ; do
mkdir -p "$root/$dir".d
printf "%s" "$dropinText" > "$root/$dir.d/${prefix}${name}.conf"
done
runHook extraCommands
''

76
pkgs/uvms-guest/guest.py Normal file
View file

@ -0,0 +1,76 @@
import json
import os
import select
import socket
import subprocess
def handle_run(run: dict, cwd: str = "/home/user") -> "tuple[dict, object]":
    """Spawn the process described by a "run" request.

    Parameters:
        run: request payload with keys:
            "argv": command to execute, as a list of strings (required);
            "text": open the child's pipes in text mode (default False);
            "EXTRA_PATH": directories appended to the child's PATH
            (default []).
        cwd: working directory for the child (defaults to the guest
            user's home, as before).

    Returns:
        (res, proc): ``res`` is a JSON-serializable status dict with keys
        "status", "pid", "long_running", and optionally "exception" /
        "returncode"; ``proc`` is the ``subprocess.Popen`` handle, or
        None when spawning failed.
    """
    res = {}
    text = run.get("text", False)
    # Child inherits our environment, with the requested extra PATH
    # entries appended after the existing ones.
    env = {
        **os.environ,
        "PATH": ":".join(
            os.environ.get("PATH", "").split(":") + run.get("EXTRA_PATH", [])
        ),
    }
    proc = None
    try:
        proc = subprocess.Popen(
            # Bug fix: was `req["run"]["argv"]`, which silently read the
            # caller's global instead of this function's parameter.
            run["argv"],
            text=text,
            env=env,
            cwd=cwd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        res["status"] = "exec succeeded"
    except Exception as e:
        res["status"] = "exec failed"
        res["exception"] = repr(e)
    res["pid"] = getattr(proc, "pid", None)
    # Give the child a short grace period: quick commands report their
    # return code immediately; anything still alive is "long_running".
    try:
        if proc is not None:
            proc.wait(0.125)
        res["long_running"] = False
        res["returncode"] = getattr(proc, "returncode", None)
    except subprocess.TimeoutExpired:
        res["long_running"] = True
    return res, proc
if __name__ == "__main__":
serv = socket.fromfd(3, socket.AF_VSOCK, socket.SOCK_STREAM)
procs = []
conns = [serv]
while True:
rr, rw, xs = select.select(conns, [], [])
for con in rr:
if con is serv:
con, (cid, port) = serv.accept()
assert cid == 2, cid
conns.append(con)
continue
req = con.recv(8192)
# IDK why but I keep getting empty messages
if req == b"":
continue
try:
req = json.loads(req)
print(f"Received {req=}")
except json.JSONDecodeError as e:
print(f"Couldn't interpret {req=}: {e}")
continue
if "run" in req:
res, proc = handle_run(req["run"])
procs.append(proc)
else:
res = {"status": "unknown command"}
_, rw, _ = select.select([], [con], [])
assert rw, rw
res = json.dumps(res).encode("utf8")
print(f"Responding with {res=}")
con.send(res)

View file

@ -0,0 +1,5 @@
{
lib,
writers,
}:
writers.writePython3Bin "uvms-guest" { } ./guest.py

View file

@ -11,9 +11,11 @@
execline, execline,
s6, s6,
strace, strace,
taps,
util-linux, util-linux,
virtiofsd, virtiofsd,
taps,
baseImage,
}: }:
let let
@ -43,5 +45,12 @@ writers.writePython3Bin "uvms" { } (
STRACE = lib.getExe strace; STRACE = lib.getExe strace;
TAPS = "${lib.getExe taps}"; TAPS = "${lib.getExe taps}";
VIRTIOFSD = "${lib.getExe virtiofsd}"; VIRTIOFSD = "${lib.getExe virtiofsd}";
BASE_CONFIG = baseImage.config.system.build.ch;
SYSTEM = baseImage.config.system.build.toplevel;
SYSTEM_CLOSURE = writeClosure [
baseImage.config.system.build.toplevel
baseImage.config.system.build.ch
];
} }
) )

View file

@ -9,14 +9,18 @@
import os import os
import subprocess import subprocess
import socket import socket
import json
from argparse import ArgumentParser from argparse import ArgumentParser
from contextlib import contextmanager, closing, ExitStack from contextlib import contextmanager, closing, ExitStack
parser = ArgumentParser("supervise-vm") parser = ArgumentParser("supervise-vm")
parser.add_argument("--vm") parser.add_argument("--vm", default=None)
parser.add_argument("--prefix", default="$HOME/uvms/$VM") parser.add_argument("--prefix", default="$HOME/uvms/$VM")
parser.add_argument("--vm-config") parser.add_argument("--vm-config", default="@BASE_CONFIG@") # noqa: E501
parser.add_argument("--persist-home", action="store_true")
parser.add_argument("--run", action="append")
parser.add_argument("app", nargs="*", default=())
TOOLS_DIR = "@TOOLS@" # noqa: E501 TOOLS_DIR = "@TOOLS@" # noqa: E501
SOCKETBINDER = TOOLS_DIR + "/s6-ipcserver-socketbinder" # noqa: E501 SOCKETBINDER = TOOLS_DIR + "/s6-ipcserver-socketbinder" # noqa: E501
@ -27,12 +31,18 @@ VIRTIOFSD = "@VIRTIOFSD@" # noqa: E501
BWRAP = "@BWRAP@" # noqa: E501 BWRAP = "@BWRAP@" # noqa: E501
with open("@TOOLS_CLOSURE@", mode="r") as f: # noqa: E501 with open("@TOOLS_CLOSURE@", mode="r") as f: # noqa: E501
CLOSURE = [ TOOLS_CLOSURE = [
*(ln.rstrip() for ln in f.readlines()), *(ln.rstrip() for ln in f.readlines()),
os.path.dirname(__file__), os.path.dirname(__file__),
] ]
PASSTHRU_PATH = ":".join([TOOLS_DIR]) BASE_SYSTEM = "@SYSTEM@" # noqa: E501
with open("@SYSTEM_CLOSURE@", mode="r") as f: # noqa: E501
BASE_SYSTEM_CLOSURE = [
*(ln.rstrip() for ln in f.readlines()),
]
PASSTHRU_PATH = ":".join([TOOLS_DIR, *os.environ.get("PATH", "").split(":")])
PASSTHRU_ENV = { PASSTHRU_ENV = {
**{ **{
k: v k: v
@ -41,6 +51,7 @@ PASSTHRU_ENV = {
or k.startswith("WAYLAND") or k.startswith("WAYLAND")
or k.startswith("XDG_") or k.startswith("XDG_")
or k.startswith("DBUS_") or k.startswith("DBUS_")
or k.startswith("NIX_")
or k or k
in [ in [
"TAPS_SOCK", "TAPS_SOCK",
@ -52,6 +63,10 @@ PASSTHRU_ENV = {
def preprocess_args(args_mut): def preprocess_args(args_mut):
if not args_mut.app and args_mut.run:
args_mut.app = [*args_mut.run]
if not args_mut.vm:
args_mut.vm = args_mut.run[0]
keys = [k for k, v in args_mut._get_kwargs() if isinstance(v, str)] keys = [k for k, v in args_mut._get_kwargs() if isinstance(v, str)]
for k in keys: for k in keys:
v = getattr(args_mut, k) v = getattr(args_mut, k)
@ -86,6 +101,7 @@ class Processes:
self.vm = vm self.vm = vm
self.check = check self.check = check
self.defaults = defaults self.defaults = defaults
self.processes = []
def make_env(self): def make_env(self):
return { return {
@ -121,6 +137,7 @@ class Processes:
kwargs["pass_fds"] = kwargs.get("pass_fds", ()) kwargs["pass_fds"] = kwargs.get("pass_fds", ())
kwargs["env"] = kwargs.get("env", self.make_env()) kwargs["env"] = kwargs.get("env", self.make_env())
kwargs["cwd"] = kwargs.get("cwd", self.prefix) kwargs["cwd"] = kwargs.get("cwd", self.prefix)
kwargs["text"] = kwargs.get("text", True)
kwargs["stdin"] = kwargs.get("stdin", subprocess.DEVNULL) kwargs["stdin"] = kwargs.get("stdin", subprocess.DEVNULL)
kwargs["stdout"] = kwargs.get("stdout", subprocess.DEVNULL) kwargs["stdout"] = kwargs.get("stdout", subprocess.DEVNULL)
kwargs["stderr"] = kwargs.get("stderr", subprocess.DEVNULL) kwargs["stderr"] = kwargs.get("stderr", subprocess.DEVNULL)
@ -132,12 +149,19 @@ class Processes:
) )
if not alive_after(proc, 0.125): if not alive_after(proc, 0.125):
raise RuntimeError("Failed to start", args) raise RuntimeError("Failed to start", args)
print(f"Started {args}")
self.processes.append(proc)
yield proc yield proc
print(f"Releasing {args}")
finally: finally:
if alive_after(proc, 0.125): if subprocess.PIPE in (kwargs["stderr"], kwargs["stdout"]):
print(proc.communicate())
while alive_after(proc, 0.125):
try:
proc.terminate() proc.terminate()
if proc is not None:
proc.wait() proc.wait()
except Exception as e:
print(f"Cleanup failing: {e}")
@contextmanager @contextmanager
def bwrap( def bwrap(
@ -147,6 +171,8 @@ class Processes:
# Based on the args from # Based on the args from
# `host/rootfs/image/usr/bin/run-vmm` # `host/rootfs/image/usr/bin/run-vmm`
unshare_all=True, unshare_all=True,
uid=1000,
gid=100,
unshare_user=True, unshare_user=True,
unshare_ipc=None, unshare_ipc=None,
unshare_pid=None, unshare_pid=None,
@ -164,7 +190,7 @@ class Processes:
"/proc/sys", "/proc/sys",
"/dev/null", "/dev/null",
"/proc/kallsyms", "/proc/kallsyms",
*CLOSURE, *sorted(set([*TOOLS_CLOSURE, *BASE_SYSTEM_CLOSURE])),
), ),
ro_bind=(), ro_bind=(),
remount_ro=("/proc/fs", "/proc/irq"), remount_ro=("/proc/fs", "/proc/irq"),
@ -183,9 +209,6 @@ class Processes:
bwrap_args_sock, remote = socket.socketpair() bwrap_args_sock, remote = socket.socketpair()
remote.set_inheritable(True) remote.set_inheritable(True)
bwrap_args_f = bwrap_args_sock.makefile("w") bwrap_args_f = bwrap_args_sock.makefile("w")
with ExitStack() as cleanup:
# cleanup.enter_context(closing(bwrap_args_sock))
# cleanup.enter_context(closing(bwrap_args_f))
def print_arg(*args): def print_arg(*args):
print(*args, file=bwrap_args_f, sep="\0", end="\0") print(*args, file=bwrap_args_f, sep="\0", end="\0")
@ -194,6 +217,12 @@ class Processes:
print_arg("--unshare-all") print_arg("--unshare-all")
if unshare_user: if unshare_user:
print_arg("--unshare-user") print_arg("--unshare-user")
if uid is not None:
assert unshare_user
print_arg("--uid", uid)
if gid is not None:
assert unshare_user
print_arg("--gid", gid)
if unshare_ipc: if unshare_ipc:
print_arg("--unshare-ipc") print_arg("--unshare-ipc")
if unshare_pid: if unshare_pid:
@ -214,12 +243,15 @@ class Processes:
print_arg("--proc", proc) print_arg("--proc", proc)
for p in bind: for p in bind:
assert isinstance(p, (str, tuple)), p
p1, p2 = (p, p) if isinstance(p, str) else p p1, p2 = (p, p) if isinstance(p, str) else p
print_arg("--bind", p1, p2) print_arg("--bind", p1, p2)
for p in (*ro_bind, *ro_bind_implicit): for p in (*ro_bind, *ro_bind_implicit):
assert isinstance(p, (str, tuple)), p
p1, p2 = (p, p) if isinstance(p, str) else p p1, p2 = (p, p) if isinstance(p, str) else p
print_arg("--ro-bind", p1, p2) print_arg("--ro-bind", p1, p2)
for p in (*dev_bind, *dev_bind_implicit): for p in (*dev_bind, *dev_bind_implicit):
assert isinstance(p, (str, tuple)), p
p1, p2 = (p, p) if isinstance(p, str) else p p1, p2 = (p, p) if isinstance(p, str) else p
print_arg("--dev-bind", p1, p2) print_arg("--dev-bind", p1, p2)
for p in (*tmpfs, *tmpfs_implicit): for p in (*tmpfs, *tmpfs_implicit):
@ -230,11 +262,13 @@ class Processes:
bwrap_args_f.flush() bwrap_args_f.flush()
try:
with ExitStack() as proc_es:
with ExitStack() as es: with ExitStack() as es:
es.enter_context(closing(remote)) es.enter_context(closing(remote))
es.enter_context(closing(bwrap_args_sock)) es.enter_context(closing(bwrap_args_sock))
es.enter_context(closing(bwrap_args_f)) es.enter_context(closing(bwrap_args_f))
proc = cleanup.enter_context( proc = proc_es.enter_context(
self.popen( self.popen(
"bwrap", "bwrap",
"--args", "--args",
@ -246,10 +280,11 @@ class Processes:
) )
) )
yield proc yield proc
finally:
assert proc.returncode is not None, proc
@contextmanager @contextmanager
def run_ch(self): def run_ch(self):
try:
# s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) # s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
# s.set_inheritable(True) # s.set_inheritable(True)
# s.setblocking(True) # s.setblocking(True)
@ -266,40 +301,36 @@ class Processes:
"fd=0", "fd=0",
# f"fd={s.fileno()}" # f"fd={s.fileno()}"
] ]
needs_cleanup = False cleanup_paths = [
with self.bwrap(
*args,
bind=[self.prefix],
# Probably just need the path to vmlinux
ro_bind=["/nix/store"], # I give up
unshare_net=False,
shell=False,
stderr=None,
# pass_fds=(s.fileno(),)
) as proc:
# s.close()
assert alive_after(proc, 0.125)
if not os.path.exists(self.prefix + "/vmm.sock"):
raise RuntimeError(
f"{self.prefix}/vmm.sock should exist by now",
)
needs_cleanup = True
if proc.returncode is not None:
raise RuntimeError("CH exited early")
yield proc
finally:
unlink_paths = (
[
self.prefix + "/vmm.sock", self.prefix + "/vmm.sock",
self.prefix + "/vmm.sock.lock", self.prefix + "/vmm.sock.lock",
self.prefix + "/vsock.sock", self.prefix + "/vsock.sock",
] ]
if needs_cleanup new_paths = [p for p in cleanup_paths if not os.path.exists(p)]
else [] old_paths = [p for p in cleanup_paths if p not in new_paths]
with ExitStack() as cleanup:
cleanup.enter_context(removing(*new_paths))
proc = cleanup.enter_context(
self.bwrap(
*args,
bind=[self.prefix],
# Probably just need the path to vmlinux
# ro_bind=["/nix/store"], # I give up
unshare_net=False,
shell=False,
# pass_fds=(s.fileno(),)
) )
for p in unlink_paths: )
if os.path.exists(p): # s.close()
os.remove(p) cleanup.enter_context(removing(*old_paths))
assert alive_after(proc, 1.0), proc
if not os.path.exists(self.prefix + "/vmm.sock"):
raise RuntimeError(
f"{self.prefix}/vmm.sock should exist by now",
)
if proc.returncode is not None:
raise RuntimeError("CH exited early")
yield proc
@contextmanager @contextmanager
def start_gpu( def start_gpu(
@ -330,7 +361,7 @@ class Processes:
with self.popen( with self.popen(
*args, *args,
stderr=None, stderr=None,
) as proc, removing(sock_path): ) as proc, removing(sock_path, sock_path + ".lock"):
yield proc, sock_path yield proc, sock_path
@contextmanager @contextmanager
@ -338,9 +369,9 @@ class Processes:
self, self,
root_dir, root_dir,
tag, tag,
ro=False, ro=True,
subdirs=None, subdirs=None,
extra_flags=("--posix-acl",), extra_flags=("--posix-acl", "--xattr"),
): ):
assert os.path.exists(root_dir) assert os.path.exists(root_dir)
@ -351,20 +382,16 @@ class Processes:
# s.setblocking(True) # s.setblocking(True)
# s.set_inheritable(True) # s.set_inheritable(True)
def rm_sock():
if os.path.exists(sock_path):
os.remove(sock_path)
with ExitStack() as cleanup: # noqa: F841 with ExitStack() as cleanup: # noqa: F841
# s.bind(sock_path.encode("utf8")) # s.bind(sock_path.encode("utf8"))
# cleanup.enter_context(closing(s)) # cleanup.enter_context(closing(s))
cleanup.enter_context(defer(rm_sock)) cleanup.enter_context(removing(sock_path, sock_path + ".pid"))
args = [ args = [
# If using bwrap(): # If using bwrap():
# "--argv0", "virtiofsd", # "--argv0", "virtiofsd",
# "--uid", "1000", # "--uid", "1000",
# "--gid", "1000", # "--gid", "100",
# "--", # "--",
"unshare", "unshare",
"-rUm", "-rUm",
@ -372,7 +399,7 @@ class Processes:
"--map-user", "--map-user",
"1000", "1000",
"--map-group", "--map-group",
"1000", "100",
VIRTIOFSD, VIRTIOFSD,
"--shared-dir", "--shared-dir",
root_dir, root_dir,
@ -396,6 +423,8 @@ class Processes:
# if subdirs is not None # if subdirs is not None
# else [root_dir], # else [root_dir],
# "pass_fds": (2, s.fileno()), # "pass_fds": (2, s.fileno()),
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
} }
try: try:
with self.popen(*args, **kwargs) as p: with self.popen(*args, **kwargs) as p:
@ -423,20 +452,43 @@ def removing(*paths):
os.remove(p) os.remove(p)
if __name__ == "__main__": def connect_ch_vsock(
args, args_next = parser.parse_known_args() vsock_sock_path,
preprocess_args(args) port: int,
type=socket.SOCK_STREAM,
blocking=True,
) -> socket.socket:
s = socket.socket(socket.AF_UNIX, type, 0)
s.setblocking(blocking)
s.connect(vsock_sock_path)
s.send(b"CONNECT %d\n" % port)
return s
@contextmanager
def listen_ch_vsock(
vsock_sock_path,
port: int,
type=socket.SOCK_STREAM,
blocking=True,
) -> socket.socket:
listen_path = vsock_sock_path + "_%d" % port
s = socket.socket(socket.AF_UNIX, type, 0)
s.setblocking(blocking)
s.bind(listen_path)
s.listen()
try:
yield s
finally:
os.remove(listen_path)
def main(args, args_next, cleanup, ps):
send_dir = PASSTHRU_ENV["HOME"] + f"/send/{args.vm}" send_dir = PASSTHRU_ENV["HOME"] + f"/send/{args.vm}"
os.makedirs(send_dir, exist_ok=True) os.makedirs(send_dir, exist_ok=True)
os.makedirs(args.prefix, exist_ok=True) os.makedirs(args.prefix, exist_ok=True)
os.makedirs(args.prefix + "/pts", exist_ok=True)
ps = Processes(
prefix=args.prefix,
vm=args.vm,
)
ch_remote = [ ch_remote = [
"ch-remote", "ch-remote",
@ -444,18 +496,83 @@ if __name__ == "__main__":
args.prefix + "/vmm.sock", args.prefix + "/vmm.sock",
] ]
with ExitStack() as cleanup: with open(args.vm_config) as f:
config = json.load(f)
vfsd, vfsd_path = cleanup.enter_context( app_paths = []
for a in args.app:
out_path = ps.exec(
"nix-build",
"<nixpkgs>",
"-A",
a,
"--no-out-link",
capture_output=True,
text=True,
).stdout.strip()
assert out_path.startswith("/nix/store/")
app_paths.append(out_path)
apps_closure = ps.exec( # noqa: F841
"nix-store",
"-qR",
*app_paths,
capture_output=True,
text=True,
).stdout.split()
ready_sock = cleanup.enter_context(
listen_ch_vsock(ps.prefix + "/vsock.sock", 8888),
)
virtiofs_socks = []
_, sock_path = cleanup.enter_context(
ps.start_virtiofsd( ps.start_virtiofsd(
send_dir, send_dir,
tag="send", tag="send",
ro=False,
) )
) )
virtiofs_socks.append(("send", sock_path))
_, sock_path = cleanup.enter_context(
ps.start_virtiofsd(
"/nix/store",
subdirs=apps_closure,
tag="apps",
)
)
virtiofs_socks.append(("apps", sock_path))
_, sock_path = cleanup.enter_context(
ps.start_virtiofsd(
"/nix/store",
subdirs=BASE_SYSTEM_CLOSURE,
tag="system",
)
)
virtiofs_socks.append(("system", sock_path))
if args.persist_home:
os.makedirs(args.prefix + "/home", exist_ok=True)
_, sock_path = cleanup.enter_context(
ps.start_virtiofsd(
args.prefix + "/home",
subdirs=BASE_SYSTEM_CLOSURE,
tag="home",
ro=False,
)
)
virtiofs_socks.append(("home", sock_path))
config["payload"]["cmdline"] += " uvms.persist-home=1"
gpud, gpud_path = cleanup.enter_context(ps.start_gpu()) gpud, gpud_path = cleanup.enter_context(ps.start_gpu())
ch = cleanup.enter_context(ps.run_ch()) ch = cleanup.enter_context(ps.run_ch())
ps.exec(*ch_remote, "create", args.vm_config)
ps.exec(
*ch_remote,
"create",
input=json.dumps(config),
text=True,
)
ps.exec( ps.exec(
TAPS, TAPS,
"pass", "pass",
@ -464,11 +581,85 @@ if __name__ == "__main__":
"id=wan,fd=3,mac=00:00:00:00:00:01", "id=wan,fd=3,mac=00:00:00:00:00:01",
) )
ps.exec(*ch_remote, "add-fs", f"tag=send,socket={vfsd_path},id=send") # TODO: add-fs apps closure separately
for tag, sock_path in virtiofs_socks:
ps.exec(*ch_remote, "add-fs", f"tag={tag},socket={sock_path},id={tag}")
ps.exec(*ch_remote, "add-gpu", f"socket={gpud_path}") ps.exec(*ch_remote, "add-gpu", f"socket={gpud_path}")
ps.exec(*ch_remote, "boot") ps.exec(*ch_remote, "boot")
ps.exec(*ch_remote, "info") ps.exec(*ch_remote, "info")
with ready_sock:
ready_sock.settimeout(16.0)
try:
con, _ = ready_sock.accept()
except: # noqa: E722
print(
"CH didn't try connecting to the readiness notification socket"
) # noqa: E501
else:
with con:
msg = con.recv(128)
assert msg.startswith(b"READY=1"), msg
with connect_ch_vsock(ps.prefix + "/vsock.sock", 24601) as guest:
for r in args.run:
try:
guest.send(
json.dumps(
{
"run": {
"argv": [r],
"EXTRA_PATH": [
f"{a}/bin" for a in app_paths
], # noqa: E501
}
}
).encode("utf8")
)
res = guest.recv(8192)
try:
res = json.loads(guest.recv(8192))
except json.JSONDecodeError as e:
print(f"Couldn't interpret --run {r} response: {e} {res}")
continue
adverb = (
"Successfully"
if res["status"] == "exec succeeded"
else "Failed to" # noqa: E501
)
print(f"{adverb} --run {r}: {res}")
except Exception as e:
print(f"Couldn't --run {r}: {repr(e)}")
try: try:
ch.wait() ch.wait()
except KeyboardInterrupt: except KeyboardInterrupt:
pass pass
if __name__ == "__main__":
args, args_next = parser.parse_known_args()
preprocess_args(args)
ps = Processes(
prefix=args.prefix,
vm=args.vm,
)
try:
with ExitStack() as cleanup:
main(args, args_next, cleanup, ps)
finally:
for p in ps.processes:
if p.returncode is not None:
continue
try:
print(f"Cleanup failed. Re-trying the killing of {p}")
p.terminate()
except: # noqa: E722
pass
for p in ps.processes:
if p.returncode is not None:
continue
try:
p.wait()
except: # noqa: E722
pass

407
profiles/baseImage.nix Normal file
View file

@ -0,0 +1,407 @@
{
lib,
config,
modulesPath,
pkgs,
...
}:
let
inherit (lib) mkOption types concatStringsSep;
jsonType = (pkgs.formats.json { }).type;
inherit (config.system.build) initialRamdisk;
inherit (config.system.boot.loader) initrdFile;
inherit (config.boot.kernelPackages) kernel;
kernelTarget = pkgs.stdenv.hostPlatform.linux-kernel.target;
uvmsPkgs = pkgs.callPackage ../pkgs { };
waylandSock = "/run/user/1000/wayland-1";
env = {
XDG_RUNTIME_DIR = "/run/user/1000";
WAYLAND_DISPLAY = "wayland-1";
# MESA_LOADER_DRIVER_OVERRIDE = "zink";
ELECTRON_OZONE_PLATFORM_HINT = "wayland";
MOZ_ENABLE_WAYLAND = "1";
QT_QPA_PLATFORM = "wayland"; # Qt Applications
GDK_BACKEND = "wayland"; # GTK Applications
XDG_SESSION_TYPE = "wayland"; # Electron Applications
SDL_VIDEODRIVER = "wayland";
CLUTTER_BACKEND = "wayland";
NIXOS_OZONE_WL = "1";
};
in
{
imports = [
(modulesPath + "/profiles/minimal.nix")
./debug-closure.nix
./minimal.nix
./on-failure.nix
];
config = {
some.failure-handler.enable = true;
hardware.graphics.enable = true;
# boot.kernelPackages = pkgs.linuxPackagesFor uvmsPkgs.linux-uvm;
# boot.isContainer = true;
boot.initrd.kernelModules = [
"drm"
"virtio_blk"
"virtiofs"
"virtio_gpu"
"virtio_mmio"
"virtio_pci"
"overlay"
];
boot.kernelModules = [
"drm"
"erofs"
"overlay"
"virtio_blk"
"virtiofs"
"virtio_gpu"
"virtio_mmio"
"virtio_pci"
];
boot.initrd.systemd.initrdBin = [
pkgs.fuse
pkgs.fuse3
];
fileSystems = {
"/" = lib.mkDefault {
device = "rootfs"; # how does this work? does this assign a label to the tmpfs?
fsType = "tmpfs";
options = [ "size=20%,mode=0755" ];
neededForBoot = true;
};
"/nix/store" = {
fsType = "overlay";
overlay.lowerdir = [
"/nix/.ro-stores/system"
"/nix/.ro-stores/apps"
];
neededForBoot = true;
};
"/nix/.ro-stores/system" = {
device = "system";
fsType = "virtiofs";
options = [
"defaults"
"ro"
"x-systemd.requires=systemd-modules-load.service"
];
neededForBoot = true;
};
"/nix/.ro-stores/apps" = {
device = "apps";
fsType = "virtiofs";
options = [
"defaults"
"ro"
"x-systemd.requires=systemd-modules-load.service"
];
neededForBoot = true;
};
};
systemd.mounts = [
{
type = "virtiofs";
where = "/home/user";
what = "home";
after = [ "systemd-modules-load.service" ];
wantedBy = [ "local-fs.target" ];
before = [ "local-fs.target" ];
requires = [ "systemd-modules-load.service" ];
options = lib.concatStringsSep "," [
"defaults"
"rw"
"X-mount.owner=1000"
"X-mount.group=100"
];
unitConfig = {
ConditionKernelCommandLine = "uvms.persist-home=1";
};
}
{
type = "virtiofs";
where = "/home/user/send";
what = "send";
wants = [
"home-user.mount"
"-.mount"
];
after = [
"systemd-modules-load.service"
"home-user.mount"
"-.mount"
];
wantedBy = [ "local-fs.target" ];
before = [ "local-fs.target" ];
options = lib.concatStringsSep "," [
"defaults"
"rw"
"X-mount.owner=1000"
"X-mount.group=100"
];
unitConfig = {
DefaultDependencies = false;
};
}
];
# systemd.services."mount-home-user-send" = {
# wants = [ "home-user.mount" ];
# after = [
# "systemd-modules-load.service"
# "home-user.mount"
# "-.mount"
# ];
# wantedBy = [ "local-fs.target" ];
# before = [ "local-fs.target" ];
# unitConfig = {
# DefaultDependencies = false;
# };
# environment.PATH = lib.mkForce (
# lib.makeBinPath [
# pkgs.fuse
# pkgs.fuse3
# pkgs.coreutils
# ]
# );
# serviceConfig = {
# Type = "oneshot";
# RemainsAfterExit = true;
# ExecStart = [
# "/run/current-system/sw/bin/mkdir -p /home/user/send"
# "/run/current-system/sw/bin/chown user /home/user/send"
# "/run/current-system/sw/sbin/mount -t virtiofs -o defaults,rw send /home/user/send"
# ];
# StandardOutput = "journal+console";
# StandardError = "journal+console";
# };
# };
systemd.network.enable = true;
networking.useNetworkd = true;
networking.nftables.enable = true;
networking.useDHCP = true;
networking.nameservers = [ "1.1.1.1" ];
services.resolved.enable = lib.mkForce true;
system.activationScripts.specialfs = lib.mkForce "";
# networking.firewall.enable = false;
console.enable = false;
services.udev.packages = lib.mkDefault [ ];
systemd.services."systemd-oomd".enable = false;
users.mutableUsers = false;
users.users.root.password = "hacktheplanet!";
users.groups.users = { };
users.users.user = {
uid = 1000;
isNormalUser = true;
password = "hacktheplanet!";
extraGroups = [
"video"
"render"
"users"
"wheel"
];
};
environment.variables = env;
systemd.globalEnvironment = env;
systemd.tmpfiles.settings."10-xdg" = {
${env.XDG_RUNTIME_DIR}.d = {
user = "user";
group = "users";
mode = "0755";
};
};
systemd.sockets."wayland-proxy" = {
listenStreams = [
waylandSock
];
socketConfig = {
SocketUser = "user";
SocketGroup = "users";
FileDescriptorName = "wayland";
};
wantedBy = [ "sockets.target" ];
partOf = [ "wayland-proxy.service" ];
};
systemd.services."wayland-proxy" = {
wantedBy = [ "default.target" ];
serviceConfig = {
User = "user";
Group = "users";
ExecStart = "${lib.getExe pkgs.wayland-proxy-virtwl} --virtio-gpu";
# ExecStart = "${lib.getExe uvmsPkgs.wl-cross-domain-proxy} --listen-fd --filter-global wp_presentation";
ExecStartPre = [
"+/run/current-system/sw/bin/chmod 0666 /dev/dri/card0 /dev/dri/renderD128"
];
StandardOutput = "journal+console";
StandardError = "journal+console";
Restart = "on-failure";
RestartSec = 5;
};
};
systemd.sockets."uvms-guest" = {
wantedBy = [ "default.target" ];
listenStreams = [
"vsock::24601"
];
partOf = [ "uvms-guest.service" ];
};
systemd.services."uvms-guest" = {
serviceConfig = {
User = "user";
Group = "users";
ExecStart = "${lib.getExe uvmsPkgs.uvms-guest}";
StandardOutput = "journal+console";
StandardError = "journal+console";
Restart = "on-failure";
RestartSec = 5;
};
};
fonts.enableDefaultPackages = true;
boot.kernelParams = [
"earlyprintk=ttyS0"
"console=ttyS0"
"reboot=t"
"panic=-1"
"io.systemd.credential:vmm.notify_socket=vsock-stream:2:8888"
# "rootfstype=virtiofs"
# "root=rootstore"
];
};
# VM definition options: a typed façade over cloud-hypervisor's JSON
# configuration. Anything not declared here passes through verbatim via
# the freeform type.
options = {
  system.build.ch = mkOption {
    type = types.package;
    # The rendered vm.json handed to cloud-hypervisor.
    default = (pkgs.formats.json { }).generate "vm.json" config.uvms.ch.settings;
  };
  uvms.ch.settings = mkOption {
    default = { };
    type = types.submodule {
      freeformType = jsonType;
      options = {
        payload = {
          cmdline = mkOption {
            type = types.str;
            # Guest kernel command line: the system's kernelParams plus the
            # stage-1 init of this very configuration.
            default = concatStringsSep " " (
              config.boot.kernelParams
              ++ [
                # "init=${lib.removePrefix "/nix/store" "${config.system.build.toplevel}"}/init"
                "init=${config.system.build.toplevel}/init"
              ]
            );
            # FIX: the previous defaultText interpolated
            # config.boot.kernelParams (a list) directly into a string,
            # which is a Nix evaluation error once documentation is
            # rendered. Show the default as a literal expression instead.
            defaultText = lib.literalExpression ''concatStringsSep " " (config.boot.kernelParams ++ [ "init=.../init" ])'';
          };
          kernel = mkOption {
            type = types.str;
            # `kernel`/`kernelTarget` are bound in the enclosing let (outside
            # this chunk).
            default = "${kernel}/${kernelTarget}";
          };
          initramfs = mkOption {
            type = types.nullOr types.str;
            default = "${initialRamdisk}/${initrdFile}";
          };
        };
        vsock = {
          # Guest CID. CIDs 0-2 are reserved (2 is the host), hence 4.
          cid = mkOption {
            type = types.int;
            default = 4;
          };
          # Host-side unix socket backing the vsock device.
          socket = mkOption {
            type = types.str;
            default = "vsock.sock";
          };
        };
        # cloud-hypervisor REST API socket (what ch-remote talks to).
        "api-socket" = mkOption {
          type = types.str;
          default = "vmm.sock";
        };
        # Legacy serial console is captured to a file; the virtio console
        # gets a pty.
        "serial".mode = mkOption {
          type = types.str;
          default = "File";
        };
        "serial".file = mkOption {
          type = types.nullOr types.str;
          default = "serial";
        };
        "console".mode = mkOption {
          type = types.str;
          default = "Pty";
        };
        "console".file = mkOption {
          type = types.nullOr types.str;
          default = null;
        };
        # "watchdog" = true;
        # "seccomp" = true;
        # Block devices; used by the runner for read-only store layer images.
        disks = mkOption {
          default = [ ];
          type = types.listOf (
            types.submodule {
              freeformType = jsonType;
              options = {
                path = mkOption {
                  type = types.oneOf [
                    types.path
                    types.str
                  ];
                };
                readonly = mkOption {
                  type = types.bool;
                  default = true;
                };
                id = mkOption { type = types.str; };
              };
            }
          );
        };
        memory = mkOption {
          default = { };
          type = types.submodule {
            freeformType = jsonType;
            options = {
              # Bytes; 1536 MiB.
              size = mkOption {
                type = types.int;
                default = 1536 * 1048576;
              };
              # NOTE(review): shared memory is presumably required for the
              # virtiofs/virtio-gpu mappings used elsewhere — confirm before
              # turning off.
              shared = mkOption {
                type = types.bool;
                default = true;
              };
              mergeable = mkOption {
                type = types.bool;
                default = true;
              };
            };
          };
        };
        cpus = mkOption {
          default = { };
          type = types.submodule {
            freeformType = jsonType;
            options = {
              boot_vcpus = mkOption {
                type = types.int;
                default = 4;
              };
              max_vcpus = mkOption {
                type = types.int;
                default = 4;
              };
            };
          };
        };
      };
    };
  };
};
}

View file

@ -9,7 +9,7 @@
# but we shall begin by reproducing at least some of their work. # but we shall begin by reproducing at least some of their work.
let let
cfg = config.uvms.cloud-hypervisor; cfg = config.uvms.ch;
inherit (config.networking) hostName; inherit (config.networking) hostName;
inherit (config.debug.closure.erofs) layers; inherit (config.debug.closure.erofs) layers;
@ -48,69 +48,21 @@ let
in in
{ {
options = { options = {
uvms.cloud-hypervisor.enable = lib.mkEnableOption "Configure guest (e.g. fileSystems)"; uvms.ch.enable = lib.mkEnableOption "Configure guest (e.g. fileSystems)";
uvms.cloud-hypervisor.runner = mkOption { uvms.ch.runner = mkOption {
type = types.package; type = types.package;
description = "A naive script for running this system in cloud-hypervisor"; description = "A naive script for running this system in cloud-hypervisor";
}; };
uvms.cloud-hypervisor.debugger = mkOption { uvms.ch.debugger = mkOption {
type = types.lazyAttrsOf types.anything; type = types.lazyAttrsOf types.anything;
description = "Same but you can debug the kernel"; description = "Same but you can debug the kernel";
}; };
uvms.cloud-hypervisor.settingsFile = mkOption { uvms.ch.settingsFile = mkOption {
type = types.package; type = types.package;
default = chSettingsFile; default = chSettingsFile;
defaultText = "..."; defaultText = "...";
readOnly = true; readOnly = true;
}; };
uvms.cloud-hypervisor.settings = mkOption {
default = { };
type = types.submodule {
freeformType = (pkgs.formats.json { }).type;
options = {
payload = {
cmdline = mkOption { type = types.str; };
kernel = mkOption { type = types.str; };
initramfs = mkOption {
type = types.str;
default = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
};
};
vsock = {
cid = mkOption {
type = types.int;
default = 4;
};
socket = mkOption {
type = types.str;
default = "vsock.sock";
};
};
"api-socket" = mkOption {
type = types.str;
default = "vmm.sock";
};
"serial".mode = mkOption {
type = types.str;
default = "File";
};
"serial".file = mkOption {
type = types.nullOr types.str;
default = "serial";
};
"console".mode = mkOption {
type = types.str;
default = "Pty";
};
"console".file = mkOption {
type = types.nullOr types.str;
default = null;
};
# "watchdog" = true;
# "seccomp" = true;
};
};
};
uvms.cloud-hypervisor.extraCmdline = lib.mkOption { uvms.cloud-hypervisor.extraCmdline = lib.mkOption {
type = lib.types.listOf lib.types.str; type = lib.types.listOf lib.types.str;
default = [ ]; default = [ ];
@ -118,44 +70,24 @@ in
uvms.cloud-hypervisor.cmdline = lib.mkOption { uvms.cloud-hypervisor.cmdline = lib.mkOption {
type = lib.types.listOf lib.types.str; type = lib.types.listOf lib.types.str;
default = [ default = [
"earlyprintk=ttyS0"
"console=ttyS0"
"reboot=t"
"panic=-1"
"init=${config.system.build.toplevel}/init"
] ]
++ config.boot.kernelParams ++ config.boot.kernelParams
++ config.uvms.cloud-hypervisor.extraCmdline; ++ config.uvms.cloud-hypervisor.extraCmdline;
}; };
}; };
imports = [ ./baseImage.nix ];
config = lib.mkMerge [ config = lib.mkMerge [
{ {
# boot.kernelPackages = pkgs.linuxPackagesFor (uvmsPkgs.linux-uvm); # boot.kernelPackages = pkgs.linuxPackagesFor (uvmsPkgs.linux-uvm);
uvms.cloud-hypervisor.settings = { uvms.ch.settings = {
payload = {
cmdline = lib.concatStringsSep " " cfg.cmdline;
kernel = "${config.boot.kernelPackages.kernel}/${pkgs.stdenv.hostPlatform.linux-kernel.target}";
};
disks = map (img: {
path = img;
readonly = true;
id = toString img.label;
}) layers;
memory = { memory = {
size = 1536 * 1048576;
shared = true;
mergeable = true;
# hotplugged_size = 512 * 1048576; # hotplugged_size = 512 * 1048576;
# hotplugd_size = 1536 * 1048576; # hotplugd_size = 1536 * 1048576;
# hotplug_method = "virtio-mem" # hotplug_method = "virtio-mem"
}; };
cpus = {
boot_vcpus = 4;
max_vcpus = 4;
};
}; };
uvms.cloud-hypervisor.debugger = pkgs.testers.runNixOSTest ( uvms.ch.debugger = pkgs.testers.runNixOSTest (
{ config, ... }: { config, ... }:
{ {
name = "test-run-${hostName}"; name = "test-run-${hostName}";
@ -265,39 +197,9 @@ in
); );
# NOTE: Used to be an even uglier bash script, but, for now, execline makes for easier comparisons against spectrum # NOTE: Used to be an even uglier bash script, but, for now, execline makes for easier comparisons against spectrum
uvms.cloud-hypervisor.runner = writeElb "run-${hostName}" '' uvms.ch.runner = writeElb "run-${hostName}" ''
${lib.getExe uvmsPkgs.uvms} --vm-config=${chSettingsFile} --vm=${hostName} ${lib.getExe uvmsPkgs.uvms} --vm-config=${chSettingsFile} --vm=${hostName}
''; '';
} }
(lib.mkIf cfg.enable {
boot.initrd.availableKernelModules = [
"erofs"
"overlay"
"virtio_mmio"
"virtio_pci"
"virtio_blk"
# "9pnet_virtio"
# "9p"
"virtiofs"
];
boot.initrd.systemd.enable = lib.mkDefault true;
fileSystems = {
"/nix/store" = {
fsType = "overlay";
overlay.lowerdir = map (img: "/nix/.ro-stores/${toString img.seq}") layers;
neededForBoot = true;
};
}
// lib.listToAttrs (
map (
img:
lib.nameValuePair "/nix/.ro-stores/${toString img.seq}" {
device = "/dev/disk/by-label/${img.label}";
neededForBoot = true;
options = [ "x-systemd.device-timeout=5" ];
}
) layers
);
})
]; ];
} }

View file

@ -15,15 +15,9 @@ let
inherit (ps) writeErofsLayers; inherit (ps) writeErofsLayers;
emptySystem = import (pkgs.path + "/nixos/lib/eval-config.nix") { emptySystem = import (pkgs.path + "/nixos/lib/eval-config.nix") {
modules = [ modules = [
(modulesPath + "/profiles/minimal.nix") ./minimal.nix
{ {
system.stateVersion = config.system.stateVersion; system.stateVersion = config.system.stateVersion;
fileSystems."/".fsType = "tmpfs";
boot.loader.grub.enable = false;
networking.hostName = "base";
networking.nftables.enable = true;
networking.useNetworkd = true;
systemd.network.enable = true;
} }
]; ];
}; };

36
profiles/minimal.nix Normal file
View file

@ -0,0 +1,36 @@
{
  lib,
  config,
  modulesPath,
  ...
}:
{
  # Shared "tiny VM guest" profile: upstream minimal.nix plus everything a
  # disposable microVM image can do without.
  imports = [
    (modulesPath + "/profiles/minimal.nix")
  ];

  # Boot: no bootloader, systemd-based initrd, no kexec.
  boot = {
    loader.grub.enable = false;
    initrd.systemd.enable = true;
    kexec.enable = false;
  };

  # Root is a throwaway tmpfs unless a consumer overrides it.
  fileSystems."/".fsType = lib.mkDefault "tmpfs";

  networking = {
    hostName = lib.mkDefault "base";
    useNetworkd = true;
    # Only pull in nftables when something actually filters or NATs.
    nftables.enable = config.networking.firewall.enable || config.networking.nat.enable;
  };

  # Static user database via userborn instead of systemd-sysusers.
  systemd.sysusers.enable = false;
  services.userborn.enable = true;

  # The image is immutable: no nix daemon, no switch-to-configuration,
  # no config generator.
  nix.enable = false;
  system.switch.enable = false;
  system.tools.nixos-generate-config.enable = false;

  # Daemons and helpers that make no sense in a sealed guest.
  services.logrotate.enable = false;
  services.udisks2.enable = false;
  services.resolved.enable = false;
  systemd.coredump.enable = false;
  powerManagement.enable = false;

  # Units that would only churn on a stateless, tmpfs-rooted system.
  systemd.services.generate-shutdown-ramfs.enable = lib.mkForce false;
  systemd.services.systemd-remount-fs.enable = lib.mkForce false;
  systemd.services.systemd-pstore.enable = lib.mkForce false;
  systemd.services.lastlog2-import.enable = lib.mkForce false;
  # systemd.services.suid-sgid-wrappers.enable = lib.mkForce false;
}

72
profiles/on-failure.nix Normal file
View file

@ -0,0 +1,72 @@
{
  lib,
  config,
  pkgs,
  ...
}:
let
  cfg = config.some.failure-handler;
  # Diagnostic script run for a failed unit ($1): dump `systemctl status`
  # and any matching dmesg lines to stderr. Every command is `|| true` so
  # the failure handler itself can never cascade.
  jobScript = pkgs.writeShellScriptBin "show-status" ''
    set -euo pipefail
    export PATH=${lib.getBin config.boot.initrd.systemd.package}/bin''${PATH:+:}$PATH
    export PATH=${lib.getBin pkgs.util-linux}/bin''${PATH:+:}$PATH
    export PATH=${lib.getBin pkgs.gnugrep}/bin''${PATH:+:}$PATH
    unit="$1"
    shift
    systemctl status "$unit" >&2 || true
    # grep -F reads one fixed pattern per line: match either the unit name
    # or the word "error" anywhere in dmesg.
    patterns=$unit$'\n'error
    dmesg | grep -Fi "$patterns" || true
  '';
  mkSystemdDropin = pkgs.callPackage ../pkgs/mkSystemdDropin.nix { };
in
{
  options.some.failure-handler = {
    # FIX: the installed template unit is `status@.service` (see the
    # extraCommands below and the OnFailure= drop-in), not
    # `show-status@.service` as the descriptions previously claimed.
    enable = lib.mkEnableOption "Set up status@.service as a default OnFailure dependency";
    stage-1.enable =
      lib.mkEnableOption "Set up status@.service as a default OnFailure dependency in initramfs/initrd"
      // {
        # Stage-1 follows the main switch unless set explicitly.
        default = cfg.enable;
      };
    package = lib.mkOption {
      type = lib.types.package;
      readOnly = true;
      description = "The internal package with the drop-ins";
    };
  };
  config = {
    some.failure-handler.package = mkSystemdDropin {
      name = "status-on-failure";
      inherit jobScript;
      # Drop-in applied to units: on failure, start the status template
      # instance for that unit (%n expands to the full unit name).
      dropinText = ''
        [Unit]
        OnFailure=status@%n.service
      '';
      serviceText = ''
        [Unit]
        DefaultDependencies=no
        Description=Show status for %i
        [Service]
        Type=oneshot
        StandardOutput=journal+console
        StandardError=journal+console
        ExecStart=${lib.getExe jobScript} "%i"
        JoinsNamespaceOf=
        DelegateNamespaces=
      '';
      extraCommands = ''
        printf "%s" "$serviceText" > "$root/status@.service"
      '';
    };
    boot.initrd.systemd.packages = lib.optionals cfg.stage-1.enable [ cfg.package ];
    # The script and the tools it puts on PATH must be copied into the
    # initrd explicitly; stage-1 has no full store.
    # NOTE(review): storePaths lists the packages' default outputs while the
    # script uses lib.getBin — confirm these resolve to the same outputs.
    boot.initrd.systemd.storePaths = lib.optionals cfg.stage-1.enable [
      jobScript
      pkgs.util-linux
      pkgs.gnugrep
    ];
    systemd.packages = lib.optionals cfg.enable [ cfg.package ];
  };
}