nixos/incus: add support for soft daemon restart
This is a feature supported out of the box by upstream that allows the
incusd service to be restarted without impacting running instances.
While this does give up a bit of reproducibility (running instances keep
using the old qemu and lxc, for example), there are clear benefits in
allowing the host to apply updates without disrupting instances.
Modeled after the zabbly implementation: 2a67c3e260/systemd/incus-startup.service
This will now be the default.
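As a rough usage sketch, not part of the change itself and assuming nothing beyond the options visible in this diff, a host gets the new behaviour by default and can opt back out like this:

    {
      virtualisation.incus.enable = true;

      # The new default (true) keeps instances running when incus.service is
      # stopped or restarted. Setting this to false restores the previous
      # behaviour, where stopping the daemon runs `incus admin shutdown`.
      virtualisation.incus.softDaemonRestart = false;
    }

With the option left at its default, stopping incus.service leaves instance processes running (KillMode=process and no `incus admin shutdown` on stop), which is what the new softDaemonRestart subtests below assert.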
commit 7d5b333dcd (parent e049102f0f)
@@ -105,6 +105,37 @@ let
       path = "${pkgs.OVMFFull.fd}/FV/${ovmf-prefix}_VARS.fd";
     }
   ];
+
+  environment = lib.mkMerge [
+    {
+      INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
+      INCUS_OVMF_PATH = ovmf;
+      INCUS_USBIDS_PATH = "${pkgs.hwdata}/share/hwdata/usb.ids";
+      PATH = lib.mkForce serverBinPath;
+    }
+    (lib.mkIf (cfg.ui.enable) { "INCUS_UI" = cfg.ui.package; })
+  ];
+
+  incus-startup = pkgs.writeShellScript "incus-startup" ''
+    case "$1" in
+      start)
+        systemctl is-active incus.service -q && exit 0
+        exec incusd activateifneeded
+        ;;
+
+      stop)
+        systemctl is-active incus.service -q || exit 0
+        exec incusd shutdown
+        ;;
+
+      *)
+        echo "unknown argument \`$1'" >&2
+        exit 1
+        ;;
+    esac
+
+    exit 0
+  '';
 in
 {
   meta = {
@@ -137,6 +168,14 @@ in
       description = "The incus client package to use. This package is added to PATH.";
     };
 
+    softDaemonRestart = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = ''
+        Allow for incus.service to be stopped without affecting running instances.
+      '';
+    };
+
     preseed = lib.mkOption {
       type = lib.types.nullOr (lib.types.submodule { freeformType = preseedFormat.type; });
 
@@ -282,6 +321,8 @@ in
     systemd.services.incus = {
       description = "Incus Container and Virtual Machine Management Daemon";
 
+      inherit environment;
+
       wantedBy = lib.mkIf (!cfg.socketActivation) [ "multi-user.target" ];
       after = [
         "network-online.target"
@@ -296,20 +337,10 @@ in
 
       wants = [ "network-online.target" ];
 
-      environment = lib.mkMerge [
-        {
-          INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
-          INCUS_OVMF_PATH = ovmf;
-          INCUS_USBIDS_PATH = "${pkgs.hwdata}/share/hwdata/usb.ids";
-          PATH = lib.mkForce serverBinPath;
-        }
-        (lib.mkIf (cfg.ui.enable) { "INCUS_UI" = cfg.ui.package; })
-      ];
-
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
         ExecStartPost = "${cfg.package}/bin/incusd waitready --timeout=${cfg.startTimeout}";
-        ExecStop = "${cfg.package}/bin/incus admin shutdown";
+        ExecStop = lib.optionalString (!cfg.softDaemonRestart) "${cfg.package}/bin/incus admin shutdown";
 
         KillMode = "process"; # when stopping, leave the containers alone
         Delegate = "yes";
@@ -324,6 +355,27 @@ in
       };
     };
 
+    systemd.services.incus-startup = lib.mkIf cfg.softDaemonRestart {
+      description = "Incus Instances Startup/Shutdown";
+
+      inherit environment;
+
+      after = [
+        "incus.service"
+        "incus.socket"
+      ];
+      requires = [ "incus.socket" ];
+
+      serviceConfig = {
+        ExecStart = "${incus-startup} start";
+        ExecStop = "${incus-startup} stop";
+        RemainAfterExit = true;
+        TimeoutStartSec = "600s";
+        TimeoutStopSec = "600s";
+        Type = "oneshot";
+      };
+    };
+
     systemd.sockets.incus = {
       description = "Incus UNIX socket";
       wantedBy = [ "sockets.target" ];
@@ -70,51 +70,60 @@ in
     machine.succeed("incus exec container mount | grep 'lxcfs on /proc/cpuinfo type fuse.lxcfs'")
     machine.succeed("incus exec container mount | grep 'lxcfs on /proc/meminfo type fuse.lxcfs'")
 
-    with subtest("Container CPU limits can be managed"):
-        set_container("limits.cpu 1")
-        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
-        assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"
+    with subtest("resource limits"):
+        with subtest("Container CPU limits can be managed"):
+            set_container("limits.cpu 1")
+            cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
+            assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"
 
-        set_container("limits.cpu 2")
-        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
-        assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"
+            set_container("limits.cpu 2")
+            cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
+            assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"
 
-    with subtest("Container memory limits can be managed"):
-        set_container("limits.memory 64MB")
-        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
-        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
-        assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"
+        with subtest("Container memory limits can be managed"):
+            set_container("limits.memory 64MB")
+            meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
+            meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
+            assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"
 
-        set_container("limits.memory 128MB")
-        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
-        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
-        assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
+            set_container("limits.memory 128MB")
+            meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
+            meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
+            assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
 
-    with subtest("lxc-container generator configures plain container"):
-        # reuse the existing container to save some time
-        machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
-        check_sysctl("container")
+    with subtest("lxc-generator"):
+        with subtest("lxc-container generator configures plain container"):
+            # reuse the existing container to save some time
+            machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
+            check_sysctl("container")
 
-    with subtest("lxc-container generator configures nested container"):
-        machine.execute("incus delete --force container")
-        machine.succeed("incus launch nixos container --config security.nesting=true")
-        with machine.nested("Waiting for instance to start and be usable"):
-            retry(instance_is_up)
+        with subtest("lxc-container generator configures nested container"):
+            machine.execute("incus delete --force container")
+            machine.succeed("incus launch nixos container --config security.nesting=true")
+            with machine.nested("Waiting for instance to start and be usable"):
+                retry(instance_is_up)
 
-        machine.fail("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
-        target = machine.succeed("incus exec container readlink -- -f /run/systemd/system/systemd-binfmt.service").strip()
-        assert target == "/dev/null", "lxc generator did not correctly mask /run/systemd/system/systemd-binfmt.service"
+            machine.fail("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
+            target = machine.succeed("incus exec container readlink -- -f /run/systemd/system/systemd-binfmt.service").strip()
+            assert target == "/dev/null", "lxc generator did not correctly mask /run/systemd/system/systemd-binfmt.service"
 
-        check_sysctl("container")
+            check_sysctl("container")
 
-    with subtest("lxc-container generator configures privileged container"):
-        machine.execute("incus delete --force container")
-        machine.succeed("incus launch nixos container --config security.privileged=true")
-        with machine.nested("Waiting for instance to start and be usable"):
-            retry(instance_is_up)
+        with subtest("lxc-container generator configures privileged container"):
+            machine.execute("incus delete --force container")
+            machine.succeed("incus launch nixos container --config security.privileged=true")
+            with machine.nested("Waiting for instance to start and be usable"):
+                retry(instance_is_up)
 
-        machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
+            machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
 
-        check_sysctl("container")
+            check_sysctl("container")
+
+    with subtest("softDaemonRestart"):
+        with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
+            pid = machine.succeed("incus info container | grep 'PID'").split(":")[1].strip()
+            machine.succeed(f"ps {pid}")
+            machine.succeed("systemctl stop incus")
+            machine.succeed(f"ps {pid}")
   '';
 })
@@ -16,9 +16,9 @@
       boot.initrd.systemd.enable = true;
     };
   };
+  incusd-options = import ./incusd-options.nix { inherit system pkgs; };
   lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
   openvswitch = import ./openvswitch.nix { inherit system pkgs; };
-  preseed = import ./preseed.nix { inherit system pkgs; };
   socket-activated = import ./socket-activated.nix { inherit system pkgs; };
   storage = import ./storage.nix { inherit system pkgs; };
   ui = import ./ui.nix { inherit system pkgs; };
nixos/tests/incus/incusd-options.nix (new file, 104 lines)
@@ -0,0 +1,104 @@
+# this is a set of tests for non-default options. typically the default options
+# will be handled by the other tests
+import ../make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  let
+    releases = import ../../release.nix {
+      configuration = {
+        # Building documentation makes the test unnecessarily take a longer time:
+        documentation.enable = lib.mkForce false;
+      };
+    };
+
+    container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+    container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+  in
+  {
+    name = "incusd-options";
+
+    meta = {
+      maintainers = lib.teams.lxc.members;
+    };
+
+    nodes.machine = {
+      virtualisation = {
+        cores = 2;
+        memorySize = 1024;
+        diskSize = 4096;
+
+        incus = {
+          enable = true;
+          softDaemonRestart = false;
+
+          preseed = {
+            networks = [
+              {
+                name = "nixostestbr0";
+                type = "bridge";
+                config = {
+                  "ipv4.address" = "10.0.100.1/24";
+                  "ipv4.nat" = "true";
+                };
+              }
+            ];
+            profiles = [
+              {
+                name = "default";
+                devices = {
+                  eth0 = {
+                    name = "eth0";
+                    network = "nixostestbr0";
+                    type = "nic";
+                  };
+                  root = {
+                    path = "/";
+                    pool = "nixostest_pool";
+                    size = "35GiB";
+                    type = "disk";
+                  };
+                };
+              }
+            ];
+            storage_pools = [
+              {
+                name = "nixostest_pool";
+                driver = "dir";
+              }
+            ];
+          };
+        };
+      };
+      networking.nftables.enable = true;
+    };
+
+    testScript = ''
+      def instance_is_up(_) -> bool:
+          status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
+          return status == 0
+
+      machine.wait_for_unit("incus.service")
+      machine.wait_for_unit("incus-preseed.service")
+
+      with subtest("Container image can be imported"):
+          machine.succeed("incus image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
+
+      with subtest("Container can be launched and managed"):
+          machine.succeed("incus launch nixos container")
+          with machine.nested("Waiting for instance to start and be usable"):
+              retry(instance_is_up)
+          machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")
+
+      with subtest("Verify preseed resources created"):
+          machine.succeed("incus profile show default")
+          machine.succeed("incus network info nixostestbr0")
+          machine.succeed("incus storage show nixostest_pool")
+
+      with subtest("Instance is stopped when softDaemonRestart is disabled and services is stopped"):
+          pid = machine.succeed("incus info container | grep 'PID'").split(":")[1].strip()
+          machine.succeed(f"ps {pid}")
+          machine.succeed("systemctl stop incus")
+          machine.fail(f"ps {pid}")
+    '';
+  }
+)
@@ -1,63 +0,0 @@
-import ../make-test-python.nix ({ pkgs, lib, ... } :
-
-{
-  name = "incus-preseed";
-
-  meta = {
-    maintainers = lib.teams.lxc.members;
-  };
-
-  nodes.machine = { lib, ... }: {
-    virtualisation = {
-      incus.enable = true;
-
-      incus.preseed = {
-        networks = [
-          {
-            name = "nixostestbr0";
-            type = "bridge";
-            config = {
-              "ipv4.address" = "10.0.100.1/24";
-              "ipv4.nat" = "true";
-            };
-          }
-        ];
-        profiles = [
-          {
-            name = "nixostest_default";
-            devices = {
-              eth0 = {
-                name = "eth0";
-                network = "nixostestbr0";
-                type = "nic";
-              };
-              root = {
-                path = "/";
-                pool = "default";
-                size = "35GiB";
-                type = "disk";
-              };
-            };
-          }
-        ];
-        storage_pools = [
-          {
-            name = "nixostest_pool";
-            driver = "dir";
-          }
-        ];
-      };
-    };
-    networking.nftables.enable = true;
-  };
-
-  testScript = ''
-    machine.wait_for_unit("incus.service")
-    machine.wait_for_unit("incus-preseed.service")
-
-    with subtest("Verify preseed resources created"):
-        machine.succeed("incus profile show nixostest_default")
-        machine.succeed("incus network info nixostestbr0")
-        machine.succeed("incus storage show nixostest_pool")
-  '';
-})
@@ -75,5 +75,11 @@ in
     machine.succeed("incus config set ${instance-name} limits.cpu=2")
     count = int(machine.succeed("incus exec ${instance-name} -- nproc").strip())
     assert count == 2, f"Wrong number of CPUs reported, want: 2, got: {count}"
+
+    with subtest("Instance remains running when softDaemonRestart is enabled and services is stopped"):
+        pid = machine.succeed("incus info ${instance-name} | grep 'PID'").split(":")[1].strip()
+        machine.succeed(f"ps {pid}")
+        machine.succeed("systemctl stop incus")
+        machine.succeed(f"ps {pid}")
   '';
 })