diff --git a/machines/krz01/_configuration.nix b/machines/krz01/_configuration.nix
index 154edfe..a1e821d 100644
--- a/machines/krz01/_configuration.nix
+++ b/machines/krz01/_configuration.nix
@@ -15,7 +15,9 @@ lib.extra.mkConfig {
   ];
 
   extraConfig = {
-    microvm.host.enable = true;
+    microvm = {
+      host.enable = true;
+    };
     dgn-hardware = {
       useZfs = true;
       zfsPools = [
diff --git a/machines/krz01/microvm-ml01.nix b/machines/krz01/microvm-ml01.nix
index 5980524..a3811ce 100644
--- a/machines/krz01/microvm-ml01.nix
+++ b/machines/krz01/microvm-ml01.nix
@@ -1,22 +1,48 @@
 _: {
   microvm.autostart = [ "ml01" ];
   microvm.vms.ml01 = {
-    config = {
-      networking.hostName = "ml01";
-      services.ollama = {
-        enable = true;
-        listenAddress = "0.0.0.0:11434";
-        sandbox = true;
-        acceleration = "cuda";
+    config =
+      { config, ... }:
+      {
+        nixpkgs.config.cudaSupport = true;
+        nixpkgs.config.nvidia.acceptLicense = true;
+        # Tesla K80 is not supported by the latest driver.
+        hardware.nvidia.package = config.boot.kernelPackages.nvidia_x11_legacy470;
+        # Don't ask.
+        services.xserver.videoDrivers = [ "nvidia" ];
+        networking.hostName = "ml01";
+        services.ollama = {
+          enable = true;
+          listenAddress = "0.0.0.0:11434";
+          sandbox = true;
+          acceleration = "cuda";
+        };
+
+        microvm = {
+          hypervisor = "cloud-hypervisor";
+          vcpu = 4;
+          mem = 4096;
+          balloonMem = 2048;
+          devices = [
+            # The nVidia Tesla K80
+            {
+              bus = "pci";
+              path = "0000:44:00.0";
+            }
+            {
+              bus = "pci";
+              path = "0000:45:00.0";
+            }
+          ];
+          shares = [
+            {
+              source = "/nix/store";
+              mountPoint = "/nix/.ro-store";
+              tag = "ro-store";
+              proto = "virtiofs";
+            }
+          ];
+        };
       };
-      microvm.shares = [
-        {
-          source = "/nix/store";
-          mountPoint = "/nix/.ro-store";
-          tag = "ro-store";
-          proto = "virtiofs";
-        }
-      ];
-    };
   };
 }