feat(krz01): introduce ML01 -- a machine learning VM

I will add Ollama to it later on and pass the GPU through to it.

Signed-off-by: Ryan Lahfa <ryan@dgnum.eu>

parent  e200ae53a4
commit  ebed6462f6

3 changed files with 31 additions and 1 deletion
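The GPU passthrough mentioned in the commit message is not part of this change. As a rough sketch of what it could later look like with microvm.nix's `microvm.devices` option (the PCI address below is hypothetical, and the host would additionally need IOMMU enabled and the K80 bound to vfio-pci):

_: {
  microvm.vms.ml01.config.microvm.devices = [
    {
      # VFIO passthrough of the Tesla K80. The PCI address is a placeholder;
      # the real one comes from `lspci -nn` on krz01.
      bus = "pci";
      path = "0000:08:00.0";
    }
  ];
}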
@@ -10,6 +10,8 @@ lib.extra.mkConfig {
     "microvm-router01"
     "nvidia-tesla-k80"
     "proxmox"
+    # Machine learning API machine
+    "microvm-ml01"
   ];

   extraConfig = {
@@ -24,6 +26,9 @@ lib.extra.mkConfig {

     services.netbird.enable = true;

+    # We are going to use CUDA here.
+    nixpkgs.config.cudaSupport = true;
+
     users.users.root.hashedPassword = "$y$j9T$eNZQgDN.J5y7KTG2hXgat1$J1i5tjx5dnSZu.C9B7swXi5zMFIkUnmRrnmyLHFAt8/";
   };

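Setting `nixpkgs.config.cudaSupport = true` host-wide makes a large part of the package set rebuild with CUDA enabled, so it is usually paired with a CUDA binary cache. A sketch of that, not part of this commit, assuming the nix-community CUDA maintainers' cache is acceptable here:

_: {
  nix.settings.substituters = [ "https://cuda-maintainers.cachix.org" ];
  # The cache's public key also has to be added to
  # nix.settings.trusted-public-keys (see its page on cachix.org).
}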
machines/krz01/microvm-ml01.nix (new file, 22 additions)

@@ -0,0 +1,22 @@
+_: {
+  microvm.autostart = [ "ml01" ];
+  microvm.vms.ml01 = {
+    config = {
+      networking.hostName = "ml01";
+      services.ollama = {
+        enable = true;
+        listenAddress = "0.0.0.0:11434";
+        sandbox = true;
+        acceleration = "cuda";
+      };
+      microvm.shares = [
+        {
+          source = "/nix/store";
+          mountPoint = "/nix/.ro-store";
+          tag = "ro-store";
+          proto = "virtiofs";
+        }
+      ];
+    };
+  };
+}

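Ollama listens on 0.0.0.0:11434 inside the VM, so other machines can only reach the API if the guest firewall lets that port through. A minimal sketch of that, not part of this commit, assuming the guest keeps NixOS's default firewall enabled:

_: {
  # Let other hosts reach the Ollama API on its listen port.
  microvm.vms.ml01.config.networking.firewall.allowedTCPPorts = [ 11434 ];
}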
@@ -1,5 +1,8 @@
 { config, ... }:
 {
+  nixpkgs.config.nvidia.acceptLicense = true;
+  # Tesla K80 is not supported by the latest driver.
-  hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages_legacy_470;
+  hardware.nvidia.package = config.boot.kernelPackages.nvidia_x11_legacy470;
+  # Don't ask.
   services.xserver.videoDrivers = [ "nvidia" ];
 }