diff --git a/machines/krz01/_configuration.nix b/machines/krz01/_configuration.nix
index eb8208f..3f4d96a 100644
--- a/machines/krz01/_configuration.nix
+++ b/machines/krz01/_configuration.nix
@@ -1,4 +1,9 @@
-{ lib, pkgs, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
 
 lib.extra.mkConfig {
   enabledModules = [
@@ -30,22 +35,35 @@ lib.extra.mkConfig {
     # We are going to use CUDA here.
     nixpkgs.config.cudaSupport = true;
+    hardware.graphics.enable = true;
+    environment.systemPackages = [
+      ((pkgs.openai-whisper-cpp.override { cudaPackages = pkgs.cudaPackages_11; }).overrideAttrs (old: {
+        src = pkgs.fetchFromGitHub {
+          owner = "ggerganov";
+          repo = "whisper.cpp";
+          rev = "v1.7.1";
+          hash = "sha256-EDFUVjud79ZRCzGbOh9L9NcXfN3ikvsqkVSOME9F9oo=";
+        };
+        env = {
+          WHISPER_CUBLAS = "";
+          GGML_CUDA = "1";
+        };
+        # We only need Compute Capability 3.7.
+        CUDA_ARCH_FLAGS = [ "sm_37" ];
+        # We are GPU-only anyway.
+        patches = (old.patches or [ ]) ++ [
+          ./no-weird-microarch.patch
+          ./all-nvcc-arch.patch
+        ];
+      }))
+    ];
     services.ollama = {
       enable = true;
-      package =
-        (pkgs.ollama.override {
-          cudaPackages = pkgs.cudaPackages_11;
-          gcc12 = pkgs.gcc11;
-        }).overrideAttrs
-          (old: {
-            CMAKE_CUDA_ARCHITECTURES = "35";
-            ldflags = old.ldflags ++ [
-              # K80 is 3.5
-              "-X=github.com/ollama/ollama/gpu.CudaComputeMajorMin=3"
-              "-X=github.com/ollama/ollama/gpu.CudaComputeMinorMin=5"
-            ];
-            patches = (old.patches or [ ]) ++ [ ./K80-support.patch ];
-          });
+      package = pkgs.callPackage ./ollama.nix {
+        cudaPackages = pkgs.cudaPackages_11;
+        # We need to thread our nvidia x11 driver for CUDA.
+        extraLibraries = [ config.hardware.nvidia.package ];
+      };
     };
 
     users.users.root.hashedPassword = "$y$j9T$eNZQgDN.J5y7KTG2hXgat1$J1i5tjx5dnSZu.C9B7swXi5zMFIkUnmRrnmyLHFAt8/";
   };
diff --git a/machines/krz01/all-nvcc-arch.patch b/machines/krz01/all-nvcc-arch.patch
new file mode 100644
index 0000000..6696836
--- /dev/null
+++ b/machines/krz01/all-nvcc-arch.patch
@@ -0,0 +1,26 @@
+From 2278389ef9ac9231349440aa68f9544ddc69cdc7 Mon Sep 17 00:00:00 2001
+From: Raito Bezarius
+Date: Wed, 9 Oct 2024 13:37:08 +0200
+Subject: [PATCH] fix: sm_37 for nvcc
+
+Signed-off-by: Raito Bezarius
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 2ccb750..70dfd9b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -537,7 +537,7 @@ endif #GGML_CUDA_NVCC
+ ifdef CUDA_DOCKER_ARCH
+ 	MK_NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
+ else ifndef CUDA_POWER_ARCH
+-	MK_NVCCFLAGS += -arch=native
++	MK_NVCCFLAGS += -arch=sm_37
+ endif # CUDA_DOCKER_ARCH
+ 
+ ifdef GGML_CUDA_FORCE_DMMV
+-- 
+2.46.0
+
diff --git a/machines/krz01/disable-git.patch b/machines/krz01/disable-git.patch
new file mode 100644
index 0000000..c305c48
--- /dev/null
+++ b/machines/krz01/disable-git.patch
@@ -0,0 +1,20 @@
+diff --git c/llm/generate/gen_common.sh i/llm/generate/gen_common.sh
+index 3825c155..238a74a7 100644
+--- c/llm/generate/gen_common.sh
++++ i/llm/generate/gen_common.sh
+@@ -69,6 +69,7 @@ git_module_setup() {
+ }
+ 
+ apply_patches() {
++    return
+     # apply temporary patches until fix is upstream
+     for patch in ../patches/*.patch; do
+         git -c 'user.name=nobody' -c 'user.email=<>' -C ${LLAMACPP_DIR} am ${patch}
+@@ -133,6 +134,7 @@ install() {
+ 
+ # Keep the local tree clean after we're done with the build
+ cleanup() {
++    return
+     (cd ${LLAMACPP_DIR}/ && git checkout CMakeLists.txt)
+ 
+     if [ -n "$(ls -A ../patches/*.diff)" ]; then
diff --git a/machines/krz01/no-weird-microarch.patch b/machines/krz01/no-weird-microarch.patch
new file mode 100644
index 0000000..7a93b53
--- /dev/null
+++ b/machines/krz01/no-weird-microarch.patch
@@ -0,0 +1,34 @@
+From 51568b61ef63ecd97867562571411082c32751d3 Mon Sep 17 00:00:00 2001
+From: Raito Bezarius
+Date: Wed, 9 Oct 2024 13:36:51 +0200
+Subject: [PATCH] fix: avx & f16c in Makefile
+
+Signed-off-by: Raito Bezarius
+---
+ Makefile | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 32b7cbb..2ccb750 100644
+--- a/Makefile
++++ b/Makefile
+@@ -361,12 +361,12 @@ ifndef RISCV
+ 
+ ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
+ 	# Use all CPU extensions that are available:
+-	MK_CFLAGS     += -march=native -mtune=native
+-	HOST_CXXFLAGS += -march=native -mtune=native
++	# MK_CFLAGS     += -march=native -mtune=native
++	# HOST_CXXFLAGS += -march=native -mtune=native
+ 
+ 	# Usage AVX-only
+-	#MK_CFLAGS   += -mfma -mf16c -mavx
+-	#MK_CXXFLAGS += -mfma -mf16c -mavx
++	MK_CFLAGS   += -mf16c -mavx
++	MK_CXXFLAGS += -mf16c -mavx
+ 
+ 	# Usage SSSE3-only (Not is SSE3!)
+ 	#MK_CFLAGS   += -mssse3
+-- 
+2.46.0
+
diff --git a/machines/krz01/ollama.nix b/machines/krz01/ollama.nix
new file mode 100644
index 0000000..6e252c1
--- /dev/null
+++ b/machines/krz01/ollama.nix
@@ -0,0 +1,243 @@
+{
+  lib,
+  buildGoModule,
+  fetchFromGitHub,
+  buildEnv,
+  linkFarm,
+  overrideCC,
+  makeWrapper,
+  stdenv,
+  addDriverRunpath,
+  nix-update-script,
+
+  cmake,
+  gcc11,
+  clblast,
+  libdrm,
+  rocmPackages,
+  cudaPackages,
+  darwin,
+  autoAddDriverRunpath,
+  extraLibraries ? [ ],
+
+  nixosTests,
+  testers,
+  ollama,
+  ollama-rocm,
+  ollama-cuda,
+
+  config,
+  # one of `[ null false "rocm" "cuda" ]`
+  acceleration ? null,
+}:
+
+assert builtins.elem acceleration [
+  null
+  false
+  "rocm"
+  "cuda"
+];
+
+let
+  pname = "ollama";
+  version = "2024-09-10-cc35";
+
+  src = fetchFromGitHub {
+    owner = "aliotard";
+    repo = "ollama";
+    rev = "34827c01f7723c7f5f9f5e392fe85f5a4a5d5fc0";
+    hash = "sha256-xFNuqcW7YWeyCyw5QLBnCHHTSMITR6LJkJT0CXZC+Y8=";
+    fetchSubmodules = true;
+  };
+
+  vendorHash = "sha256-hSxcREAujhvzHVNwnRTfhi0MKI3s8HNavER2VLz6SYk=";
+
+  validateFallback = lib.warnIf (config.rocmSupport && config.cudaSupport) (lib.concatStrings [
+    "both `nixpkgs.config.rocmSupport` and `nixpkgs.config.cudaSupport` are enabled, "
+    "but they are mutually exclusive; falling back to cpu"
+  ]) (!(config.rocmSupport && config.cudaSupport));
+  shouldEnable =
+    mode: fallback: (acceleration == mode) || (fallback && acceleration == null && validateFallback);
+
+  rocmRequested = shouldEnable "rocm" config.rocmSupport;
+  cudaRequested = shouldEnable "cuda" config.cudaSupport;
+
+  enableRocm = rocmRequested && stdenv.isLinux;
+  enableCuda = cudaRequested && stdenv.isLinux;
+
+  rocmLibs = [
+    rocmPackages.clr
+    rocmPackages.hipblas
+    rocmPackages.rocblas
+    rocmPackages.rocsolver
+    rocmPackages.rocsparse
+    rocmPackages.rocm-device-libs
+    rocmPackages.rocm-smi
+  ];
+  rocmClang = linkFarm "rocm-clang" { llvm = rocmPackages.llvm.clang; };
+  rocmPath = buildEnv {
+    name = "rocm-path";
+    paths = rocmLibs ++ [ rocmClang ];
+  };
+
+  cudaLibs = [
+    cudaPackages.cuda_cudart
+    cudaPackages.libcublas
+    cudaPackages.cuda_cccl
+  ];
+  cudaToolkit = buildEnv {
+    name = "cuda-merged";
+    paths = map lib.getLib cudaLibs ++ [
+      (lib.getOutput "static" cudaPackages.cuda_cudart)
+      (lib.getBin (cudaPackages.cuda_nvcc.__spliced.buildHost or cudaPackages.cuda_nvcc))
+    ];
+  };
+
+  metalFrameworks = with darwin.apple_sdk_11_0.frameworks; [
+    Accelerate
+    Metal
+    MetalKit
+    MetalPerformanceShaders
+  ];
+
+  wrapperOptions =
+    [
+      # ollama embeds llama-cpp binaries which actually run the ai models
+      # these llama-cpp binaries are unaffected by the ollama binary's DT_RUNPATH
+      # LD_LIBRARY_PATH is temporarily required to use the gpu
+      # until these llama-cpp binaries can have their runpath patched
+      "--suffix LD_LIBRARY_PATH : '${addDriverRunpath.driverLink}/lib'"
+      "--suffix LD_LIBRARY_PATH : '${lib.makeLibraryPath (map lib.getLib extraLibraries)}'"
+    ]
+    ++ lib.optionals enableRocm [
+      "--suffix LD_LIBRARY_PATH : '${rocmPath}/lib'"
+      "--set-default HIP_PATH '${rocmPath}'"
+    ]
+    ++ lib.optionals enableCuda [
+      "--suffix LD_LIBRARY_PATH : '${lib.makeLibraryPath (map lib.getLib cudaLibs)}'"
+    ];
+  wrapperArgs = builtins.concatStringsSep " " wrapperOptions;
+
+  goBuild =
+    if enableCuda then buildGoModule.override { stdenv = overrideCC stdenv gcc11; } else buildGoModule;
+  inherit (lib) licenses platforms maintainers;
+in
+goBuild {
+  inherit
+    pname
+    version
+    src
+    vendorHash
+    ;
+
+  env =
+    lib.optionalAttrs enableRocm {
+      ROCM_PATH = rocmPath;
+      CLBlast_DIR = "${clblast}/lib/cmake/CLBlast";
+    }
+    // lib.optionalAttrs enableCuda { CUDA_LIB_DIR = "${cudaToolkit}/lib"; }
+    // {
+      CMAKE_CUDA_ARCHITECTURES = "35;37";
+    };
+
+  nativeBuildInputs =
+    [ cmake ]
+    ++ lib.optionals enableRocm [ rocmPackages.llvm.bintools ]
+    ++ lib.optionals enableCuda [ cudaPackages.cuda_nvcc ]
+    ++ lib.optionals (enableRocm || enableCuda) [
+      makeWrapper
+      autoAddDriverRunpath
+    ]
+    ++ lib.optionals stdenv.isDarwin metalFrameworks;
+
+  buildInputs =
+    lib.optionals enableRocm (rocmLibs ++ [ libdrm ])
+    ++ lib.optionals enableCuda cudaLibs
+    ++ lib.optionals stdenv.isDarwin metalFrameworks;
+
+  patches = [
+    # disable uses of `git` in the `go generate` script
+    # ollama's build script assumes the source is a git repo, but nix removes the git directory
+    # this also disables necessary patches contained in `ollama/llm/patches/`
+    # those patches are applied in `postPatch`
+    ./disable-git.patch
+  ];
+
+  postPatch = ''
+    # replace inaccurate version number with actual release version
+    substituteInPlace version/version.go --replace-fail 0.0.0 '${version}'
+
+    # apply ollama's patches to `llama.cpp` submodule
+    for diff in llm/patches/*; do
+      patch -p1 -d llm/llama.cpp < $diff
+    done
+  '';
+
+  overrideModAttrs = _: _: {
+    # don't run llama.cpp build in the module fetch phase
+    preBuild = "";
+  };
+
+  preBuild = ''
+    # disable uses of `git`, since nix removes the git directory
+    export OLLAMA_SKIP_PATCHING=true
+
+    # build llama.cpp libraries for ollama
+    go generate ./...
+  '';
+
+  postFixup =
+    ''
+      # the app doesn't appear functional at the moment, so hide it
+      mv "$out/bin/app" "$out/bin/.ollama-app"
+    ''
+    + lib.optionalString (enableRocm || enableCuda) ''
+      # expose runtime libraries necessary to use the gpu
+      wrapProgram "$out/bin/ollama" ${wrapperArgs}
+    '';
+
+  ldflags = [
+    "-s"
+    "-w"
+    "-X=github.com/ollama/ollama/version.Version=${version}"
+    "-X=github.com/ollama/ollama/server.mode=release"
+    "-X=github.com/ollama/ollama/gpu.CudaComputeMajorMin=3"
+    "-X=github.com/ollama/ollama/gpu.CudaComputeMinorMin=5"
+  ];
+
+  passthru = {
+    tests =
+      {
+        inherit ollama;
+        version = testers.testVersion {
+          inherit version;
+          package = ollama;
+        };
+      }
+      // lib.optionalAttrs stdenv.isLinux {
+        inherit ollama-rocm ollama-cuda;
+        service = nixosTests.ollama;
+        service-cuda = nixosTests.ollama-cuda;
+        service-rocm = nixosTests.ollama-rocm;
+      };
+
+    updateScript = nix-update-script { };
+  };
+
+  meta = {
+    description =
+      "Get up and running with large language models locally"
+      + lib.optionalString rocmRequested ", using ROCm for AMD GPU acceleration"
+      + lib.optionalString cudaRequested ", using CUDA for NVIDIA GPU acceleration";
+    homepage = "https://github.com/ollama/ollama";
+    changelog = "https://github.com/ollama/ollama/releases/tag/v${version}";
+    license = licenses.mit;
+    platforms = if (rocmRequested || cudaRequested) then platforms.linux else platforms.unix;
+    mainProgram = "ollama";
+    maintainers = with maintainers; [
+      abysssol
+      dit7ya
+      elohmeier
+      roydubnium
+    ];
+  };
+}