commit d05fcb7e2c96c1465d94d670b58c82dd5966fcfd Author: Julien Malka Date: Sat Oct 12 00:20:58 2024 +0200 chore: init diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..1d953f4 --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +use nix diff --git a/.forgejo/workflows/check-meta.yaml b/.forgejo/workflows/check-meta.yaml new file mode 100644 index 0000000..27af558 --- /dev/null +++ b/.forgejo/workflows/check-meta.yaml @@ -0,0 +1,25 @@ +name: Check meta +on: + pull_request: + branches: + - main + push: + paths: + - 'meta/*' + +jobs: + check_meta: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Check the validity of meta options + run: nix-build meta/verify.nix -A meta + + check_dns: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Check the validity of the DNS configuration + run: nix-build meta/verify.nix -A dns --no-out-link diff --git a/.forgejo/workflows/ds-fr.yaml b/.forgejo/workflows/ds-fr.yaml new file mode 100644 index 0000000..f54b414 --- /dev/null +++ b/.forgejo/workflows/ds-fr.yaml @@ -0,0 +1,56 @@ +name: ds-fr update +on: + schedule: + - cron: "26 18 * * wed" + +jobs: + npins_update: + runs-on: nix + steps: + - uses: actions/checkout@v3 + with: + token: ${{ secrets.TEA_DGNUM_CHORES_TOKEN }} + + - name: Update DS and open PR if necessary + run: | + # Fetch the latest release tag + VERSION=$(curl -L \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/repos/demarches-simplifiees/demarches-simplifiees.fr/releases/latest \ + | jq -r '.tag_name') + + # Move to the ds-fr directory + cd machines/compute01/ds-fr/package + + # Run the update script + ./update.sh -v "$VERSION" + + if [ ! -z "$(git diff --name-only)" ]; then + echo "[+] Changes detected, pushing updates." + + git switch -C ds-update + + git add . 
+ + git config user.name "DGNum Chores" + git config user.email "tech@dgnum.eu" + + git commit --message "chore(ds-fr): Update" + git push --set-upstream origin ds-update --force + + # Connect to the server with the cli + tea login add \ + -n dgnum-chores \ + -t '${{ secrets.TEA_DGNUM_CHORES_TOKEN }}' \ + -u https://git.dgnum.eu + + # Create a pull request if needed + # i.e. no PR with the same title exists + if [ -z "$(tea pr ls -f='title,author' -o simple | grep 'chore(ds-fr): Update dgnum-chores')" ]; then + tea pr create \ + --description "Automatic ds-fr update" \ + --title "chore(ds-fr): Update" \ + --head ds-update + fi + fi diff --git a/.forgejo/workflows/eval.yaml b/.forgejo/workflows/eval.yaml new file mode 100644 index 0000000..106743e --- /dev/null +++ b/.forgejo/workflows/eval.yaml @@ -0,0 +1,200 @@ +name: build configuration +on: + pull_request: + types: [opened, synchronize, edited, reopened] + branches: + - main + push: + branches: + - main + +jobs: + build_and_cache_krz01: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "krz01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_krz01 + path: paths.txt + + build_and_cache_compute01: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "compute01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_compute01 + path: paths.txt + + build_and_cache_storage01: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + 
STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "storage01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_storage01 + path: paths.txt + + build_and_cache_rescue01: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "rescue01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_rescue01 + path: paths.txt + + build_and_cache_geo01: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "geo01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_geo01 + path: paths.txt + + build_and_cache_geo02: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "geo02" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_geo02 + path: paths.txt + + build_and_cache_vault01: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "vault01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_vault01 + path: paths.txt + + build_and_cache_web01: + runs-on: nix + steps: + - 
uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "web01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_web01 + path: paths.txt + + build_and_cache_web02: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "web02" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_web02 + path: paths.txt + + build_and_cache_bridge01: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Build and cache the node + run: nix-shell --run cache-node + env: + STORE_ENDPOINT: "https://tvix-store.dgnum.eu/infra-signing/" + STORE_USER: "admin" + STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }} + BUILD_NODE: "bridge01" + + - uses: actions/upload-artifact@v3 + if: always() + with: + name: outputs_bridge01 + path: paths.txt diff --git a/.forgejo/workflows/lint.yaml b/.forgejo/workflows/lint.yaml new file mode 100644 index 0000000..4b58ae7 --- /dev/null +++ b/.forgejo/workflows/lint.yaml @@ -0,0 +1,11 @@ +name: lint +on: [push, pull_request] + +jobs: + check: + runs-on: nix + steps: + - uses: actions/checkout@v3 + + - name: Run pre-commit on all files + run: nix-shell --run 'pre-commit run --all-files --hook-stage pre-push --show-diff-on-failure' -A shells.pre-commit ./. 
diff --git a/.forgejo/workflows/npins.yaml b/.forgejo/workflows/npins.yaml new file mode 100644 index 0000000..f1cff0e --- /dev/null +++ b/.forgejo/workflows/npins.yaml @@ -0,0 +1,58 @@ +name: npins update +on: + schedule: + # Run every day at 15:25 + - cron: "25 15 * * *" + +jobs: + npins_update: + runs-on: nix + steps: + # - name: Install applications + # run: apt-get update && apt-get install sudo + # + - uses: actions/checkout@v3 + with: + depth: 0 + token: ${{ secrets.TEA_DGNUM_CHORES_TOKEN }} + # + # - uses: https://github.com/cachix/install-nix-action@v22 + # with: + # nix_path: nixpkgs=channel:nixos-unstable + + # - name: Install tea + # run: | + # nix-env -f '' -i tea + + - name: Update dependencies and open PR if necessary + run: | + npins update + + if [ ! -z "$(git diff --name-only)" ]; then + echo "[+] Changes detected, pushing updates." + + git switch -C npins-update + + git add npins + + git config user.name "DGNum Chores" + git config user.email "tech@dgnum.eu" + + git commit --message "chore(npins): Update" + git push --set-upstream origin npins-update --force + + # Connect to the server with the cli + tea login add \ + -n dgnum-chores \ + -t '${{ secrets.TEA_DGNUM_CHORES_TOKEN }}' \ + -u https://git.dgnum.eu + + # Create a pull request if needed + # i.e. 
no PR with the same title exists + if [ -z "$(tea pr ls -f='title,author' -o simple | grep 'chore(npins): Update dgnum-chores')" ]; then + tea pr create \ + --description "Automatic npins update" \ + --title "chore(npins): Update" \ + --head npins-update + fi + fi diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 120000 index 0000000..484aa20 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1 @@ +/nix/store/fqvsrc65664l0j62lpssg29kbb1d8nkd-pre-commit-config.json \ No newline at end of file diff --git a/CONTRIBUTE.md b/CONTRIBUTE.md new file mode 100644 index 0000000..c07e430 --- /dev/null +++ b/CONTRIBUTE.md @@ -0,0 +1,42 @@ +# Contribuer + +Quelques éléments à savoir: +- La branche `main` doit normalement suivre l'état des machines en production. Si vous ne déployez pas tout de suite faîtes une branche et une PR. +- Les commits doivent passer le crochet de `pre-commit` (cf ci-dessous pour savoir comment l'installer) + + +## Shell de développement et crochets `pre-commit` + +Le dépot possède un shell de développement: +``` +nix-shell +``` + +Pour installer les crochets, il suffit de lancer le shell de developpement une fois. + + +## Lister les machines + +Dans le shell de developpement, exécuter: +``` +list-nodes +``` + + +## Lancer une vm de test + +Dans le shell de developpement, exécuter: +``` +launch-vm -p MACHINE +``` + +On peut faire du port forwarding (pour avoir accès à ssh par exemple) avec l'option `-p HOSTPORT-:GUESTPORT`. 
+ + +## Tester les versions en production + +En attendant [https://github.com/RaitoBezarius/mimir](`mimir`), on peut tester localement si c'est bien le dernier commit de main qui est déployé avec: +``` +nix-shell --run 'check-deployment [NODE_NAME]' +``` + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..64e1d71 --- /dev/null +++ b/LICENSE @@ -0,0 +1,547 @@ + CONTRAT DE LICENCE DE LOGICIEL LIBRE CeCILL + +Version 2.1 du 2013-06-21 + + + Avertissement + +Ce contrat est une licence de logiciel libre issue d'une concertation +entre ses auteurs afin que le respect de deux grands principes préside à +sa rédaction: + + * d'une part, le respect des principes de diffusion des logiciels + libres: accès au code source, droits étendus conférés aux utilisateurs, + * d'autre part, la désignation d'un droit applicable, le droit + français, auquel elle est conforme, tant au regard du droit de la + responsabilité civile que du droit de la propriété intellectuelle et + de la protection qu'il offre aux auteurs et titulaires des droits + patrimoniaux sur un logiciel. + +Les auteurs de la licence CeCILL (Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre]) +sont: + +Commissariat à l'énergie atomique et aux énergies alternatives - CEA, +établissement public de recherche à caractère scientifique, technique et +industriel, dont le siège est situé 25 rue Leblanc, immeuble Le Ponant +D, 75015 Paris. + +Centre National de la Recherche Scientifique - CNRS, établissement +public à caractère scientifique et technologique, dont le siège est +situé 3 rue Michel-Ange, 75794 Paris cedex 16. + +Institut National de Recherche en Informatique et en Automatique - +Inria, établissement public à caractère scientifique et technologique, +dont le siège est situé Domaine de Voluceau, Rocquencourt, BP 105, 78153 +Le Chesnay cedex. 
+ + + Préambule + +Ce contrat est une licence de logiciel libre dont l'objectif est de +conférer aux utilisateurs la liberté de modification et de +redistribution du logiciel régi par cette licence dans le cadre d'un +modèle de diffusion en logiciel libre. + +L'exercice de ces libertés est assorti de certains devoirs à la charge +des utilisateurs afin de préserver ce statut au cours des +redistributions ultérieures. + +L'accessibilité au code source et les droits de copie, de modification +et de redistribution qui en découlent ont pour contrepartie de n'offrir +aux utilisateurs qu'une garantie limitée et de ne faire peser sur +l'auteur du logiciel, le titulaire des droits patrimoniaux et les +concédants successifs qu'une responsabilité restreinte. + +A cet égard l'attention de l'utilisateur est attirée sur les risques +associés au chargement, à l'utilisation, à la modification et/ou au +développement et à la reproduction du logiciel par l'utilisateur étant +donné sa spécificité de logiciel libre, qui peut le rendre complexe à +manipuler et qui le réserve donc à des développeurs ou des +professionnels avertis possédant des connaissances informatiques +approfondies. Les utilisateurs sont donc invités à charger et tester +l'adéquation du logiciel à leurs besoins dans des conditions permettant +d'assurer la sécurité de leurs systèmes et/ou de leurs données et, plus +généralement, à l'utiliser et l'exploiter dans les mêmes conditions de +sécurité. Ce contrat peut être reproduit et diffusé librement, sous +réserve de le conserver en l'état, sans ajout ni suppression de clauses. + +Ce contrat est susceptible de s'appliquer à tout logiciel dont le +titulaire des droits patrimoniaux décide de soumettre l'exploitation aux +dispositions qu'il contient. + +Une liste de questions fréquemment posées se trouve sur le site web +officiel de la famille des licences CeCILL +(http://www.cecill.info/index.fr.html) pour toute clarification qui +serait nécessaire. 
+ + + Article 1 - DEFINITIONS + +Dans ce contrat, les termes suivants, lorsqu'ils seront écrits avec une +lettre capitale, auront la signification suivante: + +Contrat: désigne le présent contrat de licence, ses éventuelles versions +postérieures et annexes. + +Logiciel: désigne le logiciel sous sa forme de Code Objet et/ou de Code +Source et le cas échéant sa documentation, dans leur état au moment de +l'acceptation du Contrat par le Licencié. + +Logiciel Initial: désigne le Logiciel sous sa forme de Code Source et +éventuellement de Code Objet et le cas échéant sa documentation, dans +leur état au moment de leur première diffusion sous les termes du Contrat. + +Logiciel Modifié: désigne le Logiciel modifié par au moins une +Contribution. + +Code Source: désigne l'ensemble des instructions et des lignes de +programme du Logiciel et auquel l'accès est nécessaire en vue de +modifier le Logiciel. + +Code Objet: désigne les fichiers binaires issus de la compilation du +Code Source. + +Titulaire: désigne le ou les détenteurs des droits patrimoniaux d'auteur +sur le Logiciel Initial. + +Licencié: désigne le ou les utilisateurs du Logiciel ayant accepté le +Contrat. + +Contributeur: désigne le Licencié auteur d'au moins une Contribution. + +Concédant: désigne le Titulaire ou toute personne physique ou morale +distribuant le Logiciel sous le Contrat. + +Contribution: désigne l'ensemble des modifications, corrections, +traductions, adaptations et/ou nouvelles fonctionnalités intégrées dans +le Logiciel par tout Contributeur, ainsi que tout Module Interne. + +Module: désigne un ensemble de fichiers sources y compris leur +documentation qui permet de réaliser des fonctionnalités ou services +supplémentaires à ceux fournis par le Logiciel. + +Module Externe: désigne tout Module, non dérivé du Logiciel, tel que ce +Module et le Logiciel s'exécutent dans des espaces d'adressage +différents, l'un appelant l'autre au moment de leur exécution. 
+ +Module Interne: désigne tout Module lié au Logiciel de telle sorte +qu'ils s'exécutent dans le même espace d'adressage. + +GNU GPL: désigne la GNU General Public License dans sa version 2 ou +toute version ultérieure, telle que publiée par Free Software Foundation +Inc. + +GNU Affero GPL: désigne la GNU Affero General Public License dans sa +version 3 ou toute version ultérieure, telle que publiée par Free +Software Foundation Inc. + +EUPL: désigne la Licence Publique de l'Union européenne dans sa version +1.1 ou toute version ultérieure, telle que publiée par la Commission +Européenne. + +Parties: désigne collectivement le Licencié et le Concédant. + +Ces termes s'entendent au singulier comme au pluriel. + + + Article 2 - OBJET + +Le Contrat a pour objet la concession par le Concédant au Licencié d'une +licence non exclusive, cessible et mondiale du Logiciel telle que +définie ci-après à l'article 5 <#etendue> pour toute la durée de +protection des droits portant sur ce Logiciel. + + + Article 3 - ACCEPTATION + +3.1 L'acceptation par le Licencié des termes du Contrat est réputée +acquise du fait du premier des faits suivants: + + * (i) le chargement du Logiciel par tout moyen notamment par + téléchargement à partir d'un serveur distant ou par chargement à + partir d'un support physique; + * (ii) le premier exercice par le Licencié de l'un quelconque des + droits concédés par le Contrat. + +3.2 Un exemplaire du Contrat, contenant notamment un avertissement +relatif aux spécificités du Logiciel, à la restriction de garantie et à +la limitation à un usage par des utilisateurs expérimentés a été mis à +disposition du Licencié préalablement à son acceptation telle que +définie à l'article 3.1 <#acceptation-acquise> ci dessus et le Licencié +reconnaît en avoir pris connaissance. 
+ + + Article 4 - ENTREE EN VIGUEUR ET DUREE + + + 4.1 ENTREE EN VIGUEUR + +Le Contrat entre en vigueur à la date de son acceptation par le Licencié +telle que définie en 3.1 <#acceptation-acquise>. + + + 4.2 DUREE + +Le Contrat produira ses effets pendant toute la durée légale de +protection des droits patrimoniaux portant sur le Logiciel. + + + Article 5 - ETENDUE DES DROITS CONCEDES + +Le Concédant concède au Licencié, qui accepte, les droits suivants sur +le Logiciel pour toutes destinations et pour la durée du Contrat dans +les conditions ci-après détaillées. + +Par ailleurs, si le Concédant détient ou venait à détenir un ou +plusieurs brevets d'invention protégeant tout ou partie des +fonctionnalités du Logiciel ou de ses composants, il s'engage à ne pas +opposer les éventuels droits conférés par ces brevets aux Licenciés +successifs qui utiliseraient, exploiteraient ou modifieraient le +Logiciel. En cas de cession de ces brevets, le Concédant s'engage à +faire reprendre les obligations du présent alinéa aux cessionnaires. + + + 5.1 DROIT D'UTILISATION + +Le Licencié est autorisé à utiliser le Logiciel, sans restriction quant +aux domaines d'application, étant ci-après précisé que cela comporte: + + 1. + + la reproduction permanente ou provisoire du Logiciel en tout ou + partie par tout moyen et sous toute forme. + + 2. + + le chargement, l'affichage, l'exécution, ou le stockage du Logiciel + sur tout support. + + 3. + + la possibilité d'en observer, d'en étudier, ou d'en tester le + fonctionnement afin de déterminer les idées et principes qui sont à + la base de n'importe quel élément de ce Logiciel; et ceci, lorsque + le Licencié effectue toute opération de chargement, d'affichage, + d'exécution, de transmission ou de stockage du Logiciel qu'il est en + droit d'effectuer en vertu du Contrat. 
+ + + 5.2 DROIT D'APPORTER DES CONTRIBUTIONS + +Le droit d'apporter des Contributions comporte le droit de traduire, +d'adapter, d'arranger ou d'apporter toute autre modification au Logiciel +et le droit de reproduire le logiciel en résultant. + +Le Licencié est autorisé à apporter toute Contribution au Logiciel sous +réserve de mentionner, de façon explicite, son nom en tant qu'auteur de +cette Contribution et la date de création de celle-ci. + + + 5.3 DROIT DE DISTRIBUTION + +Le droit de distribution comporte notamment le droit de diffuser, de +transmettre et de communiquer le Logiciel au public sur tout support et +par tout moyen ainsi que le droit de mettre sur le marché à titre +onéreux ou gratuit, un ou des exemplaires du Logiciel par tout procédé. + +Le Licencié est autorisé à distribuer des copies du Logiciel, modifié ou +non, à des tiers dans les conditions ci-après détaillées. + + + 5.3.1 DISTRIBUTION DU LOGICIEL SANS MODIFICATION + +Le Licencié est autorisé à distribuer des copies conformes du Logiciel, +sous forme de Code Source ou de Code Objet, à condition que cette +distribution respecte les dispositions du Contrat dans leur totalité et +soit accompagnée: + + 1. + + d'un exemplaire du Contrat, + + 2. + + d'un avertissement relatif à la restriction de garantie et de + responsabilité du Concédant telle que prévue aux articles 8 + <#responsabilite> et 9 <#garantie>, + +et que, dans le cas où seul le Code Objet du Logiciel est redistribué, +le Licencié permette un accès effectif au Code Source complet du +Logiciel pour une durée d'au moins 3 ans à compter de la distribution du +logiciel, étant entendu que le coût additionnel d'acquisition du Code +Source ne devra pas excéder le simple coût de transfert des données. + + + 5.3.2 DISTRIBUTION DU LOGICIEL MODIFIE + +Lorsque le Licencié apporte une Contribution au Logiciel, les conditions +de distribution du Logiciel Modifié en résultant sont alors soumises à +l'intégralité des dispositions du Contrat. 
+ +Le Licencié est autorisé à distribuer le Logiciel Modifié, sous forme de +code source ou de code objet, à condition que cette distribution +respecte les dispositions du Contrat dans leur totalité et soit +accompagnée: + + 1. + + d'un exemplaire du Contrat, + + 2. + + d'un avertissement relatif à la restriction de garantie et de + responsabilité du Concédant telle que prévue aux articles 8 + <#responsabilite> et 9 <#garantie>, + +et, dans le cas où seul le code objet du Logiciel Modifié est redistribué, + + 3. + + d'une note précisant les conditions d'accès effectif au code source + complet du Logiciel Modifié, pendant une période d'au moins 3 ans à + compter de la distribution du Logiciel Modifié, étant entendu que le + coût additionnel d'acquisition du code source ne devra pas excéder + le simple coût de transfert des données. + + + 5.3.3 DISTRIBUTION DES MODULES EXTERNES + +Lorsque le Licencié a développé un Module Externe les conditions du +Contrat ne s'appliquent pas à ce Module Externe, qui peut être distribué +sous un contrat de licence différent. + + + 5.3.4 COMPATIBILITE AVEC D'AUTRES LICENCES + +Le Licencié peut inclure un code soumis aux dispositions d'une des +versions de la licence GNU GPL, GNU Affero GPL et/ou EUPL dans le +Logiciel modifié ou non et distribuer l'ensemble sous les conditions de +la même version de la licence GNU GPL, GNU Affero GPL et/ou EUPL. + +Le Licencié peut inclure le Logiciel modifié ou non dans un code soumis +aux dispositions d'une des versions de la licence GNU GPL, GNU Affero +GPL et/ou EUPL et distribuer l'ensemble sous les conditions de la même +version de la licence GNU GPL, GNU Affero GPL et/ou EUPL. + + + Article 6 - PROPRIETE INTELLECTUELLE + + + 6.1 SUR LE LOGICIEL INITIAL + +Le Titulaire est détenteur des droits patrimoniaux sur le Logiciel +Initial. 
Toute utilisation du Logiciel Initial est soumise au respect +des conditions dans lesquelles le Titulaire a choisi de diffuser son +oeuvre et nul autre n'a la faculté de modifier les conditions de +diffusion de ce Logiciel Initial. + +Le Titulaire s'engage à ce que le Logiciel Initial reste au moins régi +par le Contrat et ce, pour la durée visée à l'article 4.2 <#duree>. + + + 6.2 SUR LES CONTRIBUTIONS + +Le Licencié qui a développé une Contribution est titulaire sur celle-ci +des droits de propriété intellectuelle dans les conditions définies par +la législation applicable. + + + 6.3 SUR LES MODULES EXTERNES + +Le Licencié qui a développé un Module Externe est titulaire sur celui-ci +des droits de propriété intellectuelle dans les conditions définies par +la législation applicable et reste libre du choix du contrat régissant +sa diffusion. + + + 6.4 DISPOSITIONS COMMUNES + +Le Licencié s'engage expressément: + + 1. + + à ne pas supprimer ou modifier de quelque manière que ce soit les + mentions de propriété intellectuelle apposées sur le Logiciel; + + 2. + + à reproduire à l'identique lesdites mentions de propriété + intellectuelle sur les copies du Logiciel modifié ou non. + +Le Licencié s'engage à ne pas porter atteinte, directement ou +indirectement, aux droits de propriété intellectuelle du Titulaire et/ou +des Contributeurs sur le Logiciel et à prendre, le cas échéant, à +l'égard de son personnel toutes les mesures nécessaires pour assurer le +respect des dits droits de propriété intellectuelle du Titulaire et/ou +des Contributeurs. + + + Article 7 - SERVICES ASSOCIES + +7.1 Le Contrat n'oblige en aucun cas le Concédant à la réalisation de +prestations d'assistance technique ou de maintenance du Logiciel. + +Cependant le Concédant reste libre de proposer ce type de services. Les +termes et conditions d'une telle assistance technique et/ou d'une telle +maintenance seront alors déterminés dans un acte séparé. 
Ces actes de +maintenance et/ou assistance technique n'engageront que la seule +responsabilité du Concédant qui les propose. + +7.2 De même, tout Concédant est libre de proposer, sous sa seule +responsabilité, à ses licenciés une garantie, qui n'engagera que lui, +lors de la redistribution du Logiciel et/ou du Logiciel Modifié et ce, +dans les conditions qu'il souhaite. Cette garantie et les modalités +financières de son application feront l'objet d'un acte séparé entre le +Concédant et le Licencié. + + + Article 8 - RESPONSABILITE + +8.1 Sous réserve des dispositions de l'article 8.2 +<#limite-responsabilite>, le Licencié a la faculté, sous réserve de +prouver la faute du Concédant concerné, de solliciter la réparation du +préjudice direct qu'il subirait du fait du Logiciel et dont il apportera +la preuve. + +8.2 La responsabilité du Concédant est limitée aux engagements pris en +application du Contrat et ne saurait être engagée en raison notamment: +(i) des dommages dus à l'inexécution, totale ou partielle, de ses +obligations par le Licencié, (ii) des dommages directs ou indirects +découlant de l'utilisation ou des performances du Logiciel subis par le +Licencié et (iii) plus généralement d'un quelconque dommage indirect. En +particulier, les Parties conviennent expressément que tout préjudice +financier ou commercial (par exemple perte de données, perte de +bénéfices, perte d'exploitation, perte de clientèle ou de commandes, +manque à gagner, trouble commercial quelconque) ou toute action dirigée +contre le Licencié par un tiers, constitue un dommage indirect et +n'ouvre pas droit à réparation par le Concédant. + + + Article 9 - GARANTIE + +9.1 Le Licencié reconnaît que l'état actuel des connaissances +scientifiques et techniques au moment de la mise en circulation du +Logiciel ne permet pas d'en tester et d'en vérifier toutes les +utilisations ni de détecter l'existence d'éventuels défauts. 
L'attention +du Licencié a été attirée sur ce point sur les risques associés au +chargement, à l'utilisation, la modification et/ou au développement et à +la reproduction du Logiciel qui sont réservés à des utilisateurs avertis. + +Il relève de la responsabilité du Licencié de contrôler, par tous +moyens, l'adéquation du produit à ses besoins, son bon fonctionnement et +de s'assurer qu'il ne causera pas de dommages aux personnes et aux biens. + +9.2 Le Concédant déclare de bonne foi être en droit de concéder +l'ensemble des droits attachés au Logiciel (comprenant notamment les +droits visés à l'article 5 <#etendue>). + +9.3 Le Licencié reconnaît que le Logiciel est fourni "en l'état" par le +Concédant sans autre garantie, expresse ou tacite, que celle prévue à +l'article 9.2 <#bonne-foi> et notamment sans aucune garantie sur sa +valeur commerciale, son caractère sécurisé, innovant ou pertinent. + +En particulier, le Concédant ne garantit pas que le Logiciel est exempt +d'erreur, qu'il fonctionnera sans interruption, qu'il sera compatible +avec l'équipement du Licencié et sa configuration logicielle ni qu'il +remplira les besoins du Licencié. + +9.4 Le Concédant ne garantit pas, de manière expresse ou tacite, que le +Logiciel ne porte pas atteinte à un quelconque droit de propriété +intellectuelle d'un tiers portant sur un brevet, un logiciel ou sur tout +autre droit de propriété. Ainsi, le Concédant exclut toute garantie au +profit du Licencié contre les actions en contrefaçon qui pourraient être +diligentées au titre de l'utilisation, de la modification, et de la +redistribution du Logiciel. Néanmoins, si de telles actions sont +exercées contre le Licencié, le Concédant lui apportera son expertise +technique et juridique pour sa défense. Cette expertise technique et +juridique est déterminée au cas par cas entre le Concédant concerné et +le Licencié dans le cadre d'un protocole d'accord. 
Le Concédant dégage +toute responsabilité quant à l'utilisation de la dénomination du +Logiciel par le Licencié. Aucune garantie n'est apportée quant à +l'existence de droits antérieurs sur le nom du Logiciel et sur +l'existence d'une marque. + + + Article 10 - RESILIATION + +10.1 En cas de manquement par le Licencié aux obligations mises à sa +charge par le Contrat, le Concédant pourra résilier de plein droit le +Contrat trente (30) jours après notification adressée au Licencié et +restée sans effet. + +10.2 Le Licencié dont le Contrat est résilié n'est plus autorisé à +utiliser, modifier ou distribuer le Logiciel. Cependant, toutes les +licences qu'il aura concédées antérieurement à la résiliation du Contrat +resteront valides sous réserve qu'elles aient été effectuées en +conformité avec le Contrat. + + + Article 11 - DISPOSITIONS DIVERSES + + + 11.1 CAUSE EXTERIEURE + +Aucune des Parties ne sera responsable d'un retard ou d'une défaillance +d'exécution du Contrat qui serait dû à un cas de force majeure, un cas +fortuit ou une cause extérieure, telle que, notamment, le mauvais +fonctionnement ou les interruptions du réseau électrique ou de +télécommunication, la paralysie du réseau liée à une attaque +informatique, l'intervention des autorités gouvernementales, les +catastrophes naturelles, les dégâts des eaux, les tremblements de terre, +le feu, les explosions, les grèves et les conflits sociaux, l'état de +guerre... + +11.2 Le fait, par l'une ou l'autre des Parties, d'omettre en une ou +plusieurs occasions de se prévaloir d'une ou plusieurs dispositions du +Contrat, ne pourra en aucun cas impliquer renonciation par la Partie +intéressée à s'en prévaloir ultérieurement. + +11.3 Le Contrat annule et remplace toute convention antérieure, écrite +ou orale, entre les Parties sur le même objet et constitue l'accord +entier entre les Parties sur cet objet. 
Aucune addition ou modification +aux termes du Contrat n'aura d'effet à l'égard des Parties à moins +d'être faite par écrit et signée par leurs représentants dûment habilités. + +11.4 Dans l'hypothèse où une ou plusieurs des dispositions du Contrat +s'avèrerait contraire à une loi ou à un texte applicable, existants ou +futurs, cette loi ou ce texte prévaudrait, et les Parties feraient les +amendements nécessaires pour se conformer à cette loi ou à ce texte. +Toutes les autres dispositions resteront en vigueur. De même, la +nullité, pour quelque raison que ce soit, d'une des dispositions du +Contrat ne saurait entraîner la nullité de l'ensemble du Contrat. + + + 11.5 LANGUE + +Le Contrat est rédigé en langue française et en langue anglaise, ces +deux versions faisant également foi. + + + Article 12 - NOUVELLES VERSIONS DU CONTRAT + +12.1 Toute personne est autorisée à copier et distribuer des copies de +ce Contrat. + +12.2 Afin d'en préserver la cohérence, le texte du Contrat est protégé +et ne peut être modifié que par les auteurs de la licence, lesquels se +réservent le droit de publier périodiquement des mises à jour ou de +nouvelles versions du Contrat, qui posséderont chacune un numéro +distinct. Ces versions ultérieures seront susceptibles de prendre en +compte de nouvelles problématiques rencontrées par les logiciels libres. + +12.3 Tout Logiciel diffusé sous une version donnée du Contrat ne pourra +faire l'objet d'une diffusion ultérieure que sous la même version du +Contrat ou une version postérieure, sous réserve des dispositions de +l'article 5.3.4 <#compatibilite>. + + + Article 13 - LOI APPLICABLE ET COMPETENCE TERRITORIALE + +13.1 Le Contrat est régi par la loi française. Les Parties conviennent +de tenter de régler à l'amiable les différends ou litiges qui +viendraient à se produire par suite ou à l'occasion du Contrat. 
+
+13.2 A défaut d'accord amiable dans un délai de deux (2) mois à compter
+de leur survenance et sauf situation relevant d'une procédure d'urgence,
+les différends ou litiges seront portés par la Partie la plus diligente
+devant les Tribunaux compétents de Paris.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..78ceadf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,117 @@
+# ❄️ infrastructure
+
+The dgnum infrastructure.
+
+# Contributing
+
+Some instructions on how to contribute are available (in French) in [/CONTRIBUTE.md](CONTRIBUTE.md).
+You're expected to read this document before committing to the repo.
+
+Some documentation for the development tools is provided in the aforementioned file.
+
+# Using the binary cache
+
+Add the following module to your configuration (and pin this repo using your favorite tool: npins, lon, etc...):
+```
+{ lib, ... }:
+let
+  dgnum-infra = PINNED_PATH_TO_INFRA;
+in {
+  nix.settings = (import dgnum-infra { }).mkCacheSettings {
+    caches = [ "infra" ];
+  };
+}
+```
+
+
+# Adding a new machine
+
+The first step is to create a minimal viable NixOS host, using the means necessary.
+The second step is to find a name for this host; it must be unique among the other hosts.
+
+> [!TIP]
+> For the rest of this part, we assume that the host is named `host02`
+
+## Download the keys
+
+The public SSH keys of `host02` have to be saved to `keys`, preferably only the `ssh-ed25519` one.
+
+It can be retrieved with:
+
+```bash
+ssh-keyscan address.of.host02 2>/dev/null | awk '/ssh-ed25519/ {print $2,$3}'
+```
+
+## Initialize the machine folder and configuration
+
+- Create a folder `host02` under `machines/`
+- Copy the hardware configuration file generated by `nixos-generate-config` to `machines/host02/_hardware-configuration.nix`
+- Create a `machines/host02/_configuration.nix` file; it will contain the main configuration options. The basic content of this file should be the following
+
+```nix
+{ lib, ...
}:
+
+lib.extra.mkConfig {
+  enabledModules = [
+    # List of modules to enable
+  ];
+
+  enabledServices = [
+    # List of services to enable
+  ];
+
+  extraConfig = {
+    services.netbird.enable = true;
+  };
+
+  root = ./.;
+}
+```
+
+## Fill in the metadata
+
+### Network configuration
+
+The network is declared in `meta/network.nix`; the necessary `hostId` value can be generated with:
+
+```bash
+head -c4 /dev/urandom | od -A none -t x4 | sed 's/ //'
+```
+
+### Other details
+
+The general metadata is declared in `meta/nodes.nix`, the main values to declare are:
+
+- `site`, where the node is physically located
+- `stateVersion`
+- `nixpkgs`, the nixpkgs version to use
+
+## Initialize secrets
+
+Create the directory `secrets` in the configuration folder, and add a `secrets.nix` file containing:
+
+```nix
+(import ../../../keys).mkSecrets [ "host02" ] [
+  # List of secrets for host02
+]
+```
+
+This will be used for future secret management.
+
+## Update encrypted files
+
+The Arkheon, Netbox, and notification modules all have secrets that are deployed on all machines. To make those services work correctly, run in `modules/dgn-records`, `modules/dgn-netbox-agent` and `modules/dgn-notify`:
+
+```bash
+agenix -r
+```
+
+## Commit and create a PR
+
+Once all of this is done, check that the configuration builds correctly:
+
+```bash
+colmena build --on host02
+```
+
+Apply it, and create a Pull Request.
diff --git a/default.nix b/default.nix
new file mode 100644
index 0000000..414feb8
--- /dev/null
+++ b/default.nix
@@ -0,0 +1,113 @@
+/*
+  Copyright :
+    - Maurice Debray 2023
+    - Tom Hubrecht 2023
+
+  Ce logiciel est un programme informatique servant à déployer des
+  configurations de serveurs via NixOS.
+
+  Ce logiciel est régi par la licence CeCILL soumise au droit français et
+  respectant les principes de diffusion des logiciels libres.
Vous pouvez + utiliser, modifier et/ou redistribuer ce programme sous les conditions + de la licence CeCILL telle que diffusée par le CEA, le CNRS et l'INRIA + sur le site "http://www.cecill.info". + + En contrepartie de l'accessibilité au code source et des droits de copie, + de modification et de redistribution accordés par cette licence, il n'est + offert aux utilisateurs qu'une garantie limitée. Pour les mêmes raisons, + seule une responsabilité restreinte pèse sur l'auteur du programme, le + titulaire des droits patrimoniaux et les concédants successifs. + + A cet égard l'attention de l'utilisateur est attirée sur les risques + associés au chargement, à l'utilisation, à la modification et/ou au + développement et à la reproduction du logiciel par l'utilisateur étant + donné sa spécificité de logiciel libre, qui peut le rendre complexe à + manipuler et qui le réserve donc à des développeurs et des professionnels + avertis possédant des connaissances informatiques approfondies. Les + utilisateurs sont donc invités à charger et tester l'adéquation du + logiciel à leurs besoins dans des conditions permettant d'assurer la + sécurité de leurs systèmes et ou de leurs données et, plus généralement, + à l'utiliser et l'exploiter dans les mêmes conditions de sécurité. + + Le fait que vous puissiez accéder à cet en-tête signifie que vous avez + pris connaissance de la licence CeCILL, et que vous en avez accepté les + termes. +*/ + +{ + sources ? import ./npins, + pkgs ? import sources.nixpkgs { }, + nix-pkgs ? 
import sources.nix-pkgs { inherit pkgs; }, +}: + +let + git-checks = (import (builtins.storePath sources.git-hooks)).run { + src = ./.; + + hooks = { + statix = { + enable = true; + stages = [ "pre-push" ]; + settings.ignore = [ + "**/lon.nix" + "**/npins" + ]; + }; + + deadnix = { + enable = true; + stages = [ "pre-push" ]; + }; + + nixfmt-rfc-style = { + enable = true; + stages = [ "pre-push" ]; + }; + + commitizen.enable = true; + }; + }; +in + +{ + nodes = builtins.mapAttrs ( + host: { site, ... }: "${host}.${site}.infra.dgnum.eu" + ) (import ./meta/nodes.nix); + + dns = import ./meta/dns.nix; + + mkCacheSettings = import ./machines/storage01/tvix-cache/cache-settings.nix; + + shells = { + default = pkgs.mkShell { + name = "dgnum-infra"; + + packages = [ + (pkgs.nixos-generators.overrideAttrs (_: { + version = "1.8.0-unstable"; + src = builtins.storePath sources.nixos-generators; + })) + pkgs.npins + + (pkgs.callPackage ./lib/colmena { inherit (nix-pkgs) colmena; }) + (pkgs.callPackage "${sources.agenix}/pkgs/agenix.nix" { }) + (pkgs.callPackage "${sources.lon}/nix/packages/lon.nix" { }) + + ] ++ (import ./scripts { inherit pkgs; }); + + shellHook = '' + ${git-checks.shellHook} + ''; + + preferLocalBuild = true; + }; + + pre-commit = pkgs.mkShell { + name = "pre-commit-shell"; + + shellHook = '' + ${git-checks.shellHook} + ''; + }; + }; +} diff --git a/hive.nix b/hive.nix new file mode 100644 index 0000000..906c5fa --- /dev/null +++ b/hive.nix @@ -0,0 +1,115 @@ +let + sources' = import ./npins; + infrastructure_sources = import "${sources'.infrastructure}/npins"; + sources_merged = infrastructure_sources // sources'; + + # Patch sources directly + sources = builtins.mapAttrs (patch.base { pkgs = import sources'.nixos-unstable { }; }) + .applyPatches' sources_merged; + + nix-lib = import ./lib/nix-lib; + + patch = import ./lib/nix-patches { patchFile = ./patches; }; + + nodes' = import ./meta/nodes.nix; + nodes = builtins.attrNames nodes'; + + mkNode = node: { + 
# Import the base configuration for each node + imports = [ ./machines/${node}/_configuration.nix ]; + }; + + nixpkgs' = import ./meta/nixpkgs.nix; + # All supported nixpkgs versions, instanciated + nixpkgs = nix-lib.mapSingleFuse mkNixpkgs nixpkgs'.supported; + + # Get the configured nixos version for the node, + # defaulting to the one defined in meta/nixpkgs + version = node: nodes'.${node}.nixpkgs or nixpkgs'.default; + + # Builds a patched version of nixpkgs, only as the source + mkNixpkgs' = + v: + patch.mkNixpkgsSrc rec { + src = sources'.${name}; + name = "nixos-${v}"; + }; + + # Instanciates the required nixpkgs version + mkNixpkgs = version: import (mkNixpkgs' version) { }; + + ### + # Function to create arguments based on the node + # + mkArgs = node: rec { + lib = nixpkgs.${version node}.lib // { + extra = nix-lib; + }; + + meta = (import ./meta) lib; + + nodeMeta = meta.nodes.${node}; + }; +in + +{ + meta = { + nodeNixpkgs = nix-lib.mapSingleFuse (n: nixpkgs.${version n}) nodes; + + specialArgs = { + inherit nixpkgs sources; + + dgn-keys = import ./keys; + }; + + nodeSpecialArgs = nix-lib.mapSingleFuse mkArgs nodes; + }; + + defaults = + { + pkgs, + name, + nodeMeta, + ... 
+ }: + { + # Import the default modules + imports = [ + "${sources.infrastructure}/modules" + (import "${sources.lix-module}/module.nix" { + lix = pkgs.applyPatches { + name = "lix-2.90.patched"; + src = sources.lix; + patches = [ ./patches/00-disable-installChecks-lix.patch ]; + }; + }) + ]; + + # Include default secrets + age-secrets.sources = [ ./machines/${name}/secrets ]; + + # Deployment config is specified in meta.nodes.${node}.deployment + inherit (nodeMeta) deployment; + + nix = { + # Set NIX_PATH to the patched version of nixpkgs + nixPath = [ "nixpkgs=${mkNixpkgs' (version name)}" ]; + optimise.automatic = true; + + gc = { + automatic = true; + dates = "weekly"; + options = "--delete-older-than 7d"; + }; + }; + + # Allow unfree packages + nixpkgs.config.allowUnfree = true; + + # Use the stateVersion declared in the metadata + system = { + inherit (nodeMeta) stateVersion; + }; + }; +} +// (nix-lib.mapSingleFuse mkNode nodes) diff --git a/iso/build-iso.sh b/iso/build-iso.sh new file mode 100755 index 0000000..a210aad --- /dev/null +++ b/iso/build-iso.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +NIXPKGS=$(nix-build --no-out-link nixpkgs.nix) + +nixos-generate -c configuration.nix -I NIX_PATH="$NIXPKGS" -f install-iso diff --git a/iso/configuration.nix b/iso/configuration.nix new file mode 100644 index 0000000..e7cabf8 --- /dev/null +++ b/iso/configuration.nix @@ -0,0 +1,38 @@ +{ lib, pkgs, ... 
}: + +let + dgn-keys = import ../keys; + + dgn-members = (import ../meta lib).organization.groups.root; +in + +{ + imports = [ ./dgn-install ]; + + boot = { + blacklistedKernelModules = [ "snd_pcsp" ]; + kernelPackages = pkgs.linuxPackages_latest; + tmp.cleanOnBoot = true; + + loader = { + systemd-boot.enable = true; + efi.canTouchEfiVariables = true; + }; + + supportedFilesystems = [ + "exfat" + "zfs" + "bcachefs" + ]; + + swraid.enable = lib.mkForce false; + }; + + console.keyMap = "fr"; + + services = { + openssh.enable = true; + }; + + users.users.root.openssh.authorizedKeys.keys = dgn-keys.getKeys dgn-members; +} diff --git a/iso/dgn-install/README.md b/iso/dgn-install/README.md new file mode 100644 index 0000000..32f2f2f --- /dev/null +++ b/iso/dgn-install/README.md @@ -0,0 +1 @@ +Script pour installer automatiquement NixOS sur les machines de la DGNum diff --git a/iso/dgn-install/default.nix b/iso/dgn-install/default.nix new file mode 100644 index 0000000..3028540 --- /dev/null +++ b/iso/dgn-install/default.nix @@ -0,0 +1,20 @@ +{ pkgs, ... 
}: + +let + installScript = pkgs.writeShellApplication { + name = "dgn-install"; + + runtimeInputs = with pkgs; [ + coreutils + gnused + nixos-install-tools + zfs + ]; + + text = builtins.readFile ./dgn-install.sh; + }; +in + +{ + environment.systemPackages = [ installScript ]; +} diff --git a/iso/dgn-install/dgn-install.sh b/iso/dgn-install/dgn-install.sh new file mode 100644 index 0000000..ae7d179 --- /dev/null +++ b/iso/dgn-install/dgn-install.sh @@ -0,0 +1,149 @@ +bootDevice= +rootDevice= + +domain="par01.infra.dgnum.eu" +hostname="dgn0x" + +hasZFS= + +while [ "$#" -gt 0 ]; do + i="$1" + shift 1 + case "$i" in + --root) + rootDevice="$1" + shift 1 + ;; + --boot) + bootDevice="$1" + shift 1 + ;; + --swap) + swapDevice="$1" + shift 1 + ;; + --domain) + domain="$1" + shift 1 + ;; + --hostname) + hostname="$1" + shift 1 + ;; + --with-zfs) + hasZFS="1" + ;; + *) + echo "$0: unknown option \`$i'" + exit 1 + ;; + esac +done + +if [ -z "$bootDevice" ]; then + echo "Missing boot partition" + exit 1 +fi + +if [ -z "$rootDevice" ]; then + echo "Missing root partition" + exit 1 +fi + +# Mount the partitions to where they should be +mount "$rootDevice" /mnt +mkdir /mnt/boot + +mount "$bootDevice" /mnt/boot + +if [ -n "$swapDevice" ]; then + swapon "$swapDevice" +fi + +# Generate configration +nixos-generate-config --root /mnt + +NIX="/mnt/etc/nixos/" + +# Setup our own files +mv $NIX/configuration.nix $NIX/base-configuration.nix + +cat < $NIX/dgnum-server.nix +{ ... }: { + services.nscd.enableNsncd = false; + programs.bash.promptInit = '' + # Provide a nice prompt if the terminal supports it. 
+ if [ "\$TERM" != "dumb" ] || [ -n "\$INSIDE_EMACS" ]; then + PROMPT_COLOR="1;31m" + ((UID)) && PROMPT_COLOR="1;32m" + if [ -n "\$INSIDE_EMACS" ] || [ "\$TERM" = "eterm" ] || [ "\$TERM" = "eterm-color" ]; then + # Emacs term mode doesn't support xterm title escape sequence (\e]0;) + PS1="\n\[\033[\$PROMPT_COLOR\][\u@\$(hostname -f):\w]\\\$\[\033[0m\] " + else + PS1="\n\[\033[\$PROMPT_COLOR\][\[\e]0;\u@\H: \w\a\]\u@\$(hostname -f):\w]\\\$\[\033[0m\] " + fi + if test "\$TERM" = "xterm"; then + PS1="\[\033]2;\$(hostname -f):\u:\w\007\]\$PS1" + fi + fi + ''; +} +EOF + +cat < $NIX/configuration.nix +{ pkgs, ... }: { + imports = [ + ./base-configuration.nix + ./dgnum-server.nix + $(if [ -n "$hasZFS" ]; then echo './zfs.nix'; fi) + ]; + + boot.tmp.cleanOnBoot = true; + + console.keyMap = "fr"; + + time.timeZone = "Europe/Paris"; + + environment.systemPackages = with pkgs; [ + vim + wget + kitty.terminfo + ]; + + networking = { + hostName = "$hostname"; + domain = "$domain"; + }; + + # Activate SSH and set the keys + services.openssh = { + enable = true; + + settings.PasswordAuthentication = false; + }; + + users.users.root.openssh.authorizedKeys.keyFiles = [ ./rootKeys ]; +} +EOF + +if [ -n "$hasZFS" ]; then + cat < $NIX/zfs.nix +{ ... 
}: { + boot = { + supportedFilesystems = [ "zfs" ]; + zfs.forceImportRoot = false; + zfs.extraPools = [ + $(zpool list -Ho name | sed 's/^/"/;s/$/"/') + ]; + }; + + networking.hostId = "$(head -c4 /dev/urandom | od -A none -t x4 | sed 's/ //')"; +} +EOF +fi + +# Copy the keys +cp /etc/ssh/authorized_keys.d/root $NIX/rootKeys + +# Perform the installation +nixos-install diff --git a/iso/nixpkgs.nix b/iso/nixpkgs.nix new file mode 100644 index 0000000..c859113 --- /dev/null +++ b/iso/nixpkgs.nix @@ -0,0 +1,9 @@ +let + version = (import ../meta/nixpkgs.nix).default; + nixpkgs = (import ../npins)."nixos-${version}"; +in + +(import nixpkgs { }).srcOnly { + name = "nixpkgs-for-iso"; + src = nixpkgs; +} diff --git a/keys/default.nix b/keys/default.nix new file mode 100644 index 0000000..81dbd3f --- /dev/null +++ b/keys/default.nix @@ -0,0 +1,39 @@ +let + _sources = import ../npins; + + meta = import ../meta (import _sources.nixpkgs { }).lib; + + getAttr = flip builtins.getAttr; + + inherit (import ../lib/nix-lib) flip setDefault unique; +in + +rec { + # WARNING: When updating this list, make sure that the nodes and members are alphabetically sorted + # If not, you will face an angry maintainer + _keys = (import "${_sources.infrastructure}/keys")._keys // { + krz01 = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP4o65gWOgNrxbSd3kiQIGZUM+YD6kuZOQtblvzUGsfB" ]; + }; + + getKeys = ls: builtins.concatLists (builtins.map (getAttr _keys) ls); + + mkSecrets = + nodes: setDefault { publicKeys = unique (rootKeys ++ (builtins.concatMap getNodeKeys' nodes)); }; + + getNodeKeys' = + node: + let + names = builtins.foldl' (names: group: names ++ meta.organization.groups.${group}) ( + meta.nodes.${node}.admins ++ [ node ] + ) meta.nodes.${node}.adminGroups; + in + unique (getKeys names); + + getNodeKeys = node: rootKeys ++ getNodeKeys' node; + + # List of keys for the root group + rootKeys = getKeys meta.organization.groups.root; + + # List of 'machine' keys + machineKeys = rootKeys ++ 
(getKeys (builtins.attrNames meta.nodes)); +} diff --git a/lib/colmena/default.nix b/lib/colmena/default.nix new file mode 100644 index 0000000..b5c7f5d --- /dev/null +++ b/lib/colmena/default.nix @@ -0,0 +1,11 @@ +# Copyright: Jade Lovelace 2024 + +{ colmena, runCommandNoCC }: +runCommandNoCC "colmena-wrapper" { env.colmena = "${colmena}/bin/colmena"; } '' + mkdir -p $out + ln -s ${colmena}/share $out/share + mkdir $out/bin + + substituteAll ${./wrapper.sh.in} $out/bin/colmena + chmod +x $out/bin/colmena +'' diff --git a/lib/colmena/wrapper.sh.in b/lib/colmena/wrapper.sh.in new file mode 100644 index 0000000..4c1ba17 --- /dev/null +++ b/lib/colmena/wrapper.sh.in @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# +# Copyright: Jade Lovelace 2024 + +doChecks() { + # creates refs in the refs/prefetch/remotes/origin namespace + echo "Prefetching repo changes..." >&2 + git fetch --quiet --prefetch --no-write-fetch-head origin + + diffs=$(git rev-list --left-right --count HEAD...refs/prefetch/remotes/origin/main) + only_in_local=$(echo "$diffs" | cut -f1) + only_in_main=$(echo "$diffs" | cut -f2) + + if [[ $only_in_main -gt 0 && ! -v $FORCE_DEPLOY_DGNUM ]]; then + echo >&2 + echo "Attempting to deploy when main has $only_in_main commits not in your branch!" >&2 + echo "This will probably revert someone's changes. Consider merging them." >&2 + echo "If you really mean it, set the environment variable FORCE_DEPLOY_DGNUM" >&2 + exit 1 + fi + + if [[ $only_in_local -gt 0 ]]; then + echo "You have $only_in_local commits not yet pushed to main. 
Reminder to push them after :)" >&2 + fi +} + +if [[ $1 == 'apply' ]]; then + doChecks +fi + +exec @colmena@ "$@" diff --git a/lib/nix-lib/default.nix b/lib/nix-lib/default.nix new file mode 100644 index 0000000..39e446d --- /dev/null +++ b/lib/nix-lib/default.nix @@ -0,0 +1,197 @@ +# Copyright Tom Hubrecht, (2023) +# +# Tom Hubrecht +# +# This software is governed by the CeCILL license under French law and +# abiding by the rules of distribution of free software. You can use, +# modify and/ or redistribute the software under the terms of the CeCILL +# license as circulated by CEA, CNRS and INRIA at the following URL +# "http://www.cecill.info". +# +# As a counterpart to the access to the source code and rights to copy, +# modify and redistribute granted by the license, users are provided only +# with a limited warranty and the software's author, the holder of the +# economic rights, and the successive licensors have only limited +# liability. +# +# In this respect, the user's attention is drawn to the risks associated +# with loading, using, modifying and/or developing or reproducing the +# software by the user in light of its specific status of free software, +# that may mean that it is complicated to manipulate, and that also +# therefore means that it is reserved for developers and experienced +# professionals having in-depth computer knowledge. Users are therefore +# encouraged to load and test the software's suitability as regards their +# requirements in conditions enabling the security of their systems and/or +# data to be ensured and, more generally, to use and operate it in the +# same conditions as regards security. +# +# The fact that you are presently reading this means that you have had +# knowledge of the CeCILL license and that you accept its terms. 
+ +let + # Reimplement optional functions + _optional = + default: b: value: + if b then value else default; +in + +rec { + inherit (import ./nixpkgs.nix) + flip + hasPrefix + recursiveUpdate + splitString + unique + ; + + /* + Fuses a list of attribute sets into a single attribute set. + + Type: [attrs] -> attrs + + Example: + x = [ { a = 1; } { b = 2; } ] + fuseAttrs x + => { a = 1; b = 2; } + */ + fuseAttrs = builtins.foldl' (attrs: x: attrs // x) { }; + + fuseValueAttrs = attrs: fuseAttrs (builtins.attrValues attrs); + + /* + Applies a function to `attrsList` before fusing the resulting list + of attribute sets. + + Type: ('a -> attrs) -> ['a] -> attrs + + Example: + x = [ "to" "ta" "ti" ] + f = s: { ${s} = s + s; } + mapFuse f x + => { to = "toto"; ta = "tata"; ti = "titi"; } + */ + mapFuse = + # 'a -> attrs + f: + # ['a] + attrsList: + fuseAttrs (builtins.map f attrsList); + + /* + Equivalent of lib.singleton but for an attribute set. + + Type: str -> 'a -> attrs + + Example: + singleAttr "a" 1 + => { a = 1; } + */ + singleAttr = name: value: { ${name} = value; }; + + # Enables a list of modules. + enableAttrs' = + enable: + mapFuse (m: { + ${m}.${enable} = true; + }); + + enableModules = enableAttrs' "enable"; + + /* + Create an attribute set from a list of values, mapping those + values through the function `f`. 
+ + Example: + mapSingleFuse (x: "val-${x}") [ "a" "b" ] + => { a = "val-a"; b = "val-b" } + */ + mapSingleFuse = f: mapFuse (x: singleAttr x (f x)); + + /* + Creates a relative path as a string + + Type: path -> str -> path + + Example: + mkRel /home/test/ "file.txt" + => "/home/test/file.txt" + */ + mkRel = path: file: path + "/${file}"; + + setDefault = + default: + mapFuse (name: { + ${name} = default; + }); + + mkBaseSecrets = + root: + mapFuse (secret: { + ${secret}.file = mkRel root secret; + }); + + getSecrets = dir: builtins.attrNames (import (mkRel dir "secrets.nix")); + + subAttr = attrs: name: attrs.${name}; + + subAttrs = attrs: builtins.map (subAttr attrs); + + optionalList = _optional [ ]; + + optionalAttrs = _optional { }; + + optionalString = _optional ""; + /* + Same as fuseAttrs but using `lib.recursiveUpdate` to merge attribute + sets together. + + Type: [attrs] -> attrs + */ + recursiveFuse = builtins.foldl' recursiveUpdate { }; + + mkImport = + root: file: + let + path = mkRel root file; + in + path + (optionalString (!(builtins.pathExists path)) ".nix"); + + mkImports = root: builtins.map (mkImport root); + + /* + Creates a confugiration by merging enabled modules, + services and extraConfig. + + Example: + mkConfig { + enabledModules = [ "ht-defaults" ]; + enabledServices = [ "toto" ]; + extraConfig = { services.nginx.enable = true; }; + root = ./.; + } + => + { + imports = [ ./toto ]; + ht-defaults.enable = true; + services.nginx.enable = true; + } + */ + mkConfig = + { + # List of modules to enable with `enableModules` + enabledModules, + # List of services to import + enabledServices, + # Extra configuration, defaults to `{ }` + extraConfig ? 
{ }, + # Path relative to which the enabled services will be imported + root, + }: + recursiveFuse [ + (enableModules enabledModules) + + { imports = mkImports root ([ "_hardware-configuration" ] ++ enabledServices); } + + extraConfig + ]; +} diff --git a/lib/nix-lib/nixpkgs.nix b/lib/nix-lib/nixpkgs.nix new file mode 100644 index 0000000..302a1b9 --- /dev/null +++ b/lib/nix-lib/nixpkgs.nix @@ -0,0 +1,416 @@ +### +# Collection of nixpkgs library functions, those are necessary for defining our own lib +# +# They have been simplified and builtins are used in some places, instead of lib shims. + +rec { + /** + Does the same as the update operator '//' except that attributes are + merged until the given predicate is verified. The predicate should + accept 3 arguments which are the path to reach the attribute, a part of + the first attribute set and a part of the second attribute set. When + the predicate is satisfied, the value of the first attribute set is + replaced by the value of the second attribute set. + + # Inputs + + `pred` + + : Predicate, taking the path to the current attribute as a list of strings for attribute names, and the two values at that path from the original arguments. + + `lhs` + + : Left attribute set of the merge. + + `rhs` + + : Right attribute set of the merge. 
+ + # Type + + ``` + recursiveUpdateUntil :: ( [ String ] -> AttrSet -> AttrSet -> Bool ) -> AttrSet -> AttrSet -> AttrSet + ``` + + # Examples + :::{.example} + ## `lib.attrsets.recursiveUpdateUntil` usage example + + ```nix + recursiveUpdateUntil (path: l: r: path == ["foo"]) { + # first attribute set + foo.bar = 1; + foo.baz = 2; + bar = 3; + } { + #second attribute set + foo.bar = 1; + foo.quz = 2; + baz = 4; + } + + => { + foo.bar = 1; # 'foo.*' from the second set + foo.quz = 2; # + bar = 3; # 'bar' from the first set + baz = 4; # 'baz' from the second set + } + ``` + + ::: + */ + recursiveUpdateUntil = + pred: lhs: rhs: + let + f = + attrPath: + builtins.zipAttrsWith ( + n: values: + let + here = attrPath ++ [ n ]; + in + if builtins.length values == 1 || pred here (builtins.elemAt values 1) (builtins.head values) then + builtins.head values + else + f here values + ); + in + f [ ] [ + rhs + lhs + ]; + + /** + A recursive variant of the update operator ‘//’. The recursion + stops when one of the attribute values is not an attribute set, + in which case the right hand side value takes precedence over the + left hand side value. + + # Inputs + + `lhs` + + : Left attribute set of the merge. + + `rhs` + + : Right attribute set of the merge. + + # Type + + ``` + recursiveUpdate :: AttrSet -> AttrSet -> AttrSet + ``` + + # Examples + :::{.example} + ## `lib.attrsets.recursiveUpdate` usage example + + ```nix + recursiveUpdate { + boot.loader.grub.enable = true; + boot.loader.grub.device = "/dev/hda"; + } { + boot.loader.grub.device = ""; + } + + returns: { + boot.loader.grub.enable = true; + boot.loader.grub.device = ""; + } + ``` + + ::: + */ + recursiveUpdate = + lhs: rhs: + recursiveUpdateUntil ( + _: lhs: rhs: + !(builtins.isAttrs lhs && builtins.isAttrs rhs) + ) lhs rhs; + + /** + Determine whether a string has given prefix. 
+ + # Inputs + + `pref` + : Prefix to check for + + `str` + : Input string + + # Type + + ``` + hasPrefix :: string -> string -> bool + ``` + + # Examples + :::{.example} + ## `lib.strings.hasPrefix` usage example + + ```nix + hasPrefix "foo" "foobar" + => true + hasPrefix "foo" "barfoo" + => false + ``` + + ::: + */ + hasPrefix = pref: str: (builtins.substring 0 (builtins.stringLength pref) str == pref); + + /** + Escape occurrence of the elements of `list` in `string` by + prefixing it with a backslash. + + # Inputs + + `list` + : 1\. Function argument + + `string` + : 2\. Function argument + + # Type + + ``` + escape :: [string] -> string -> string + ``` + + # Examples + :::{.example} + ## `lib.strings.escape` usage example + + ```nix + escape ["(" ")"] "(foo)" + => "\\(foo\\)" + ``` + + ::: + */ + escape = list: builtins.replaceStrings list (builtins.map (c: "\\${c}") list); + + /** + Convert a string `s` to a list of characters (i.e. singleton strings). + This allows you to, e.g., map a function over each character. However, + note that this will likely be horribly inefficient; Nix is not a + general purpose programming language. Complex string manipulations + should, if appropriate, be done in a derivation. + Also note that Nix treats strings as a list of bytes and thus doesn't + handle unicode. + + # Inputs + + `s` + : 1\. Function argument + + # Type + + ``` + stringToCharacters :: string -> [string] + ``` + + # Examples + :::{.example} + ## `lib.strings.stringToCharacters` usage example + + ```nix + stringToCharacters "" + => [ ] + stringToCharacters "abc" + => [ "a" "b" "c" ] + stringToCharacters "🦄" + => [ "�" "�" "�" "�" ] + ``` + + ::: + */ + stringToCharacters = s: builtins.genList (p: builtins.substring p 1 s) (builtins.stringLength s); + + /** + Turn a string `s` into an exact regular expression + + # Inputs + + `s` + : 1\. 
Function argument + + # Type + + ``` + escapeRegex :: string -> string + ``` + + # Examples + :::{.example} + ## `lib.strings.escapeRegex` usage example + + ```nix + escapeRegex "[^a-z]*" + => "\\[\\^a-z]\\*" + ``` + + ::: + */ + escapeRegex = escape (stringToCharacters "\\[{()^$?*+|."); + + /** + Appends string context from string like object `src` to `target`. + + :::{.warning} + This is an implementation + detail of Nix and should be used carefully. + ::: + + Strings in Nix carry an invisible `context` which is a list of strings + representing store paths. If the string is later used in a derivation + attribute, the derivation will properly populate the inputDrvs and + inputSrcs. + + # Inputs + + `src` + : The string to take the context from. If the argument is not a string, + it will be implicitly converted to a string. + + `target` + : The string to append the context to. If the argument is not a string, + it will be implicitly converted to a string. + + # Type + + ``` + addContextFrom :: string -> string -> string + ``` + + # Examples + :::{.example} + ## `lib.strings.addContextFrom` usage example + + ```nix + pkgs = import { }; + addContextFrom pkgs.coreutils "bar" + => "bar" + ``` + + The context can be displayed using the `toString` function: + + ```nix + nix-repl> builtins.getContext (lib.strings.addContextFrom pkgs.coreutils "bar") + { + "/nix/store/m1s1d2dk2dqqlw3j90jl3cjy2cykbdxz-coreutils-9.5.drv" = { ... }; + } + ``` + + ::: + */ + addContextFrom = src: target: builtins.substring 0 0 src + target; + + /** + Cut a string with a separator and produces a list of strings which + were separated by this separator. + + # Inputs + + `sep` + : 1\. Function argument + + `s` + : 2\. Function argument + + # Type + + ``` + splitString :: string -> string -> [string] + ``` + + # Examples + :::{.example} + ## `lib.strings.splitString` usage example + + ```nix + splitString "." 
"foo.bar.baz" + => [ "foo" "bar" "baz" ] + splitString "/" "/usr/local/bin" + => [ "" "usr" "local" "bin" ] + ``` + + ::: + */ + splitString = + sep: s: + let + splits = builtins.filter builtins.isString ( + builtins.split (escapeRegex (builtins.toString sep)) (builtins.toString s) + ); + in + builtins.map (addContextFrom s) splits; + + /** + Remove duplicate elements from the `list`. O(n^2) complexity. + + # Inputs + + `list` + + : Input list + + # Type + + ``` + unique :: [a] -> [a] + ``` + + # Examples + :::{.example} + ## `lib.lists.unique` usage example + + ```nix + unique [ 3 2 3 4 ] + => [ 3 2 4 ] + ``` + + ::: + */ + unique = builtins.foldl' (acc: e: if builtins.elem e acc then acc else acc ++ [ e ]) [ ]; + + /** + Flip the order of the arguments of a binary function. + + # Inputs + + `f` + + : 1\. Function argument + + `a` + + : 2\. Function argument + + `b` + + : 3\. Function argument + + # Type + + ``` + flip :: (a -> b -> c) -> (b -> a -> c) + ``` + + # Examples + :::{.example} + ## `lib.trivial.flip` usage example + + ```nix + flip concat [1] [2] + => [ 2 1 ] + ``` + + ::: + */ + flip = + f: a: b: + f b a; +} diff --git a/lib/nix-patches/default.nix b/lib/nix-patches/default.nix new file mode 100644 index 0000000..887fe6b --- /dev/null +++ b/lib/nix-patches/default.nix @@ -0,0 +1,110 @@ +# Copyright Tom Hubrecht, (2023-2024) +# +# Tom Hubrecht +# +# This software is governed by the CeCILL license under French law and +# abiding by the rules of distribution of free software. You can use, +# modify and/ or redistribute the software under the terms of the CeCILL +# license as circulated by CEA, CNRS and INRIA at the following URL +# "http://www.cecill.info". 
+# +# As a counterpart to the access to the source code and rights to copy, +# modify and redistribute granted by the license, users are provided only +# with a limited warranty and the software's author, the holder of the +# economic rights, and the successive licensors have only limited +# liability. +# +# In this respect, the user's attention is drawn to the risks associated +# with loading, using, modifying and/or developing or reproducing the +# software by the user in light of its specific status of free software, +# that may mean that it is complicated to manipulate, and that also +# therefore means that it is reserved for developers and experienced +# professionals having in-depth computer knowledge. Users are therefore +# encouraged to load and test the software's suitability as regards their +# requirements in conditions enabling the security of their systems and/or +# data to be ensured and, more generally, to use and operate it in the +# same conditions as regards security. +# +# The fact that you are presently reading this means that you have had +# knowledge of the CeCILL license and that you accept its terms. + +{ + patchFile, + excludeGitHubManual ? true, + fetchers ? { }, +}: + +rec { + base = + { pkgs }: + rec { + mkUrlPatch = + attrs: + pkgs.fetchpatch ( + { + hash = pkgs.lib.fakeHash; + } + // attrs + // (pkgs.lib.optionalAttrs (excludeGitHubManual && !(builtins.hasAttr "includes" attrs)) { + excludes = (attrs.excludes or [ ]) ++ [ "nixos/doc/manual/*" ]; + }) + ); + + mkGitHubPatch = + { id, ... }@attrs: + mkUrlPatch ( + (builtins.removeAttrs attrs [ "id" ]) + // { + url = "https://github.com/NixOS/nixpkgs/pull/${builtins.toString id}.diff"; + } + ); + + mkCommitPatch = + { sha, ... 
}@attrs: + mkUrlPatch ( + (builtins.removeAttrs attrs [ "sha" ]) + // { + url = "https://github.com/NixOS/nixpkgs/commit/${builtins.toString sha}.diff"; + } + ); + + patchFunctions = { + commit = mkCommitPatch; + github = mkGitHubPatch; + remote = pkgs.fetchpatch; + static = attrs: attrs.path; + url = mkUrlPatch; + } // fetchers; + + mkPatch = + { + _type ? "github", + ... + }@attrs: + if builtins.hasAttr _type patchFunctions then + patchFunctions.${_type} (builtins.removeAttrs attrs [ "_type" ]) + else + throw "Unknown patch type: ${builtins.toString _type}."; + + mkPatches = v: builtins.map mkPatch ((import patchFile).${v} or [ ]); + + applyPatches = + { + src, + name, + patches ? mkPatches name, + }: + if patches == [ ] then + src + else + pkgs.applyPatches { + inherit patches src; + + name = "${name}-patched"; + }; + + applyPatches' = name: src: applyPatches { inherit name src; }; + }; + + mkNixpkgsSrc = { src, name }: (base { pkgs = import src { }; }).applyPatches { inherit src name; }; +} diff --git a/machines/krz01/K80-support.patch b/machines/krz01/K80-support.patch new file mode 100644 index 0000000..d8f0c3a --- /dev/null +++ b/machines/krz01/K80-support.patch @@ -0,0 +1,179 @@ +From 2abd226ff3093c5a9e18a618fba466853e7ebaf7 Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Tue, 8 Oct 2024 18:27:41 +0200 +Subject: [PATCH] K80 support + +Signed-off-by: Raito Bezarius +--- + docs/development.md | 6 +++- + docs/gpu.md | 1 + + gpu/amd_linux.go | 6 +++- + gpu/gpu.go | 63 ++++++++++++++++++++++++++++++++++++----- + scripts/build_docker.sh | 2 +- + scripts/build_linux.sh | 2 +- + 6 files changed, 69 insertions(+), 11 deletions(-) + +diff --git a/docs/development.md b/docs/development.md +index 2f7b9ecf..9da35931 100644 +--- a/docs/development.md ++++ b/docs/development.md +@@ -51,7 +51,11 @@ Typically the build scripts will auto-detect CUDA, however, if your Linux distro + or installation approach uses unusual paths, you can specify the location by + 
specifying an environment variable `CUDA_LIB_DIR` to the location of the shared + libraries, and `CUDACXX` to the location of the nvcc compiler. You can customize +-a set of target CUDA architectures by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "50;60;70") ++a set of target CUDA architectures by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "35;37;50;60;70") ++ ++To support GPUs older than Compute Capability 5.0, you will need to use an older version of ++the Driver from [Unix Driver Archive](https://www.nvidia.com/en-us/drivers/unix/) (tested with 470) and [CUDA Toolkit Archive](https://developer.nvidia.com/cuda-toolkit-archive) (tested with cuda V11). When you build Ollama, you will need to set two environment variable to adjust the minimum compute capability Ollama supports via `export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/gpu.CudaComputeMajorMin=3\" \"-X=github.com/ollama/ollama/gpu.CudaComputeMinorMin=5\"'"` and the `CMAKE_CUDA_ARCHITECTURES`. To find the Compute Capability of your older GPU, refer to [GPU Compute Capability](https://developer.nvidia.com/cuda-gpus). 
++ + + Then generate dependencies: + +diff --git a/docs/gpu.md b/docs/gpu.md +index a6b559f0..66627611 100644 +--- a/docs/gpu.md ++++ b/docs/gpu.md +@@ -28,6 +28,7 @@ Check your compute compatibility to see if your card is supported: + | 5.0 | GeForce GTX | `GTX 750 Ti` `GTX 750` `NVS 810` | + | | Quadro | `K2200` `K1200` `K620` `M1200` `M520` `M5000M` `M4000M` `M3000M` `M2000M` `M1000M` `K620M` `M600M` `M500M` | + ++For building locally to support older GPUs, see [developer.md](./development.md#linux-cuda-nvidia) + + ### GPU Selection + +diff --git a/gpu/amd_linux.go b/gpu/amd_linux.go +index 6b08ac2e..768fb97a 100644 +--- a/gpu/amd_linux.go ++++ b/gpu/amd_linux.go +@@ -159,7 +159,11 @@ func AMDGetGPUInfo() []GpuInfo { + return []GpuInfo{} + } + +- if int(major) < RocmComputeMin { ++ minVer, err := strconv.Atoi(RocmComputeMajorMin) ++ if err != nil { ++ slog.Error("invalid RocmComputeMajorMin setting", "value", RocmComputeMajorMin, "error", err) ++ } ++ if int(major) < minVer { + slog.Warn(fmt.Sprintf("amdgpu too old gfx%d%x%x", major, minor, patch), "gpu", gpuID) + continue + } +diff --git a/gpu/gpu.go b/gpu/gpu.go +index 781e23df..60d68c33 100644 +--- a/gpu/gpu.go ++++ b/gpu/gpu.go +@@ -16,6 +16,7 @@ import ( + "os" + "path/filepath" + "runtime" ++ "strconv" + "strings" + "sync" + "unsafe" +@@ -38,9 +39,11 @@ const ( + var gpuMutex sync.Mutex + + // With our current CUDA compile flags, older than 5.0 will not work properly +-var CudaComputeMin = [2]C.int{5, 0} ++// (string values used to allow ldflags overrides at build time) ++var CudaComputeMajorMin = "5" ++var CudaComputeMinorMin = "0" + +-var RocmComputeMin = 9 ++var RocmComputeMajorMin = "9" + + // TODO find a better way to detect iGPU instead of minimum memory + const IGPUMemLimit = 1 * format.GibiByte // 512G is what they typically report, so anything less than 1G must be iGPU +@@ -175,11 +178,57 @@ func GetGPUInfo() GpuInfoList { + var memInfo C.mem_info_t + resp := []GpuInfo{} + +- // NVIDIA first +- 
for i := 0; i < gpuHandles.deviceCount; i++ { +- // TODO once we support CPU compilation variants of GPU libraries refine this... +- if cpuVariant == "" && runtime.GOARCH == "amd64" { +- continue ++ // Load ALL libraries ++ cHandles = initCudaHandles() ++ minMajorVer, err := strconv.Atoi(CudaComputeMajorMin) ++ if err != nil { ++ slog.Error("invalid CudaComputeMajorMin setting", "value", CudaComputeMajorMin, "error", err) ++ } ++ minMinorVer, err := strconv.Atoi(CudaComputeMinorMin) ++ if err != nil { ++ slog.Error("invalid CudaComputeMinorMin setting", "value", CudaComputeMinorMin, "error", err) ++ } ++ ++ // NVIDIA ++ for i := range cHandles.deviceCount { ++ if cHandles.cudart != nil || cHandles.nvcuda != nil { ++ gpuInfo := CudaGPUInfo{ ++ GpuInfo: GpuInfo{ ++ Library: "cuda", ++ }, ++ index: i, ++ } ++ var driverMajor int ++ var driverMinor int ++ if cHandles.cudart != nil { ++ C.cudart_bootstrap(*cHandles.cudart, C.int(i), &memInfo) ++ } else { ++ C.nvcuda_bootstrap(*cHandles.nvcuda, C.int(i), &memInfo) ++ driverMajor = int(cHandles.nvcuda.driver_major) ++ driverMinor = int(cHandles.nvcuda.driver_minor) ++ } ++ if memInfo.err != nil { ++ slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err)) ++ C.free(unsafe.Pointer(memInfo.err)) ++ continue ++ } ++ ++ if int(memInfo.major) < minMajorVer || (int(memInfo.major) == minMajorVer && int(memInfo.minor) < minMinorVer) { ++ slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. 
Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor)) ++ continue ++ } ++ gpuInfo.TotalMemory = uint64(memInfo.total) ++ gpuInfo.FreeMemory = uint64(memInfo.free) ++ gpuInfo.ID = C.GoString(&memInfo.gpu_id[0]) ++ gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor) ++ gpuInfo.MinimumMemory = cudaMinimumMemory ++ gpuInfo.DependencyPath = depPath ++ gpuInfo.Name = C.GoString(&memInfo.gpu_name[0]) ++ gpuInfo.DriverMajor = driverMajor ++ gpuInfo.DriverMinor = driverMinor ++ ++ // TODO potentially sort on our own algorithm instead of what the underlying GPU library does... ++ cudaGPUs = append(cudaGPUs, gpuInfo) + } + gpuInfo := GpuInfo{ + Library: "cuda", +diff --git a/scripts/build_docker.sh b/scripts/build_docker.sh +index e91c56ed..c03bc25f 100755 +--- a/scripts/build_docker.sh ++++ b/scripts/build_docker.sh +@@ -3,7 +3,7 @@ + set -eu + + export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")} +-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'" ++export GOFLAGS=${GOFLAGS:-"'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"} + + # We use 2 different image repositories to handle combining architecture images into multiarch manifest + # (The ROCm image is x86 only and is not a multiarch manifest) +diff --git a/scripts/build_linux.sh b/scripts/build_linux.sh +index 27c4ff1f..e7e6d0dd 100755 +--- a/scripts/build_linux.sh ++++ b/scripts/build_linux.sh +@@ -3,7 +3,7 @@ + set -eu + + export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")} +-export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'" ++export GOFLAGS=${GOFLAGS:-"'-ldflags=-w -s 
\"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'"} + + BUILD_ARCH=${BUILD_ARCH:-"amd64 arm64"} + export AMDGPU_TARGETS=${AMDGPU_TARGETS:=""} +-- +2.46.0 + diff --git a/machines/krz01/_configuration.nix b/machines/krz01/_configuration.nix new file mode 100644 index 0000000..21a6f59 --- /dev/null +++ b/machines/krz01/_configuration.nix @@ -0,0 +1,79 @@ +{ + config, + lib, + pkgs, + meta, + name, + ... +}: + +lib.extra.mkConfig { + enabledModules = [ + # INFO: This list needs to stay sorted alphabetically + ]; + + enabledServices = [ + # INFO: This list needs to stay sorted alphabetically + # Machine learning API machine + "microvm-ml01" + "microvm-router01" + "nvidia-tesla-k80" + "proxmox" + ]; + + extraConfig = { + microvm = { + host.enable = true; + }; + dgn-hardware = { + useZfs = true; + zfsPools = [ + "dpool" + "ppool0" + ]; + }; + + services.netbird.enable = true; + + # We are going to use CUDA here. + nixpkgs.config.cudaSupport = true; + hardware.graphics.enable = true; + environment.systemPackages = [ + ((pkgs.openai-whisper-cpp.override { cudaPackages = pkgs.cudaPackages_11; }).overrideAttrs (old: { + src = pkgs.fetchFromGitHub { + owner = "ggerganov"; + repo = "whisper.cpp"; + rev = "v1.7.1"; + hash = "sha256-EDFUVjud79ZRCzGbOh9L9NcXfN3ikvsqkVSOME9F9oo="; + }; + env = { + WHISPER_CUBLAS = ""; + GGML_CUDA = "1"; + }; + # We only need Compute Capability 3.7. + CUDA_ARCH_FLAGS = [ "sm_37" ]; + # We are GPU-only anyway. + patches = (old.patches or [ ]) ++ [ + ./no-weird-microarch.patch + ./all-nvcc-arch.patch + ]; + })) + ]; + + services = { + ollama = { + enable = true; + host = meta.network.${name}.netbirdIp; + package = pkgs.callPackage ./ollama.nix { + cudaPackages = pkgs.cudaPackages_11; + # We need to thread our nvidia x11 driver for CUDA. 
+ extraLibraries = [ config.hardware.nvidia.package ]; + }; + }; + }; + + networking.firewall.interfaces.wt0.allowedTCPPorts = [ config.services.ollama.port ]; + }; + + root = ./.; +} diff --git a/machines/krz01/_hardware-configuration.nix b/machines/krz01/_hardware-configuration.nix new file mode 100644 index 0000000..6b39e1a --- /dev/null +++ b/machines/krz01/_hardware-configuration.nix @@ -0,0 +1,50 @@ +{ + config, + lib, + modulesPath, + ... +}: + +{ + imports = [ (modulesPath + "/installer/scan/not-detected.nix") ]; + + boot = { + initrd = { + availableKernelModules = [ + "ehci_pci" + "ahci" + "mpt3sas" + "usbhid" + "sd_mod" + ]; + kernelModules = [ ]; + }; + kernelModules = [ "kvm-intel" ]; + extraModulePackages = [ ]; + }; + + fileSystems."/" = { + device = "/dev/disk/by-uuid/92bf4d66-2693-4eca-9b26-f86ae09d468d"; + fsType = "ext4"; + }; + + boot.initrd.luks.devices."mainfs" = { + device = "/dev/disk/by-uuid/26f9737b-28aa-4c3f-bd3b-b028283cef88"; + keyFileSize = 1; + keyFile = "/dev/zero"; + }; + + fileSystems."/boot" = { + device = "/dev/disk/by-uuid/280C-8844"; + fsType = "vfat"; + options = [ + "fmask=0022" + "dmask=0022" + ]; + }; + + swapDevices = [ ]; + + nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux"; + hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware; +} diff --git a/machines/krz01/all-nvcc-arch.patch b/machines/krz01/all-nvcc-arch.patch new file mode 100644 index 0000000..6696836 --- /dev/null +++ b/machines/krz01/all-nvcc-arch.patch @@ -0,0 +1,26 @@ +From 2278389ef9ac9231349440aa68f9544ddc69cdc7 Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Wed, 9 Oct 2024 13:37:08 +0200 +Subject: [PATCH] fix: sm_37 for nvcc + +Signed-off-by: Raito Bezarius +--- + Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index 2ccb750..70dfd9b 100644 +--- a/Makefile ++++ b/Makefile +@@ -537,7 +537,7 @@ endif #GGML_CUDA_NVCC + ifdef CUDA_DOCKER_ARCH + MK_NVCCFLAGS 
+= -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH) + else ifndef CUDA_POWER_ARCH +- MK_NVCCFLAGS += -arch=native ++ MK_NVCCFLAGS += -arch=sm_37 + endif # CUDA_DOCKER_ARCH + + ifdef GGML_CUDA_FORCE_DMMV +-- +2.46.0 + diff --git a/machines/krz01/disable-git.patch b/machines/krz01/disable-git.patch new file mode 100644 index 0000000..c305c48 --- /dev/null +++ b/machines/krz01/disable-git.patch @@ -0,0 +1,20 @@ +diff --git c/llm/generate/gen_common.sh i/llm/generate/gen_common.sh +index 3825c155..238a74a7 100644 +--- c/llm/generate/gen_common.sh ++++ i/llm/generate/gen_common.sh +@@ -69,6 +69,7 @@ git_module_setup() { + } + + apply_patches() { ++ return + # apply temporary patches until fix is upstream + for patch in ../patches/*.patch; do + git -c 'user.name=nobody' -c 'user.email=<>' -C ${LLAMACPP_DIR} am ${patch} +@@ -133,6 +134,7 @@ install() { + + # Keep the local tree clean after we're done with the build + cleanup() { ++ return + (cd ${LLAMACPP_DIR}/ && git checkout CMakeLists.txt) + + if [ -n "$(ls -A ../patches/*.diff)" ]; then diff --git a/machines/krz01/microvm-ml01.nix b/machines/krz01/microvm-ml01.nix new file mode 100644 index 0000000..b0a8be8 --- /dev/null +++ b/machines/krz01/microvm-ml01.nix @@ -0,0 +1,22 @@ +_: { + microvm.autostart = [ "ml01" ]; + microvm.vms.ml01 = { + config = { + networking.hostName = "ml01"; + microvm = { + hypervisor = "cloud-hypervisor"; + vcpu = 4; + mem = 4096; + balloonMem = 2048; + shares = [ + { + source = "/nix/store"; + mountPoint = "/nix/.ro-store"; + tag = "ro-store"; + proto = "virtiofs"; + } + ]; + }; + }; + }; +} diff --git a/machines/krz01/microvm-router01.nix b/machines/krz01/microvm-router01.nix new file mode 100644 index 0000000..9af9bdf --- /dev/null +++ b/machines/krz01/microvm-router01.nix @@ -0,0 +1,16 @@ +_: { + microvm.autostart = [ "router01" ]; + microvm.vms.router01 = { + config = { + networking.hostName = "router01"; + microvm.shares = [ + { + source = "/nix/store"; + mountPoint = 
"/nix/.ro-store"; + tag = "ro-store"; + proto = "virtiofs"; + } + ]; + }; + }; +} diff --git a/machines/krz01/no-weird-microarch.patch b/machines/krz01/no-weird-microarch.patch new file mode 100644 index 0000000..7a93b53 --- /dev/null +++ b/machines/krz01/no-weird-microarch.patch @@ -0,0 +1,34 @@ +From 51568b61ef63ecd97867562571411082c32751d3 Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Wed, 9 Oct 2024 13:36:51 +0200 +Subject: [PATCH] fix: avx & f16c in Makefile + +Signed-off-by: Raito Bezarius +--- + Makefile | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/Makefile b/Makefile +index 32b7cbb..2ccb750 100644 +--- a/Makefile ++++ b/Makefile +@@ -361,12 +361,12 @@ ifndef RISCV + + ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) + # Use all CPU extensions that are available: +- MK_CFLAGS += -march=native -mtune=native +- HOST_CXXFLAGS += -march=native -mtune=native ++ # MK_CFLAGS += -march=native -mtune=native ++ # HOST_CXXFLAGS += -march=native -mtune=native + + # Usage AVX-only +- #MK_CFLAGS += -mfma -mf16c -mavx +- #MK_CXXFLAGS += -mfma -mf16c -mavx ++ MK_CFLAGS += -mf16c -mavx ++ MK_CXXFLAGS += -mf16c -mavx + + # Usage SSSE3-only (Not is SSE3!) + #MK_CFLAGS += -mssse3 +-- +2.46.0 + diff --git a/machines/krz01/nvidia-tesla-k80.nix b/machines/krz01/nvidia-tesla-k80.nix new file mode 100644 index 0000000..3d7f6ba --- /dev/null +++ b/machines/krz01/nvidia-tesla-k80.nix @@ -0,0 +1,8 @@ +{ config, ... }: +{ + nixpkgs.config.nvidia.acceptLicense = true; + # Tesla K80 is not supported by the latest driver. + hardware.nvidia.package = config.boot.kernelPackages.nvidia_x11_legacy470; + # Don't ask. 
+ services.xserver.videoDrivers = [ "nvidia" ]; +} diff --git a/machines/krz01/ollama.nix b/machines/krz01/ollama.nix new file mode 100644 index 0000000..6e252c1 --- /dev/null +++ b/machines/krz01/ollama.nix @@ -0,0 +1,243 @@ +{ + lib, + buildGoModule, + fetchFromGitHub, + buildEnv, + linkFarm, + overrideCC, + makeWrapper, + stdenv, + addDriverRunpath, + nix-update-script, + + cmake, + gcc11, + clblast, + libdrm, + rocmPackages, + cudaPackages, + darwin, + autoAddDriverRunpath, + extraLibraries ? [ ], + + nixosTests, + testers, + ollama, + ollama-rocm, + ollama-cuda, + + config, + # one of `[ null false "rocm" "cuda" ]` + acceleration ? null, +}: + +assert builtins.elem acceleration [ + null + false + "rocm" + "cuda" +]; + +let + pname = "ollama"; + version = "2024-09-10-cc35"; + + src = fetchFromGitHub { + owner = "aliotard"; + repo = "ollama"; + rev = "34827c01f7723c7f5f9f5e392fe85f5a4a5d5fc0"; + hash = "sha256-xFNuqcW7YWeyCyw5QLBnCHHTSMITR6LJkJT0CXZC+Y8="; + fetchSubmodules = true; + }; + + vendorHash = "sha256-hSxcREAujhvzHVNwnRTfhi0MKI3s8HNavER2VLz6SYk="; + + validateFallback = lib.warnIf (config.rocmSupport && config.cudaSupport) (lib.concatStrings [ + "both `nixpkgs.config.rocmSupport` and `nixpkgs.config.cudaSupport` are enabled, " + "but they are mutually exclusive; falling back to cpu" + ]) (!(config.rocmSupport && config.cudaSupport)); + shouldEnable = + mode: fallback: (acceleration == mode) || (fallback && acceleration == null && validateFallback); + + rocmRequested = shouldEnable "rocm" config.rocmSupport; + cudaRequested = shouldEnable "cuda" config.cudaSupport; + + enableRocm = rocmRequested && stdenv.isLinux; + enableCuda = cudaRequested && stdenv.isLinux; + + rocmLibs = [ + rocmPackages.clr + rocmPackages.hipblas + rocmPackages.rocblas + rocmPackages.rocsolver + rocmPackages.rocsparse + rocmPackages.rocm-device-libs + rocmPackages.rocm-smi + ]; + rocmClang = linkFarm "rocm-clang" { llvm = rocmPackages.llvm.clang; }; + rocmPath = buildEnv { + name 
= "rocm-path"; + paths = rocmLibs ++ [ rocmClang ]; + }; + + cudaLibs = [ + cudaPackages.cuda_cudart + cudaPackages.libcublas + cudaPackages.cuda_cccl + ]; + cudaToolkit = buildEnv { + name = "cuda-merged"; + paths = map lib.getLib cudaLibs ++ [ + (lib.getOutput "static" cudaPackages.cuda_cudart) + (lib.getBin (cudaPackages.cuda_nvcc.__spliced.buildHost or cudaPackages.cuda_nvcc)) + ]; + }; + + metalFrameworks = with darwin.apple_sdk_11_0.frameworks; [ + Accelerate + Metal + MetalKit + MetalPerformanceShaders + ]; + + wrapperOptions = + [ + # ollama embeds llama-cpp binaries which actually run the ai models + # these llama-cpp binaries are unaffected by the ollama binary's DT_RUNPATH + # LD_LIBRARY_PATH is temporarily required to use the gpu + # until these llama-cpp binaries can have their runpath patched + "--suffix LD_LIBRARY_PATH : '${addDriverRunpath.driverLink}/lib'" + "--suffix LD_LIBRARY_PATH : '${lib.makeLibraryPath (map lib.getLib extraLibraries)}'" + ] + ++ lib.optionals enableRocm [ + "--suffix LD_LIBRARY_PATH : '${rocmPath}/lib'" + "--set-default HIP_PATH '${rocmPath}'" + ] + ++ lib.optionals enableCuda [ + "--suffix LD_LIBRARY_PATH : '${lib.makeLibraryPath (map lib.getLib cudaLibs)}'" + ]; + wrapperArgs = builtins.concatStringsSep " " wrapperOptions; + + goBuild = + if enableCuda then buildGoModule.override { stdenv = overrideCC stdenv gcc11; } else buildGoModule; + inherit (lib) licenses platforms maintainers; +in +goBuild { + inherit + pname + version + src + vendorHash + ; + + env = + lib.optionalAttrs enableRocm { + ROCM_PATH = rocmPath; + CLBlast_DIR = "${clblast}/lib/cmake/CLBlast"; + } + // lib.optionalAttrs enableCuda { CUDA_LIB_DIR = "${cudaToolkit}/lib"; } + // { + CMAKE_CUDA_ARCHITECTURES = "35;37"; + }; + + nativeBuildInputs = + [ cmake ] + ++ lib.optionals enableRocm [ rocmPackages.llvm.bintools ] + ++ lib.optionals enableCuda [ cudaPackages.cuda_nvcc ] + ++ lib.optionals (enableRocm || enableCuda) [ + makeWrapper + autoAddDriverRunpath + 
] + ++ lib.optionals stdenv.isDarwin metalFrameworks; + + buildInputs = + lib.optionals enableRocm (rocmLibs ++ [ libdrm ]) + ++ lib.optionals enableCuda cudaLibs + ++ lib.optionals stdenv.isDarwin metalFrameworks; + + patches = [ + # disable uses of `git` in the `go generate` script + # ollama's build script assumes the source is a git repo, but nix removes the git directory + # this also disables necessary patches contained in `ollama/llm/patches/` + # those patches are applied in `postPatch` + ./disable-git.patch + ]; + + postPatch = '' + # replace inaccurate version number with actual release version + substituteInPlace version/version.go --replace-fail 0.0.0 '${version}' + + # apply ollama's patches to `llama.cpp` submodule + for diff in llm/patches/*; do + patch -p1 -d llm/llama.cpp < $diff + done + ''; + + overrideModAttrs = _: _: { + # don't run llama.cpp build in the module fetch phase + preBuild = ""; + }; + + preBuild = '' + # disable uses of `git`, since nix removes the git directory + export OLLAMA_SKIP_PATCHING=true + # build llama.cpp libraries for ollama + go generate ./... 
+ ''; + + postFixup = + '' + # the app doesn't appear functional at the moment, so hide it + mv "$out/bin/app" "$out/bin/.ollama-app" + '' + + lib.optionalString (enableRocm || enableCuda) '' + # expose runtime libraries necessary to use the gpu + wrapProgram "$out/bin/ollama" ${wrapperArgs} + ''; + + ldflags = [ + "-s" + "-w" + "-X=github.com/ollama/ollama/version.Version=${version}" + "-X=github.com/ollama/ollama/server.mode=release" + "-X=github.com/ollama/ollama/gpu.CudaComputeMajorMin=3" + "-X=github.com/ollama/ollama/gpu.CudaComputeMinorMin=5" + ]; + + passthru = { + tests = + { + inherit ollama; + version = testers.testVersion { + inherit version; + package = ollama; + }; + } + // lib.optionalAttrs stdenv.isLinux { + inherit ollama-rocm ollama-cuda; + service = nixosTests.ollama; + service-cuda = nixosTests.ollama-cuda; + service-rocm = nixosTests.ollama-rocm; + }; + + updateScript = nix-update-script { }; + }; + + meta = { + description = + "Get up and running with large language models locally" + + lib.optionalString rocmRequested ", using ROCm for AMD GPU acceleration" + + lib.optionalString cudaRequested ", using CUDA for NVIDIA GPU acceleration"; + homepage = "https://github.com/ollama/ollama"; + changelog = "https://github.com/ollama/ollama/releases/tag/v${version}"; + license = licenses.mit; + platforms = if (rocmRequested || cudaRequested) then platforms.linux else platforms.unix; + mainProgram = "ollama"; + maintainers = with maintainers; [ + abysssol + dit7ya + elohmeier + roydubnium + ]; + }; +} diff --git a/machines/krz01/proxmox/default.nix b/machines/krz01/proxmox/default.nix new file mode 100644 index 0000000..2f501e1 --- /dev/null +++ b/machines/krz01/proxmox/default.nix @@ -0,0 +1,14 @@ +{ sources, lib, ... 
}: +let + proxmox-nixos = import sources.proxmox-nixos; +in +{ + imports = [ proxmox-nixos.nixosModules.proxmox-ve ]; + services.proxmox-ve.enable = true; + nixpkgs.overlays = [ proxmox-nixos.overlays.x86_64-linux ]; + networking.firewall = { + trustedInterfaces = [ "wt0" ]; + allowedTCPPorts = lib.mkForce [ 22 ]; + }; + +} diff --git a/machines/krz01/secrets/secrets.nix b/machines/krz01/secrets/secrets.nix new file mode 100644 index 0000000..45004b9 --- /dev/null +++ b/machines/krz01/secrets/secrets.nix @@ -0,0 +1,3 @@ +(import ../../../keys).mkSecrets [ "krz01" ] [ + # List of secrets for krz01 +] diff --git a/meta/README.md b/meta/README.md new file mode 100644 index 0000000..5446d5a --- /dev/null +++ b/meta/README.md @@ -0,0 +1,90 @@ +Metadata of the DGNum infrastructure +==================================== + +# DNS + +The DNS configuration of our infrastructure is completely defined with the metadata contained in this folder. + +The different machines have records pointing to their IP addresses when they exist: + +- $node.$site.infra.dgnum.eu points IN A $ipv4 +- $node.$site.infra.dgnum.eu points IN AAAA $ipv6 + +- v4.$node.$site.infra.dgnum.eu points IN A $ipv4 +- v6.$node.$site.infra.dgnum.eu points IN AAAA $ipv6 + +Then the services hosted on those machines can be accessed through redirections: + +- $service.dgnum.eu IN CNAME $node.$site.infra.dgnum.eu + +or, when targeting only a specific IP protocol: + +- $service4.dgnum.eu IN CNAME ipv4.$node.$site.infra.dgnum.eu +- $service6.dgnum.eu IN CNAME ipv6.$node.$site.infra.dgnum.eu + +Extra records exist for ns, mail configuration, or the main website but shouldn't change or be tinkered with. + +# Network + +The network configuration (except the NetBird vpn) is defined statically. + +TODO. + +# Nixpkgs + +Machines can use different versions of NixOS, the supported and default ones are specified here. 
+ +## How to add a new version + +- Switch to a new branch `nixos-$VERSION` +- Run the following command + +```bash +npins add channel nixos-$VERSION +``` + +- Edit `meta/nixpkgs.nix` and add `$VERSION` to the supported version. +- Read the release notes and check for changes. +- Update the nodes versions +- Create a PR so that the CI check that it builds + + +# Nodes + +The nodes are declared statically, several options can be configured: + +- `deployment`, the colmena deployment option +- `stateVersion`, the state version of the node +- `nixpkgs`, the version of NixOS to use +- `admins`, the list of administrators specific to this node, they will be given root access +- `adminGroups`, a list of groups whose members will be added to `admins` +- `site`, the physical location of the node +- `vm-cluster`, the VM cluster hosting the node when appropriate + +Some options are set automatically, for example: + +- `deployment.targetHost` will be inferred from the network configuration +- `deployment.tags` will contain `infra-$site`, so that a full site can be redeployed at once + +# Organization + +The organization defines the groups and members of the infrastructure team, +one day this information will be synchronized in Kanidm. + +## Members + +For a member to be allowed access to a node, they must be defined in the `members` attribute set, +and their SSH keys must be available in the keys folder. + +## Groups + +Groups exist only to simplify the management of accesses: + +- The `root` group will be given administrator access on all nodes +- The `iso` group will have its keys included in the ISOs built from the iso folder + +Extra groups can be created at will, to be used in node-specific modules. + +# Module + +The meta configuration can be evaluated as a module, to perform checks on the structure of the data. 
diff --git a/meta/default.nix b/meta/default.nix new file mode 100644 index 0000000..cb622a2 --- /dev/null +++ b/meta/default.nix @@ -0,0 +1,17 @@ +### +# Metadata for the nodes. You can add custom attributes, they are +# accessible through the specialArg meta in the config. + +lib: + +(lib.evalModules { + modules = [ + ./options.nix + { + network = import ./network.nix; + nodes = import ./nodes.nix; + organization = import ./organization.nix; + } + ]; + class = "dgnumMeta"; +}).config diff --git a/meta/dns.nix b/meta/dns.nix new file mode 100644 index 0000000..8fa603e --- /dev/null +++ b/meta/dns.nix @@ -0,0 +1,142 @@ +{ lib, dns, ... }: + +let + inherit (lib) mapAttrs' nameValuePair; + inherit (lib.extra) fuseAttrs mapSingleFuse; + + inherit (dns.lib.combinators) mx spf ttl; + + meta = (import ./.) lib; + + mkCNAME = host: { CNAME = [ host ]; }; + + mkHosted = + server: + { + dual ? [ ], + v4 ? [ ], + v6 ? [ ], + }: + let + base = "${server}.${meta.nodes.${server}.site}.infra"; + mkHost = host: mapSingleFuse (_: mkCNAME host); + in + fuseAttrs [ + (mkHost base dual) + (mkHost "v4.${base}" v4) + (mkHost "v6.${base}" v6) + ]; + + cnames = builtins.mapAttrs (_: to: { CNAME = [ to ]; }) { + "dev" = "dev.pages.codeberg.page."; + "irc" = "public.p.lahfa.xyz."; + "webmail" = "kurisu.dual.lahfa.xyz."; + + # Transition to new site names + "web01.dmi01.infra" = "web01.rat01.infra"; + "web02.dmi01.infra" = "web02.rat01.infra"; + "compute01.par01.infra" = "compute01.pav01.infra"; + "storage01.par01.infra" = "storage01.pav01.infra"; + + # Miscelleanous redirections + "traque" = "traque.katvayor.net."; + + # Temporary redirection for the BDS + # FIXME: finish the django apps module + "gestiobds.dj" = "cof.ens.fr."; + }; + + hosted = fuseAttrs ( + builtins.attrValues ( + builtins.mapAttrs mkHosted + { + } + ) + ); + + kurisuDKIM = [ + { + selector = "kurisu"; + k = "rsa"; + s = [ "email" ]; + p = 
"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDa5KuK6ry+Ss2VsKL0FsDpoBlc7dcXZyp62fGqFJFJv4/GEivPWiwbr2o5oLKjQVI4kIYjIZsyQJFtI/Xcu4BrtDdBknb5WvCN8V9EvIMh3pfXOBLVx4oqw4BR7wF8Rw1J9xyfgsfK+m2n0M39XlMHH0Nuy6kU48jH9vYpZs17ZQIDAQAB"; + } + ]; +in + +{ + SOA = { + nameServer = "ns01.dgnum.eu."; + adminEmail = "dns.dgnum.eu"; + retry = 3600; + minimum = 300; + }; + + # Primary DNS servers + NS = [ + "ns01.dgnum.eu." # ns-03.hubrecht.ovh + "ns02.dgnum.eu." # kurisu.lahfa.xyz + ]; + + # dgnum.codeberg.pages + # ALIAS = [ "codeberg.page" ]; + A = [ "217.197.91.145" ]; + AAAA = [ "2001:67c:1401:20f0::1" ]; + + MX = map (ttl 3600) [ (mx.mx 10 "kurisu.lahfa.xyz.") ]; + + SRV = [ + { + service = "autodiscover"; + proto = "tcp"; + port = 443; + target = "autoconfig.mail.lahfa.xyz."; + } + ]; + + TXT = [ + "dgnum.codeberg.page" + (spf.strict [ "a:kurisu.lahfa.xyz" ]) + ]; + DMARC = [ { p = "none"; } ]; + DKIM = kurisuDKIM; + + subdomains = + hosted + // cnames + // { + ns01 = { + A = [ "51.178.27.125" ]; + AAAA = [ "2001:41d0:305:2100::542c" ]; + }; + ns02 = { + A = [ "163.172.69.160" ]; + AAAA = [ "2001:bc8:38ee::1" ]; + }; + } + // { + infra = { + MX = map (ttl 3600) [ (mx.mx 10 "kurisu.lahfa.xyz.") ]; + + TXT = [ (spf.strict [ "a:kurisu.lahfa.xyz" ]) ]; + DMARC = [ { p = "none"; } ]; + DKIM = kurisuDKIM; + + subdomains = mapAttrs' ( + host: + { site, ... 
}: + nameValuePair "${host}.${site}" ( + with meta.network.${host}.addresses; + { + A = ipv4; + AAAA = ipv6; + subdomains = { + v4.A = ipv4; + v6.AAAA = ipv6; + }; + } + ) + ) meta.nodes; + }; + }; +} diff --git a/meta/network.nix b/meta/network.nix new file mode 100644 index 0000000..6216bf2 --- /dev/null +++ b/meta/network.nix @@ -0,0 +1,207 @@ +{ + bridge01 = { + hostId = "f57f3ba0"; + + interfaces = { }; + netbirdIp = null; + }; + + compute01 = { + interfaces = { + eno1 = { + ipv4 = [ + { + address = "129.199.146.147"; + prefixLength = 24; + } + { + address = "192.168.1.147"; + prefixLength = 24; + } + ]; + + gateways = [ "129.199.146.254" ]; + enableDefaultDNS = true; + }; + }; + + hostId = "8df60941"; + netbirdIp = "100.80.75.197"; + }; + + krz01 = { + interfaces = { + eno1 = { + ipv4 = [ + { + address = "129.199.146.21"; + prefixLength = 24; + } + { + address = "192.168.1.145"; + prefixLength = 24; + } + ]; + + gateways = [ "129.199.146.254" ]; + enableDefaultDNS = true; + }; + }; + + hostId = "bd11e8fc"; + netbirdIp = "100.80.103.206"; + }; + + geo01 = { + interfaces = { + eno1 = { + ipv4 = [ + { + address = "129.199.210.194"; + prefixLength = 24; + } + ]; + + gateways = [ "129.199.210.254" ]; + + dns = [ + "129.199.96.11" + "129.199.72.99" + ]; + }; + }; + + hostId = "b88fee0c"; + netbirdIp = "100.80.8.66"; + }; + + geo02 = { + interfaces = { + eno1 = { + ipv4 = [ + { + address = "129.199.210.69"; + prefixLength = 24; + } + ]; + + gateways = [ "129.199.210.254" ]; + + dns = [ + "129.199.96.11" + "129.199.72.99" + ]; + }; + }; + + hostId = "45d65237"; + netbirdIp = "100.80.233.249"; + }; + + storage01 = { + interfaces = { + eno1 = { + ipv4 = [ + { + address = "129.199.146.148"; + prefixLength = 24; + } + { + address = "192.168.1.148"; + prefixLength = 24; + } + ]; + + gateways = [ "129.199.146.254" ]; + enableDefaultDNS = true; + }; + }; + + hostId = "d4e7c369"; + netbirdIp = "100.80.156.154"; + }; + + vault01 = { + interfaces = { + vlan-uplink-cri = { + 
ipv4 = [ + { + # see also machines/vault01/networking.nix + address = "129.199.195.129"; + prefixLength = 32; + } + ]; + gateways = [ ]; + enableDefaultDNS = true; + }; + }; + + hostId = "e83b600d"; + netbirdIp = "100.80.255.180"; + }; + + web01 = { + interfaces = { + ens3 = { + ipv4 = [ + { + address = "129.199.129.53"; + prefixLength = 24; + } + ]; + + gateways = [ "129.199.129.1" ]; + enableDefaultDNS = true; + }; + }; + + hostId = "050df79e"; + netbirdIp = "100.80.77.90"; + }; + + web02 = { + interfaces = { + ens3 = { + ipv4 = [ + { + address = "129.199.129.235"; + prefixLength = 24; + } + ]; + + gateways = [ "129.199.129.1" ]; + enableDefaultDNS = true; + }; + }; + + hostId = "b431ca10"; + netbirdIp = null; # web02 is not to be connected on the VPN + }; + + rescue01 = { + interfaces = { + ens18 = { + ipv6 = [ + { + address = "2a01:e0a:de4:a0e1:2d73:2a7e:18db:5728"; + prefixLength = 64; + } + ]; + + ipv4 = [ + { + address = "192.168.0.232"; + prefixLength = 21; + } + ]; + gateways = [ "192.168.0.1" ]; + enableDefaultDNS = true; + }; + }; + + addresses.ipv4 = [ "82.67.34.230" ]; + + hostId = "007f0200"; + netbirdIp = "100.80.97.140"; + }; +} diff --git a/meta/nixpkgs.nix b/meta/nixpkgs.nix new file mode 100644 index 0000000..317cdd3 --- /dev/null +++ b/meta/nixpkgs.nix @@ -0,0 +1,11 @@ +{ + # Default version of nixpkgs to use + default = "24.05"; + + # Supported nixpkgs versions + supported = [ + "unstable" + "23.11" + "24.05" + ]; +} diff --git a/meta/nodes.nix b/meta/nodes.nix new file mode 100644 index 0000000..884cf71 --- /dev/null +++ b/meta/nodes.nix @@ -0,0 +1,137 @@ +### +# File specifying all the deployement options for the nodes administrated by the dgnum. 
+# +# Node metadata template is: +# +# NODE_NAME = { +# adminGroups = []; # List of groups that have root access +# admins = []; # List of individuals that have root access +# deployment = {}; # Colmena deployment options +# nixpkgs = "unstable" or "22.11"; # nixpkgs version +# } + +/* + Liste des différents sites : + - rat01 -> VM du NPSPI + - pav01 -> Salle serveur sous le pavillon Pasteur + - oik01 -> Local DGNum Jourdan + - hyp01 -> Salle serveur Hypnos 1 + - luj01 -> VM de Luj +*/ +{ + bridge01 = { + site = "hyp01"; + + hashedPassword = "$y$j9T$EPJdz70kselouXAVUmAH01$8nYbUBY9NPTMfYigegY0qFSdxJwhqzW8sFacDqEYCP5"; + + stateVersion = "24.05"; + + adminGroups = [ "fai" ]; + + deployment = { + targetHost = "fd26:baf9:d250:8000::ffff"; + sshOptions = [ + "-J" + "root@vault01.hyp01.infra.dgnum.eu" + ]; + }; + }; + + web01 = { + site = "rat01"; + + deployment.tags = [ "web" ]; + + hashedPassword = "$y$j9T$9YqXO93VJE/GP3z8Sh4h51$hrBsEPL2O1eP/wBZTrNT8XV906V4JKbQ0g04IWBcyd2"; + + stateVersion = "23.05"; + vm-cluster = "Hyperviseur NPS"; + + nixpkgs = "24.05"; + }; + + compute01 = { + site = "pav01"; + + hashedPassword = "$y$j9T$2nxZHq84G7fWvWMEaGavE/$0ADnmD9qMpXJJ.rWWH9086EakvZ3wAg0mSxZYugOf3C"; + + stateVersion = "23.05"; + nix-modules = [ "services/stirling-pdf" ]; + nixpkgs = "24.05"; + }; + + geo01 = { + site = "oik01"; + deployment.tags = [ "geo" ]; + + hashedPassword = "$y$j9T$2XmDpJu.QLhV57yYCh5Lf1$LK.X0HKB02Q0Ujvhj5nIofW2IRrIAL/Uxnvl9AXM1L8"; + + stateVersion = "24.05"; + nixpkgs = "24.05"; + }; + + geo02 = { + site = "oik01"; + deployment.tags = [ "geo" ]; + + hashedPassword = "$y$j9T$Q4fbMpSm9beWu4DPNAR9t0$dx/1pH4GPY72LpS5ZiECXAZFDdxwmIywztsX.qo2VVA"; + + stateVersion = "24.05"; + nixpkgs = "24.05"; + }; + + krz01 = { + site = "pav01"; + + hashedPassword = "$y$j9T$eNZQgDN.J5y7KTG2hXgat1$J1i5tjx5dnSZu.C9B7swXi5zMFIkUnmRrnmyLHFAt8/"; + + stateVersion = "24.05"; + nixpkgs = "unstable"; + + adminGroups = [ "lab" ]; + }; + + storage01 = { + site = "pav01"; + + 
hashedPassword = "$y$j9T$tvRu1EJ9MwDSvEm0ogwe70$bKSw6nNteN0L3NOy2Yix7KlIvO/oROQmQ.Ynq002Fg8"; + + stateVersion = "23.11"; + nixpkgs = "24.05"; + + nix-modules = [ "services/forgejo-nix-runners" ]; + }; + + vault01 = { + site = "hyp01"; + deployment.targetHost = "vault01.hyp01.infra.dgnum.eu"; + + hashedPassword = "$y$j9T$5osXVNxCDxu3jIndcyh7G.$UrjiDRpMu3W59tKHLGNdLWllZh.4p8IM4sBS5SrNrN1"; + + stateVersion = "23.11"; + nixpkgs = "24.05"; + + adminGroups = [ "fai" ]; + }; + + web02 = { + site = "rat01"; + + hashedPassword = "$y$j9T$p42UVNy78PykkQOjPwXNJ/$B/zCUOrHXVSFGUY63wnViMiSmU2vCWsiX0y62qqgNQ5"; + + stateVersion = "24.05"; + nixpkgs = "24.05"; + vm-cluster = "Hyperviseur NPS"; + }; + + rescue01 = { + site = "luj01"; + + deployment.targetHost = "v6.rescue01.luj01.infra.dgnum.eu"; + + hashedPassword = "$y$j9T$nqoMMu/axrD0m8AlUFdbs.$UFVmIdPAOHBe2jJv5HJJTcDgINC7LTnSGRQNs9zS1mC"; + + stateVersion = "23.11"; + vm-cluster = "Hyperviseur Luj"; + }; +} diff --git a/meta/options.nix b/meta/options.nix new file mode 100644 index 0000000..e8f4e6a --- /dev/null +++ b/meta/options.nix @@ -0,0 +1,385 @@ +{ config, lib, ... }@args: + +let + inherit (lib) + mkEnableOption + mkDefault + mkIf + mkOption + ; + + inherit (lib.types) + attrs + attrsOf + ints + listOf + nullOr + str + submodule + unspecified + ; + + addressType = + max: + submodule { + options = { + address = mkOption { + type = str; + description = "IP address."; + }; + prefixLength = mkOption { + type = ints.between 8 max; + description = "Length of the prefix used in the local network."; + }; + }; + }; + + org = config.organization; +in + +{ + options = { + organization = { + members = mkOption { + type = attrsOf (submodule { + options = { + name = mkOption { + type = str; + description = '' + Name of the member. + ''; + }; + + email = mkOption { + type = str; + description = '' + Main e-mail address of the member. + ''; + }; + }; + }); + + description = '' + Members of the DGNum organization. 
+ ''; + }; + + groups = mkOption { + type = attrsOf (listOf str); + description = '' + Groups of the DGNum organization. + ''; + }; + + external = mkOption { + type = attrsOf (listOf str); + description = '' + External services used by the DGNum organization. + ''; + }; + + services = mkOption { + type = attrsOf (submodule { + options = { + admins = mkOption { + type = listOf str; + default = [ ]; + description = '' + List of administrators of the service. + ''; + }; + + adminGroups = mkOption { + type = listOf str; + default = [ ]; + description = '' + List of administrator groups of the service. + ''; + }; + }; + }); + description = '' + Administrator access of the different DGNum services, + it is mainly indicative as most services cannot configure this statically. + ''; + }; + }; + + nodes = mkOption { + type = attrsOf ( + submodule ( + { config, name, ... }: + { + options = { + deployment = mkOption { + type = attrs; + default = { }; + }; + + stateVersion = mkOption { + type = str; + description = '' + State version of the node. + ''; + }; + + nixpkgs = mkOption { + type = str; + inherit (import ./nixpkgs.nix) default; + description = '' + Version of nixpkgs to use. + ''; + }; + + nix-modules = mkOption { + type = listOf str; + default = [ ]; + description = '' + List of modules to import from [nix-modules](https://git.hubrecht.ovh/hubrecht/nix-modules). + ''; + }; + + hashedPassword = mkOption { + type = str; + description = '' + The hashed password for the root account. + ''; + }; + + admins = mkOption { + type = listOf str; + default = [ ]; + description = '' + List of members to be given root access to this node. + ''; + }; + + adminGroups = mkOption { + type = listOf str; + default = [ ]; + description = '' + List of groups to be given root access to this node. + ''; + }; + + site = mkOption { + type = str; + description = '' + Geographical site where the node is located. 
+ ''; + }; + + vm-cluster = mkOption { + type = nullOr str; + default = null; + description = "VM cluster where the VM is located"; + }; + }; + + config = { + deployment = { + tags = [ "infra-${config.site}" ]; + targetHost = + let + ip = with args.config.network.${name}.addresses; ipv4 ++ ipv6; + in + mkIf (ip != [ ]) (mkDefault (builtins.head ip)); + }; + }; + } + ) + ); + description = '' + Nodes of the infrastructure. + ''; + }; + + network = mkOption { + type = attrsOf ( + submodule ( + { config, ... }: + { + options = { + interfaces = mkOption { + type = attrsOf ( + submodule ( + { config, ... }: + { + options = { + ipv4 = mkOption { + type = listOf (addressType 32); + default = [ ]; + description = '' + List of ipv4 addresses assigned to the interface. + ''; + }; + + ipv6 = mkOption { + type = listOf (addressType 64); + default = [ ]; + description = '' + List of ipv6 addresses assigned to the interface. + ''; + }; + + gateways = mkOption { + type = listOf str; + description = '' + List of gateways used by the interface. + ''; + }; + + DHCP = mkOption { + type = nullOr str; + default = null; + description = "Whether to enable DHCP on the interface."; + }; + + dns = mkOption { + type = listOf str; + default = [ ]; + }; + + enableDefaultDNS = mkEnableOption "default DNS servers."; + }; + + config.dns = mkIf config.enableDefaultDNS [ + "1.1.1.1#cloudflare-dns.com" + "8.8.8.8#dns.google" + "1.0.0.1#cloudflare-dns.com" + "8.8.4.4#dns.google" + "2606:4700:4700::1111#cloudflare-dns.com" + "2001:4860:4860::8888#dns.google" + "2606:4700:4700::1001#cloudflare-dns.com" + "2001:4860:4860::8844#dns.google" + ]; + } + ) + ); + }; + + addresses = { + ipv4 = mkOption { + type = listOf str; + default = [ ]; + description = '' + List of public ipv4 addresses of the node. + ''; + }; + + ipv6 = mkOption { + type = listOf str; + default = [ ]; + description = '' + List of public ipv6 addresses of the node. 
+ ''; + }; + }; + + hostId = mkOption { + type = str; + description = '' + Network Id of the node. + ''; + }; + + netbirdIp = mkOption { + type = nullOr str; + description = '' + IP address of the node in the netbird network. + ''; + }; + }; + + config = + let + getAddresses = + version: builtins.concatMap (int: builtins.map (builtins.getAttr "address") int.${version}); + in + { + addresses = { + ipv4 = builtins.filter (ip: builtins.substring 0 7 ip != "192.168") ( + getAddresses "ipv4" (builtins.attrValues config.interfaces) + ); + ipv6 = builtins.filter (_: true) ((getAddresses "ipv6") (builtins.attrValues config.interfaces)); + }; + }; + } + ) + ); + description = '' + Network configuration for the different machines. + ''; + }; + + assertions = mkOption { + type = listOf unspecified; + internal = true; + default = [ ]; + description = '' + This option allows modules to express conditions that must + hold for the evaluation of the system configuration to + succeed, along with associated error messages for the user. + ''; + }; + }; + + config = + let + members = builtins.attrNames org.members; + groups = builtins.attrNames org.groups; + + nameExists = + list: f: groups: + builtins.attrValues ( + builtins.mapAttrs (name: members: { + assertion = builtins.all (x: builtins.elem x list) members; + message = f name; + }) groups + ); + + membersExists = nameExists members; + groupsExists = nameExists groups; + + extract = name: builtins.mapAttrs (_: builtins.getAttr name); + in + { + assertions = builtins.concatLists [ + # Check that all group members exist + (membersExists ( + name: "A member of the ${name} group was not found in the members list." 
+ ) org.groups) + + # Check that all node admins exist + (membersExists (name: "A member of the node ${name} admins was not found in the members list.") ( + extract "admins" config.nodes + )) + + # Check that all node adminGroups exist + (groupsExists (name: "A member of the node ${name} adminGroups was not found in the groups list.") ( + extract "adminGroups" config.nodes + )) + + # Check that all services admins exist + (membersExists (name: "A member of the service ${name} admins was not found in the members list.") ( + extract "admins" org.services + )) + + # Check that all services adminGroups exist + (groupsExists ( + name: "A member of the service ${name} adminGroups was not found in the groups list." + ) (extract "adminGroups" org.services)) + + # Check that all external services admins exist + (membersExists ( + name: "A member of the external service ${name} admins was not found in the members list." + ) org.external) + + # Check that all members have ssh keys + (builtins.map (name: { + assertion = ((import ../keys)._keys.${name} or [ ]) != [ ]; + message = "No ssh keys found for ${name}."; + }) members) + ]; + }; +} diff --git a/meta/organization.nix b/meta/organization.nix new file mode 100644 index 0000000..cf0c2b5 --- /dev/null +++ b/meta/organization.nix @@ -0,0 +1,99 @@ +/* + To add a new member add an attribute to `members` + Then add the key to the required groups. 
+*/ + +{ + members = { + catvayor = { + name = "Lubin Bailly"; + email = "catvayor@dgnum.eu"; + }; + + ecoppens = { + name = "Elias Coppens"; + email = "ecoppens@dgnum.eu"; + }; + + jemagius = { + name = "Jean-Marc Gailis"; + email = "jm@dgnum.eu"; + }; + + luj = { + name = "Julien Malka"; + email = "luj@dgnum.eu"; + }; + + mdebray = { + name = "Maurice Debray"; + email = "maurice.debray@dgnum.eu"; + }; + + raito = { + name = "Ryan Lahfa"; + email = "ryan@dgnum.eu"; + }; + + thubrecht = { + name = "Tom Hubrecht"; + email = "tom.hubrecht@dgnum.eu"; + }; + }; + + groups = { + # members of this group are root on all nodes + root = [ + "thubrecht" + "raito" + "mdebray" + "luj" + ]; + + # members of this group are root on the fai infrastructure + fai = [ + "catvayor" + "ecoppens" + ]; + + lab = [ + "catvayor" + "ecoppens" + ]; + + }; + + external = { + dns = [ + "thubrecht" + "raito" + ]; + + email = [ "raito" ]; + + irc = [ "raito" ]; + }; + + services = { + # Démarches Normaliennes + ds-fr.admins = [ + "thubrecht" + "jemagius" + ]; + + # Cloud DGNum + nextcloud.admins = [ + "thubrecht" + "raito" + ]; + + # Netbox DGNum + netbox.adminGroups = [ + "root" + "fai" + ]; + + # Videos DGNum + peertube.admins = [ "thubrecht" ]; + }; +} diff --git a/meta/verify.nix b/meta/verify.nix new file mode 100644 index 0000000..ecf2ed2 --- /dev/null +++ b/meta/verify.nix @@ -0,0 +1,36 @@ +# Nix expression to check if meta module is evaluating correctly. +# To do so run `nix-build ./verify.nix` +let + sources = import ../npins; + pkgs = import sources.nixpkgs { }; + + dns = import sources."dns.nix" { inherit pkgs; }; +in + +{ + meta = + let + config = (import ./.) 
pkgs.lib; + failed = builtins.map (x: "- ${x.message}") (builtins.filter (x: !x.assertion) config.assertions); + in + if (failed != [ ]) then + throw '' + + Failed assertions: + ${builtins.concatStringsSep "\n" failed} + '' + else + pkgs.writers.writeJSON "meta.json" config; + + dns = dns.util.writeZone "dgnum.eu" ( + pkgs.lib.recursiveUpdate { SOA.serial = 0; } ( + import ./dns.nix { + inherit dns; + + lib = pkgs.lib // { + extra = import ../lib/nix-lib; + }; + } + ) + ); +} diff --git a/npins/default.nix b/npins/default.nix new file mode 100644 index 0000000..43cead3 --- /dev/null +++ b/npins/default.nix @@ -0,0 +1,65 @@ +# Generated by npins. Do not modify; will be overwritten regularly +let + data = builtins.fromJSON (builtins.readFile ./sources.json); + inherit (data) version; + + mkSource = + spec: + assert spec ? type; + let + path = + if spec.type == "Git" then + mkGitSource spec + else if spec.type == "GitRelease" then + mkGitSource spec + else if spec.type == "PyPi" then + mkPyPiSource spec + else if spec.type == "Channel" then + mkChannelSource spec + else + builtins.throw "Unknown source type ${spec.type}"; + in + spec // { outPath = path; }; + + mkGitSource = + { + repository, + revision, + url ? null, + hash, + ... + }: + assert repository ? type; + # At the moment, either it is a plain git repository (which has an url), or it is a GitHub/GitLab repository + # In the latter case, there we will always be an url to the tarball + if url != null then + (builtins.fetchTarball { + inherit url; + sha256 = hash; # FIXME: check nix version & use SRI hashes + }) + else + assert repository.type == "Git"; + builtins.fetchGit { + inherit (repository) url; + rev = revision; + # hash = hash; + }; + + mkPyPiSource = + { url, hash, ... }: + builtins.fetchurl { + inherit url; + sha256 = hash; + }; + + mkChannelSource = + { url, hash, ... 
}: + builtins.fetchTarball { + inherit url; + sha256 = hash; + }; +in +if version == 3 then + builtins.mapAttrs (_: mkSource) data.pins +else + throw "Unsupported format version ${toString version} in sources.json. Try running `npins upgrade`" diff --git a/npins/sources.json b/npins/sources.json new file mode 100644 index 0000000..7d4d2be --- /dev/null +++ b/npins/sources.json @@ -0,0 +1,188 @@ +{ + "pins": { + "agenix": { + "type": "GitRelease", + "repository": { + "type": "GitHub", + "owner": "ryantm", + "repo": "agenix" + }, + "pre_releases": false, + "version_upper_bound": null, + "release_prefix": null, + "version": "0.15.0", + "revision": "564595d0ad4be7277e07fa63b5a991b3c645655d", + "url": "https://api.github.com/repos/ryantm/agenix/tarball/0.15.0", + "hash": "01dhrghwa7zw93cybvx4gnrskqk97b004nfxgsys0736823956la" + }, + "disko": { + "type": "GitRelease", + "repository": { + "type": "GitHub", + "owner": "nix-community", + "repo": "disko" + }, + "pre_releases": false, + "version_upper_bound": null, + "release_prefix": null, + "version": "v1.8.0", + "revision": "624fd86460e482017ed9c3c3c55a3758c06a4e7f", + "url": "https://api.github.com/repos/nix-community/disko/tarball/v1.8.0", + "hash": "06ifryv6rw25cz8zda4isczajdgrvcl3aqr145p8njxx5jya2d77" + }, + "git-hooks": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "cachix", + "repo": "git-hooks.nix" + }, + "branch": "master", + "revision": "1211305a5b237771e13fcca0c51e60ad47326a9a", + "url": "https://github.com/cachix/git-hooks.nix/archive/1211305a5b237771e13fcca0c51e60ad47326a9a.tar.gz", + "hash": "1qz8d9g7rhwjk4p2x0rx59alsf0dpjrb6kpzs681gi3rjr685ivq" + }, + "infrastructure": { + "type": "Git", + "repository": { + "type": "Git", + "url": "https://git.dgnum.eu/DGNum/infrastructure" + }, + "branch": "main", + "revision": "bb4a24f9e997445383ffb979e47b6d4ea2553119", + "url": null, + "hash": "1c0b6b5k3qb0hy42nic9nwbz8dz0n9nc8245yacq37x5x46jpbsw" + }, + "lix": { + "type": "Git", + "repository": { + 
"type": "Git", + "url": "https://git.lix.systems/lix-project/lix.git" + }, + "branch": "main", + "revision": "ed9b7f4f84fd60ad8618645cc1bae2d686ff0db6", + "url": null, + "hash": "05kxga8fs9h4qm0yvp5l7jvsda7hzqs7rvxcn8r52dqg3c80hva9" + }, + "lix-module": { + "type": "Git", + "repository": { + "type": "Git", + "url": "https://git.lix.systems/lix-project/nixos-module.git" + }, + "branch": "main", + "revision": "fd186f535a4ac7ae35d98c1dd5d79f0a81b7976d", + "url": null, + "hash": "0jxpqaz12lqibg03iv36sa0shfvamn2yhg937llv3kl4csijd34f" + }, + "lon": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "nikstur", + "repo": "lon" + }, + "branch": "main", + "revision": "a8b4406e5151af87b989564d4aa98ecd6d4d3500", + "url": "https://github.com/nikstur/lon/archive/a8b4406e5151af87b989564d4aa98ecd6d4d3500.tar.gz", + "hash": "0rcl5g727n4fbygprmyga1mzid1zlrmdsd6b6hs8x5s1m38wlssl" + }, + "microvm.nix": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "RaitoBezarius", + "repo": "microvm.nix" + }, + "branch": "main", + "revision": "49899c9a4fdf75320785e79709bf1608c34caeb8", + "url": "https://github.com/RaitoBezarius/microvm.nix/archive/49899c9a4fdf75320785e79709bf1608c34caeb8.tar.gz", + "hash": "0sz6azdpiz4bd36x23bcdhx6mwyqj8zl5cczjgv48xqfmysy8zwy" + }, + "nix-modules": { + "type": "Git", + "repository": { + "type": "Git", + "url": "https://git.hubrecht.ovh/hubrecht/nix-modules.git" + }, + "branch": "main", + "revision": "2fd7c7810b2a901020ddd2d0cc82810b83a313fc", + "url": null, + "hash": "0rag870ll745r5isnk6hlxv0b0sbgriba5k6nihahcwsal2f4830" + }, + "nix-patches": { + "type": "GitRelease", + "repository": { + "type": "Git", + "url": "https://git.hubrecht.ovh/hubrecht/nix-patches" + }, + "pre_releases": false, + "version_upper_bound": null, + "release_prefix": null, + "version": "v0.5.0", + "revision": "e11ba20945f4a867f09d84343c37328288f274b4", + "url": null, + "hash": "1c6cc44pwlg3ky6cnwhkml8ci77fw3sgjhwvqg0f6igxxf2fqv9v" + }, + "nix-pkgs": { + 
"type": "Git", + "repository": { + "type": "Git", + "url": "https://git.hubrecht.ovh/hubrecht/nix-pkgs" + }, + "branch": "main", + "revision": "3e731378f3984313ef902c5e5a49e002e6e2c27e", + "url": null, + "hash": "1vy2dj9fyy653w6idvi1r73s0nd2a332a1xkppddjip6rk0i030p" + }, + "nixos-23.11": { + "type": "Channel", + "name": "nixos-23.11", + "url": "https://releases.nixos.org/nixos/23.11/nixos-23.11.7870.205fd4226592/nixexprs.tar.xz", + "hash": "1mbp7jydzxqgv9w3a8fqggq1x8h3cd0vh9wafri5pls52ngyww47" + }, + "nixos-24.05": { + "type": "Channel", + "name": "nixos-24.05", + "url": "https://releases.nixos.org/nixos/24.05/nixos-24.05.5518.ecbc1ca8ffd6/nixexprs.tar.xz", + "hash": "1yr2v17d8jg9567rvadv62bpr6i47fp73by2454yjxh1m9ric2cm" + }, + "nixos-generators": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "nix-community", + "repo": "nixos-generators" + }, + "branch": "master", + "revision": "9ae128172f823956e54947fe471bc6dfa670ecb4", + "url": "https://github.com/nix-community/nixos-generators/archive/9ae128172f823956e54947fe471bc6dfa670ecb4.tar.gz", + "hash": "1zn3lykymimzh21q4fixw6ql42n8j82dqwm5axifhcnl8dsdgrvr" + }, + "nixos-unstable": { + "type": "Channel", + "name": "nixos-unstable", + "url": "https://releases.nixos.org/nixos/unstable/nixos-24.11pre688563.bc947f541ae5/nixexprs.tar.xz", + "hash": "1jsaxwi128fiach3dj8rdj5agqivsr4sidb8lmdnl7g07fl9x0kj" + }, + "nixpkgs": { + "type": "Channel", + "name": "nixpkgs-unstable", + "url": "https://releases.nixos.org/nixpkgs/nixpkgs-24.11pre689466.7d49afd36b55/nixexprs.tar.xz", + "hash": "0r4zb6j8in4dk7gxciapfm49dqbdd0c7ajjzj9iy2xrrj5aj32qp" + }, + "proxmox-nixos": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "SaumonNet", + "repo": "proxmox-nixos" + }, + "branch": "main", + "revision": "7869ffc2e0db36f314fb60f1ab0087b760700b00", + "url": "https://github.com/SaumonNet/proxmox-nixos/archive/7869ffc2e0db36f314fb60f1ab0087b760700b00.tar.gz", + "hash": 
"0cam36s3ar366y41rvihjqghkdjl9a1n1wzym8p2mkar1r9x7haj" + } + }, + "version": 3 +} diff --git a/patches/00-disable-installChecks-lix.patch b/patches/00-disable-installChecks-lix.patch new file mode 100644 index 0000000..9f6fa93 --- /dev/null +++ b/patches/00-disable-installChecks-lix.patch @@ -0,0 +1,13 @@ +diff --git a/package.nix b/package.nix +index 43b709023..b68857796 100644 +--- a/package.nix ++++ b/package.nix +@@ -351,7 +351,7 @@ stdenv.mkDerivation (finalAttrs: { + echo "doc internal-api-docs $out/share/doc/nix/internal-api/html" >> "$out/nix-support/hydra-build-products" + ''; + +- doInstallCheck = finalAttrs.doCheck; ++ doInstallCheck = false; + + mesonInstallCheckFlags = [ + "--suite=installcheck" diff --git a/patches/01-castopod.patch b/patches/01-castopod.patch new file mode 100644 index 0000000..2acd493 --- /dev/null +++ b/patches/01-castopod.patch @@ -0,0 +1,808 @@ +From 3b656cbdf40c6056983e95ac5c87839a68571096 Mon Sep 17 00:00:00 2001 +From: Alexander Tomokhov +Date: Tue, 3 Oct 2023 22:20:59 +0400 +Subject: [PATCH 1/8] castopod: 1.6.4 -> 1.6.5 + +--- + pkgs/applications/audio/castopod/default.nix | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/pkgs/applications/audio/castopod/default.nix b/pkgs/applications/audio/castopod/default.nix +index 9d9f83e2ecce40..83c70f9b36646d 100644 +--- a/pkgs/applications/audio/castopod/default.nix ++++ b/pkgs/applications/audio/castopod/default.nix +@@ -7,11 +7,11 @@ + }: + stdenv.mkDerivation { + pname = "castopod"; +- version = "1.6.4"; ++ version = "1.6.5"; + + src = fetchurl { +- url = "https://code.castopod.org/adaures/castopod/uploads/ce56d4f149242f12bedd20f9a2b0916d/castopod-1.6.4.tar.gz"; +- sha256 = "080jj91yxbn3xsbs0sywzwa2f5in9bp9qi2zwqcfqpaxlq9ga62v"; ++ url = "https://code.castopod.org/adaures/castopod/uploads/5aaaa6cf2edaed25bd7253449e5f8584/castopod-1.6.5.tar.gz"; ++ sha256 = "04gcq2vmfy5aa2fmsm1qqv1k8g024nikmysdrhy33wj460d529b5"; + }; + + dontBuild = true; + +From 
4cd096c27c52ff9948bc7d9ebc05490147ca9675 Mon Sep 17 00:00:00 2001 +From: Alexander Tomokhov +Date: Tue, 3 Oct 2023 22:19:36 +0400 +Subject: [PATCH 2/8] nixos/castopod: fix startup, displaying images, uploads + up to 500 MiB + +- new maxUploadSize option +- new dataDir option (with ReadWritePaths systemd support) +- admin page reports correct free disk space (instead of /nix/store) +- fix example configuration in documentation +- now podcast creation and file upload are tested during NixOS test +- move castopod from audio to web-apps folder +- verbose logging from the browser test +--- + nixos/modules/module-list.nix | 2 +- + .../services/{audio => web-apps}/castopod.md | 11 +- + .../services/{audio => web-apps}/castopod.nix | 60 ++-- + nixos/tests/castopod.nix | 263 +++++++++++++----- + pkgs/applications/audio/castopod/default.nix | 13 +- + 5 files changed, 256 insertions(+), 93 deletions(-) + rename nixos/modules/services/{audio => web-apps}/castopod.md (72%) + rename nixos/modules/services/{audio => web-apps}/castopod.nix (80%) + +diff --git a/nixos/modules/module-list.nix b/nixos/modules/module-list.nix +index 627427262da632..97043c965400c5 100644 +--- a/nixos/modules/module-list.nix ++++ b/nixos/modules/module-list.nix +@@ -337,7 +337,6 @@ + ./services/amqp/rabbitmq.nix + ./services/audio/alsa.nix + ./services/audio/botamusique.nix +- ./services/audio/castopod.nix + ./services/audio/gmediarender.nix + ./services/audio/gonic.nix + ./services/audio/goxlr-utility.nix +@@ -1282,6 +1281,7 @@ + ./services/web-apps/bookstack.nix + ./services/web-apps/c2fmzq-server.nix + ./services/web-apps/calibre-web.nix ++ ./services/web-apps/castopod.nix + ./services/web-apps/coder.nix + ./services/web-apps/changedetection-io.nix + ./services/web-apps/chatgpt-retrieval-plugin.nix +diff --git a/nixos/modules/services/audio/castopod.md b/nixos/modules/services/web-apps/castopod.md +similarity index 72% +rename from nixos/modules/services/audio/castopod.md +rename to 
nixos/modules/services/web-apps/castopod.md +index ee8590737a7c73..f61bf1166a4d24 100644 +--- a/nixos/modules/services/audio/castopod.md ++++ b/nixos/modules/services/web-apps/castopod.md +@@ -4,6 +4,7 @@ Castopod is an open-source hosting platform made for podcasters who want to enga + + ## Quickstart {#module-services-castopod-quickstart} + ++Configure ACME (https://nixos.org/manual/nixos/unstable/#module-security-acme). + Use the following configuration to start a public instance of Castopod on `castopod.example.com` domain: + + ```nix +@@ -11,11 +12,11 @@ networking.firewall.allowedTCPPorts = [ 80 443 ]; + services.castopod = { + enable = true; + database.createLocally = true; +- nginx.virtualHost = { +- serverName = "castopod.example.com"; +- enableACME = true; +- forceSSL = true; +- }; ++ localDomain = "castopod.example.com"; ++}; ++services.nginx.virtualHosts."castopod.example.com" = { ++ enableACME = true; ++ forceSSL = true; + }; + ``` + +diff --git a/nixos/modules/services/audio/castopod.nix b/nixos/modules/services/web-apps/castopod.nix +similarity index 80% +rename from nixos/modules/services/audio/castopod.nix +rename to nixos/modules/services/web-apps/castopod.nix +index b782b548914795..7c99551c83183f 100644 +--- a/nixos/modules/services/audio/castopod.nix ++++ b/nixos/modules/services/web-apps/castopod.nix +@@ -4,7 +4,6 @@ let + fpm = config.services.phpfpm.pools.castopod; + + user = "castopod"; +- stateDirectory = "/var/lib/castopod"; + + # https://docs.castopod.org/getting-started/install.html#requirements + phpPackage = pkgs.php.withExtensions ({ enabled, all }: with all; [ +@@ -29,6 +28,15 @@ in + defaultText = lib.literalMD "pkgs.castopod"; + description = lib.mdDoc "Which Castopod package to use."; + }; ++ dataDir = lib.mkOption { ++ type = lib.types.path; ++ default = "/var/lib/castopod"; ++ description = lib.mdDoc '' ++ The path where castopod stores all data. 
This path must be in sync ++ with the castopod package (where it is hardcoded during the build in ++ accordance with its own `dataDir` argument). ++ ''; ++ }; + database = { + createLocally = lib.mkOption { + type = lib.types.bool; +@@ -111,6 +119,18 @@ in + Options for Castopod's PHP pool. See the documentation on `php-fpm.conf` for details on configuration directives. + ''; + }; ++ maxUploadSize = lib.mkOption { ++ type = lib.types.int; ++ default = 512; ++ description = lib.mdDoc '' ++ Maximum supported size for a file upload in MiB. Maximum HTTP body ++ size is set to this value for nginx and PHP (because castopod doesn't ++ support chunked uploads yet: ++ https://code.castopod.org/adaures/castopod/-/issues/330). Note, that ++ practical upload size limit is smaller. For example, with 512 MiB ++ setting - around 500 MiB is possible. ++ ''; ++ }; + }; + }; + +@@ -120,13 +140,13 @@ in + sslEnabled = with config.services.nginx.virtualHosts.${cfg.localDomain}; addSSL || forceSSL || onlySSL || enableACME || useACMEHost != null; + baseURL = "http${lib.optionalString sslEnabled "s"}://${cfg.localDomain}"; + in +- lib.mapAttrs (name: lib.mkDefault) { ++ lib.mapAttrs (_name: lib.mkDefault) { + "app.forceGlobalSecureRequests" = sslEnabled; + "app.baseURL" = baseURL; + +- "media.baseURL" = "/"; ++ "media.baseURL" = baseURL; + "media.root" = "media"; +- "media.storage" = stateDirectory; ++ "media.storage" = cfg.dataDir; + + "admin.gateway" = "admin"; + "auth.gateway" = "auth"; +@@ -142,13 +162,13 @@ in + services.phpfpm.pools.castopod = { + inherit user; + group = config.services.nginx.group; +- phpPackage = phpPackage; ++ inherit phpPackage; + phpOptions = '' +- # https://code.castopod.org/adaures/castopod/-/blob/main/docker/production/app/uploads.ini ++ # https://code.castopod.org/adaures/castopod/-/blob/develop/docker/production/common/uploads.template.ini + file_uploads = On + memory_limit = 512M +- upload_max_filesize = 500M +- post_max_size = 512M ++ 
upload_max_filesize = ${toString cfg.maxUploadSize}M ++ post_max_size = ${toString cfg.maxUploadSize}M + max_execution_time = 300 + max_input_time = 300 + ''; +@@ -165,25 +185,25 @@ in + path = [ pkgs.openssl phpPackage ]; + script = + let +- envFile = "${stateDirectory}/.env"; ++ envFile = "${cfg.dataDir}/.env"; + media = "${cfg.settings."media.storage"}/${cfg.settings."media.root"}"; + in + '' +- mkdir -p ${stateDirectory}/writable/{cache,logs,session,temp,uploads} ++ mkdir -p ${cfg.dataDir}/writable/{cache,logs,session,temp,uploads} + + if [ ! -d ${lib.escapeShellArg media} ]; then + cp --no-preserve=mode,ownership -r ${cfg.package}/share/castopod/public/media ${lib.escapeShellArg media} + fi + +- if [ ! -f ${stateDirectory}/salt ]; then +- openssl rand -base64 33 > ${stateDirectory}/salt ++ if [ ! -f ${cfg.dataDir}/salt ]; then ++ openssl rand -base64 33 > ${cfg.dataDir}/salt + fi + + cat <<'EOF' > ${envFile} + ${lib.generators.toKeyValue { } cfg.settings} + EOF + +- echo "analytics.salt=$(cat ${stateDirectory}/salt)" >> ${envFile} ++ echo "analytics.salt=$(cat ${cfg.dataDir}/salt)" >> ${envFile} + + ${if (cfg.database.passwordFile != null) then '' + echo "database.default.password=$(cat ${lib.escapeShellArg cfg.database.passwordFile})" >> ${envFile} +@@ -192,10 +212,10 @@ in + ''} + + ${lib.optionalString (cfg.environmentFile != null) '' +- cat ${lib.escapeShellArg cfg.environmentFile}) >> ${envFile} ++ cat ${lib.escapeShellArg cfg.environmentFile} >> ${envFile} + ''} + +- php spark castopod:database-update ++ php ${cfg.package}/share/castopod/spark castopod:database-update + ''; + serviceConfig = { + StateDirectory = "castopod"; +@@ -204,6 +224,7 @@ in + RemainAfterExit = true; + User = user; + Group = config.services.nginx.group; ++ ReadWritePaths = cfg.dataDir; + }; + }; + +@@ -212,9 +233,7 @@ in + wantedBy = [ "multi-user.target" ]; + path = [ phpPackage ]; + script = '' +- php public/index.php scheduled-activities +- php public/index.php 
scheduled-websub-publish +- php public/index.php scheduled-video-clips ++ php ${cfg.package}/share/castopod/spark tasks:run + ''; + serviceConfig = { + StateDirectory = "castopod"; +@@ -222,6 +241,8 @@ in + Type = "oneshot"; + User = user; + Group = config.services.nginx.group; ++ ReadWritePaths = cfg.dataDir; ++ LogLevelMax = "notice"; # otherwise periodic tasks flood the journal + }; + }; + +@@ -251,6 +272,7 @@ in + extraConfig = '' + try_files $uri $uri/ /index.php?$args; + index index.php index.html; ++ client_max_body_size ${toString cfg.maxUploadSize}M; + ''; + + locations."^~ /${cfg.settings."media.root"}/" = { +@@ -278,7 +300,7 @@ in + }; + }; + +- users.users.${user} = lib.mapAttrs (name: lib.mkDefault) { ++ users.users.${user} = lib.mapAttrs (_name: lib.mkDefault) { + description = "Castopod user"; + isSystemUser = true; + group = config.services.nginx.group; +diff --git a/nixos/tests/castopod.nix b/nixos/tests/castopod.nix +index 4435ec617d4e67..2db7aa0bda6507 100644 +--- a/nixos/tests/castopod.nix ++++ b/nixos/tests/castopod.nix +@@ -4,74 +4,211 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: + meta = with lib.maintainers; { + maintainers = [ alexoundos misuzu ]; + }; ++ + nodes.castopod = { nodes, ... }: { ++ # otherwise 500 MiB file upload fails! 
++ virtualisation.diskSize = 512 + 3 * 512; ++ + networking.firewall.allowedTCPPorts = [ 80 ]; +- networking.extraHosts = '' +- 127.0.0.1 castopod.example.com +- ''; ++ networking.extraHosts = ++ lib.strings.concatStringsSep "\n" ++ (lib.attrsets.mapAttrsToList ++ (name: _: "127.0.0.1 ${name}") ++ nodes.castopod.services.nginx.virtualHosts); ++ + services.castopod = { + enable = true; + database.createLocally = true; + localDomain = "castopod.example.com"; ++ maxUploadSize = 512; + }; +- environment.systemPackages = +- let +- username = "admin"; +- email = "admin@castood.example.com"; +- password = "v82HmEp5"; +- testRunner = pkgs.writers.writePython3Bin "test-runner" +- { +- libraries = [ pkgs.python3Packages.selenium ]; +- flakeIgnore = [ +- "E501" +- ]; +- } '' +- from selenium.webdriver.common.by import By +- from selenium.webdriver import Firefox +- from selenium.webdriver.firefox.options import Options +- from selenium.webdriver.support.ui import WebDriverWait +- from selenium.webdriver.support import expected_conditions as EC +- +- options = Options() +- options.add_argument('--headless') +- driver = Firefox(options=options) +- try: +- driver.implicitly_wait(20) +- driver.get('http://castopod.example.com/cp-install') +- +- wait = WebDriverWait(driver, 10) +- +- wait.until(EC.title_contains("installer")) +- +- driver.find_element(By.CSS_SELECTOR, '#username').send_keys( +- '${username}' +- ) +- driver.find_element(By.CSS_SELECTOR, '#email').send_keys( +- '${email}' +- ) +- driver.find_element(By.CSS_SELECTOR, '#password').send_keys( +- '${password}' +- ) +- driver.find_element(By.XPATH, "//button[contains(., 'Finish install')]").click() +- +- wait.until(EC.title_contains("Auth")) +- +- driver.find_element(By.CSS_SELECTOR, '#email').send_keys( +- '${email}' +- ) +- driver.find_element(By.CSS_SELECTOR, '#password').send_keys( +- '${password}' +- ) +- driver.find_element(By.XPATH, "//button[contains(., 'Login')]").click() +- +- 
wait.until(EC.title_contains("Admin dashboard")) +- finally: +- driver.close() +- driver.quit() +- ''; +- in +- [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ]; + }; ++ ++ nodes.client = { nodes, pkgs, lib, ... }: ++ let ++ domain = nodes.castopod.services.castopod.localDomain; ++ ++ getIP = node: ++ (builtins.head node.networking.interfaces.eth1.ipv4.addresses).address; ++ ++ targetPodcastSize = 500 * 1024 * 1024; ++ lameMp3Bitrate = 348300; ++ lameMp3FileAdjust = -800; ++ targetPodcastDuration = toString ++ ((targetPodcastSize + lameMp3FileAdjust) / (lameMp3Bitrate / 8)); ++ mp3file = with pkgs; ++ runCommand "gen-castopod.mp3" { nativeBuildInputs = [ sox lame ]; } '' ++ sox -n -r 48000 -t wav - synth ${targetPodcastDuration} sine 440 ` ++ `| lame --noreplaygain -cbr -q 9 -b 320 - $out ++ FILESIZE="$(stat -c%s $out)" ++ [ "$FILESIZE" -gt 0 ] ++ [ "$FILESIZE" -le "${toString targetPodcastSize}" ] ++ ''; ++ ++ bannerWidth = 3000; ++ banner = pkgs.runCommand "gen-castopod-cover.jpg" { } '' ++ ${pkgs.imagemagick}/bin/magick ` ++ `-background green -bordercolor white -gravity northwest xc:black ` ++ `-duplicate 99 ` ++ `-seed 1 -resize "%[fx:rand()*72+24]" ` ++ `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 16x36 ` ++ `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "150x50!" ` ++ `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append ` ++ `-resize ${toString bannerWidth} -quality 1 $out ++ ''; ++ ++ coverWidth = toString 3000; ++ cover = pkgs.runCommand "gen-castopod-banner.jpg" { } '' ++ ${pkgs.imagemagick}/bin/magick ` ++ `-background white -bordercolor white -gravity northwest xc:black ` ++ `-duplicate 99 ` ++ `-seed 1 -resize "%[fx:rand()*72+24]" ` ++ `-seed 0 -rotate "%[fx:rand()*360]" -border 6x6 -splice 36x36 ` ++ `-seed 0 -rotate "%[fx:floor(rand()*4)*90]" -resize "144x144!" 
` ++ `+append -crop 10x1@ +repage -roll "+%[fx:(t%2)*72]+0" -append ` ++ `-resize ${coverWidth} -quality 1 $out ++ ''; ++ in ++ { ++ networking.extraHosts = ++ lib.strings.concatStringsSep "\n" ++ (lib.attrsets.mapAttrsToList ++ (name: _: "${getIP nodes.castopod} ${name}") ++ nodes.castopod.services.nginx.virtualHosts); ++ ++ environment.systemPackages = ++ let ++ username = "admin"; ++ email = "admin@${domain}"; ++ password = "Abcd1234"; ++ podcastTitle = "Some Title"; ++ episodeTitle = "Episode Title"; ++ browser-test = pkgs.writers.writePython3Bin "browser-test" ++ { ++ libraries = [ pkgs.python3Packages.selenium ]; ++ flakeIgnore = [ "E124" "E501" ]; ++ } '' ++ from selenium.webdriver.common.by import By ++ from selenium.webdriver import Firefox ++ from selenium.webdriver.firefox.options import Options ++ from selenium.webdriver.firefox.service import Service ++ from selenium.webdriver.support.ui import WebDriverWait ++ from selenium.webdriver.support import expected_conditions as EC ++ from subprocess import STDOUT ++ import logging ++ ++ selenium_logger = logging.getLogger("selenium") ++ selenium_logger.setLevel(logging.DEBUG) ++ selenium_logger.addHandler(logging.StreamHandler()) ++ ++ options = Options() ++ options.add_argument('--headless') ++ service = Service(log_output=STDOUT) ++ driver = Firefox(options=options, service=service) ++ driver = Firefox(options=options) ++ driver.implicitly_wait(20) ++ ++ # install ########################################################## ++ ++ driver.get('http://${domain}/cp-install') ++ ++ wait = WebDriverWait(driver, 10) ++ ++ wait.until(EC.title_contains("installer")) ++ ++ driver.find_element(By.CSS_SELECTOR, '#username').send_keys( ++ '${username}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#email').send_keys( ++ '${email}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#password').send_keys( ++ '${password}' ++ ) ++ driver.find_element(By.XPATH, ++ "//button[contains(., 'Finish install')]" ++ ).click() ++ ++ 
wait.until(EC.title_contains("Auth")) ++ ++ driver.find_element(By.CSS_SELECTOR, '#email').send_keys( ++ '${email}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#password').send_keys( ++ '${password}' ++ ) ++ driver.find_element(By.XPATH, ++ "//button[contains(., 'Login')]" ++ ).click() ++ ++ wait.until(EC.title_contains("Admin dashboard")) ++ ++ # create podcast ################################################### ++ ++ driver.get('http://${domain}/admin/podcasts/new') ++ ++ wait.until(EC.title_contains("Create podcast")) ++ ++ driver.find_element(By.CSS_SELECTOR, '#cover').send_keys( ++ '${cover}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#banner').send_keys( ++ '${banner}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#title').send_keys( ++ '${podcastTitle}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#handle').send_keys( ++ 'some_handle' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#description').send_keys( ++ 'Some description' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#owner_name').send_keys( ++ 'Owner Name' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#owner_email').send_keys( ++ 'owner@email.xyz' ++ ) ++ driver.find_element(By.XPATH, ++ "//button[contains(., 'Create podcast')]" ++ ).click() ++ ++ wait.until(EC.title_contains("${podcastTitle}")) ++ ++ driver.find_element(By.XPATH, ++ "//span[contains(., 'Add an episode')]" ++ ).click() ++ ++ wait.until(EC.title_contains("Add an episode")) ++ ++ # upload podcast ################################################### ++ ++ driver.find_element(By.CSS_SELECTOR, '#audio_file').send_keys( ++ '${mp3file}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#cover').send_keys( ++ '${cover}' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#description').send_keys( ++ 'Episode description' ++ ) ++ driver.find_element(By.CSS_SELECTOR, '#title').send_keys( ++ '${episodeTitle}' ++ ) ++ driver.find_element(By.XPATH, ++ "//button[contains(., 'Create episode')]" ++ ).click() ++ ++ 
wait.until(EC.title_contains("${episodeTitle}")) ++ ++ driver.close() ++ driver.quit() ++ ''; ++ in ++ [ pkgs.firefox-unwrapped pkgs.geckodriver browser-test ]; ++ }; ++ + testScript = '' + start_all() + castopod.wait_for_unit("castopod-setup.service") +@@ -79,9 +216,9 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: + castopod.wait_for_unit("nginx.service") + castopod.wait_for_open_port(80) + castopod.wait_until_succeeds("curl -sS -f http://castopod.example.com") +- castopod.succeed("curl -s http://localhost/cp-install | grep 'Create your Super Admin account' > /dev/null") + +- with subtest("Create superadmin and log in"): +- castopod.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner") ++ with subtest("Create superadmin, log in, create and upload a podcast"): ++ client.succeed(\ ++ "PYTHONUNBUFFERED=1 systemd-cat -t browser-test browser-test") + ''; + }) +diff --git a/pkgs/applications/audio/castopod/default.nix b/pkgs/applications/audio/castopod/default.nix +index 83c70f9b36646d..badace09587d2b 100644 +--- a/pkgs/applications/audio/castopod/default.nix ++++ b/pkgs/applications/audio/castopod/default.nix +@@ -3,7 +3,7 @@ + , ffmpeg-headless + , lib + , nixosTests +-, stateDirectory ? "/var/lib/castopod" ++, dataDir ? "/var/lib/castopod" + }: + stdenv.mkDerivation { + pname = "castopod"; +@@ -20,13 +20,16 @@ stdenv.mkDerivation { + postPatch = '' + # not configurable at runtime unfortunately: + substituteInPlace app/Config/Paths.php \ +- --replace "__DIR__ . '/../../writable'" "'${stateDirectory}/writable'" ++ --replace "__DIR__ . 
'/../../writable'" "'${dataDir}/writable'" + +- # configuration file must be writable, place it to ${stateDirectory} ++ substituteInPlace modules/Admin/Controllers/DashboardController.php \ ++ --replace "disk_total_space('./')" "disk_total_space('${dataDir}')" ++ ++ # configuration file must be writable, place it to ${dataDir} + substituteInPlace modules/Install/Controllers/InstallController.php \ +- --replace "ROOTPATH" "'${stateDirectory}/'" ++ --replace "ROOTPATH" "'${dataDir}/'" + substituteInPlace public/index.php spark \ +- --replace "DotEnv(ROOTPATH)" "DotEnv('${stateDirectory}')" ++ --replace "DotEnv(ROOTPATH)" "DotEnv('${dataDir}')" + + # ffmpeg is required for Video Clips feature + substituteInPlace modules/MediaClipper/VideoClipper.php \ + +From 45d43fe39fa3167d5cf7ba9a2cb9fcd6fbe2c5c3 Mon Sep 17 00:00:00 2001 +From: Alexander Tomokhov +Date: Mon, 11 Dec 2023 09:00:26 +0400 +Subject: [PATCH 3/8] nixos/castopod: little documentation fix + +--- + nixos/modules/services/web-apps/castopod.nix | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/nixos/modules/services/web-apps/castopod.nix b/nixos/modules/services/web-apps/castopod.nix +index 7c99551c83183f..11cf4b36aeb385 100644 +--- a/nixos/modules/services/web-apps/castopod.nix ++++ b/nixos/modules/services/web-apps/castopod.nix +@@ -126,9 +126,10 @@ in + Maximum supported size for a file upload in MiB. Maximum HTTP body + size is set to this value for nginx and PHP (because castopod doesn't + support chunked uploads yet: +- https://code.castopod.org/adaures/castopod/-/issues/330). Note, that +- practical upload size limit is smaller. For example, with 512 MiB +- setting - around 500 MiB is possible. ++ https://code.castopod.org/adaures/castopod/-/issues/330). ++ ++ Note, that practical upload size limit is smaller. For example, with ++ 512 MiB setting - around 500 MiB is possible. 
+ ''; + }; + }; + +From 4aafd48b7e76748eaf0ff7409b12b455d1db31ec Mon Sep 17 00:00:00 2001 +From: sinavir +Date: Fri, 23 Feb 2024 22:02:10 +0100 +Subject: [PATCH 4/8] castopod: 1.6.5 -> 1.10.3 + +--- + pkgs/applications/audio/castopod/default.nix | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pkgs/applications/audio/castopod/default.nix b/pkgs/applications/audio/castopod/default.nix +index badace09587d2b..438f1d728f9624 100644 +--- a/pkgs/applications/audio/castopod/default.nix ++++ b/pkgs/applications/audio/castopod/default.nix +@@ -10,8 +10,8 @@ stdenv.mkDerivation { + version = "1.6.5"; + + src = fetchurl { +- url = "https://code.castopod.org/adaures/castopod/uploads/5aaaa6cf2edaed25bd7253449e5f8584/castopod-1.6.5.tar.gz"; +- sha256 = "04gcq2vmfy5aa2fmsm1qqv1k8g024nikmysdrhy33wj460d529b5"; ++ url = "https://code.castopod.org/adaures/castopod/uploads/2bb52d4607a772ac8b397efa3559a3ae/castopod-1.10.3.tar.gz"; ++ sha256 = "0w1yl14v3aajm089vwpq9wkiibv3w312y004ggdbf7xwzsrmjs51"; + }; + + dontBuild = true; + +From 6205595efbdcc2f3440022b4ff7258e2f50a6427 Mon Sep 17 00:00:00 2001 +From: sinavir +Date: Fri, 23 Feb 2024 22:27:24 +0100 +Subject: [PATCH 6/8] nixos/castopod: use LoadCredentials + +--- + nixos/modules/services/web-apps/castopod.nix | 12 ++++++++++-- + 1 file changed, 10 insertions(+), 2 deletions(-) + +diff --git a/nixos/modules/services/web-apps/castopod.nix b/nixos/modules/services/web-apps/castopod.nix +index 11cf4b36aeb385..042fb3954d2b73 100644 +--- a/nixos/modules/services/web-apps/castopod.nix ++++ b/nixos/modules/services/web-apps/castopod.nix +@@ -67,6 +67,8 @@ in + description = lib.mdDoc '' + A file containing the password corresponding to + [](#opt-services.castopod.database.user). ++ ++ This file is loaded using systemd LoadCredentials. + ''; + }; + }; +@@ -93,6 +95,8 @@ in + Environment file to inject e.g. secrets into the configuration. 
+ See [](https://code.castopod.org/adaures/castopod/-/blob/main/.env.example) + for available environment variables. ++ ++ This file is loaded using systemd LoadCredentials. + ''; + }; + configureNginx = lib.mkOption { +@@ -207,19 +211,23 @@ in + echo "analytics.salt=$(cat ${cfg.dataDir}/salt)" >> ${envFile} + + ${if (cfg.database.passwordFile != null) then '' +- echo "database.default.password=$(cat ${lib.escapeShellArg cfg.database.passwordFile})" >> ${envFile} ++ echo "database.default.password=$(cat "$CREDENTIALS_DIRECTORY/dbpasswordfile)" >> ${envFile} + '' else '' + echo "database.default.password=" >> ${envFile} + ''} + + ${lib.optionalString (cfg.environmentFile != null) '' +- cat ${lib.escapeShellArg cfg.environmentFile} >> ${envFile} ++ cat "$CREDENTIALS_DIRECTORY/envfile" >> ${envFile} + ''} + + php ${cfg.package}/share/castopod/spark castopod:database-update + ''; + serviceConfig = { + StateDirectory = "castopod"; ++ LoadCredential = lib.optional (cfg.environmentFile != null) ++ "envfile:${cfg.environmentFile}" ++ ++ (lib.optional (cfg.database.passwordFile != null) ++ "dbpasswordfile:${cfg.database.passwordFile}"); + WorkingDirectory = "${cfg.package}/share/castopod"; + Type = "oneshot"; + RemainAfterExit = true; + +From 9b03fc35a30671e5d4146bbcbe6b5536fa9baacc Mon Sep 17 00:00:00 2001 +From: sinavir +Date: Sat, 2 Mar 2024 18:01:54 +0100 +Subject: [PATCH 7/8] nixos/castopod: build mp3 in the test + +--- + nixos/tests/castopod.nix | 31 +++++++++++++++++++++---------- + 1 file changed, 21 insertions(+), 10 deletions(-) + +diff --git a/nixos/tests/castopod.nix b/nixos/tests/castopod.nix +index 2db7aa0bda6507..2bdc6941c23815 100644 +--- a/nixos/tests/castopod.nix ++++ b/nixos/tests/castopod.nix +@@ -37,14 +37,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... 
}: + targetPodcastDuration = toString + ((targetPodcastSize + lameMp3FileAdjust) / (lameMp3Bitrate / 8)); + mp3file = with pkgs; +- runCommand "gen-castopod.mp3" { nativeBuildInputs = [ sox lame ]; } '' +- sox -n -r 48000 -t wav - synth ${targetPodcastDuration} sine 440 ` +- `| lame --noreplaygain -cbr -q 9 -b 320 - $out +- FILESIZE="$(stat -c%s $out)" +- [ "$FILESIZE" -gt 0 ] +- [ "$FILESIZE" -le "${toString targetPodcastSize}" ] +- ''; +- ++ runCommand ; + bannerWidth = 3000; + banner = pkgs.runCommand "gen-castopod-cover.jpg" { } '' + ${pkgs.imagemagick}/bin/magick ` +@@ -185,7 +178,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: + # upload podcast ################################################### + + driver.find_element(By.CSS_SELECTOR, '#audio_file').send_keys( +- '${mp3file}' ++ '/tmp/podcast.mp3' + ) + driver.find_element(By.CSS_SELECTOR, '#cover').send_keys( + '${cover}' +@@ -206,7 +199,23 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: + driver.quit() + ''; + in +- [ pkgs.firefox-unwrapped pkgs.geckodriver browser-test ]; ++ [ ++ pkgs.firefox-unwrapped ++ pkgs.geckodriver ++ browser-test ++ (pkgs.writeShellApplication { ++ name = "build-mp3"; ++ runtimeInputs = with pkgs; [ sox lame ]; ++ text = '' ++ out=/tmp/podcast.mp3 ++ sox -n -r 48000 -t wav - synth ${targetPodcastDuration} sine 440 ` ++ `| lame --noreplaygain -cbr -q 9 -b 320 - $out ++ FILESIZE="$(stat -c%s $out)" ++ [ "$FILESIZE" -gt 0 ] ++ [ "$FILESIZE" -le "${toString targetPodcastSize}" ] ++ ''; ++ }) ++ ]; + }; + + testScript = '' +@@ -217,6 +226,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... 
}: + castopod.wait_for_open_port(80) + castopod.wait_until_succeeds("curl -sS -f http://castopod.example.com") + ++ client.succeed("build-mp3") ++ + with subtest("Create superadmin, log in, create and upload a podcast"): + client.succeed(\ + "PYTHONUNBUFFERED=1 systemd-cat -t browser-test browser-test") + +From 538281e8be427f820371f4005e991e0281872e12 Mon Sep 17 00:00:00 2001 +From: sinavir +Date: Sat, 2 Mar 2024 18:04:35 +0100 +Subject: [PATCH 8/8] nixos/castopod: Increase test timeouts + +--- + nixos/tests/castopod.nix | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/nixos/tests/castopod.nix b/nixos/tests/castopod.nix +index 2bdc6941c23815..2c5c745f7da0df 100644 +--- a/nixos/tests/castopod.nix ++++ b/nixos/tests/castopod.nix +@@ -105,7 +105,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: + + driver.get('http://${domain}/cp-install') + +- wait = WebDriverWait(driver, 10) ++ wait = WebDriverWait(driver, 20) + + wait.until(EC.title_contains("installer")) + diff --git a/patches/03-nextcloud.patch b/patches/03-nextcloud.patch new file mode 100644 index 0000000..0263c0d --- /dev/null +++ b/patches/03-nextcloud.patch @@ -0,0 +1,28 @@ +--- a/nixos/modules/services/web-apps/nextcloud.nix ++++ b/nixos/modules/services/web-apps/nextcloud.nix +@@ -131,6 +131,7 @@ + (mkRemovedOptionModule [ "services" "nextcloud" "disableImagemagick" ] '' + Use services.nextcloud.enableImagemagick instead. + '') ++ (mkRenamedOptionModule [ "services" "nextcloud" "config" "objectstore" "s3" "autocreate" ] [ "services" "nextcloud" "config" "objectstore" "s3" "verify_bucket_exists" ]) + ]; + + options.services.nextcloud = { +@@ -487,7 +487,7 @@ + The name of the S3 bucket. + ''; + }; +- autocreate = mkOption { ++ verify_bucket_exists = mkOption { + type = types.bool; + description = lib.mdDoc '' + Create the objectstore if it does not exist. 
+@@ -820,7 +820,7 @@ + 'class' => '\\OC\\Files\\ObjectStore\\S3', + 'arguments' => [ + 'bucket' => '${s3.bucket}', +- 'autocreate' => ${boolToString s3.autocreate}, ++ 'verify_bucket_exists' => ${boolToString s3.verify_bucket_exists}, + 'key' => '${s3.key}', + 'secret' => nix_read_secret('${s3.secretFile}'), + ${optionalString (s3.hostname != null) "'hostname' => '${s3.hostname}',"} diff --git a/patches/04-crabfit-karla.patch b/patches/04-crabfit-karla.patch new file mode 100644 index 0000000..366928a --- /dev/null +++ b/patches/04-crabfit-karla.patch @@ -0,0 +1,24 @@ +diff --git a/pkgs/by-name/cr/crabfit-frontend/package.nix b/pkgs/by-name/cr/crabfit-frontend/package.nix +index 99d7be0fdeae..9f858e8a9a9e 100644 +--- a/pkgs/by-name/cr/crabfit-frontend/package.nix ++++ b/pkgs/by-name/cr/crabfit-frontend/package.nix +@@ -8,7 +8,7 @@ + nodejs, + yarn, + fixup_yarn_lock, +- google-fonts, ++ karla, + api_url ? "http://127.0.0.1:3000", + frontend_url ? "crab.fit", + }: +@@ -83,9 +83,7 @@ stdenv.mkDerivation (finalAttrs: { + patchShebangs node_modules + + mkdir -p src/app/fonts +- cp "${ +- google-fonts.override { fonts = [ "Karla" ]; } +- }/share/fonts/truetype/Karla[wght].ttf" src/app/fonts/karla.ttf ++ cp "${karla}/share/fonts/truetype/Karla-Regular.ttf" src/app/fonts/karla.ttf + + runHook postConfigure + ''; diff --git a/patches/05-pmnos-correctness-build-directory.patch b/patches/05-pmnos-correctness-build-directory.patch new file mode 100644 index 0000000..0a57672 --- /dev/null +++ b/patches/05-pmnos-correctness-build-directory.patch @@ -0,0 +1,54 @@ +From 4d6e57d2d577cc105c9e0cd397408e9e3ce85cd0 Mon Sep 17 00:00:00 2001 +From: Raito Bezarius +Date: Tue, 8 Oct 2024 16:33:14 +0200 +Subject: [PATCH] fix(packaging): correctness of the build top directory + +It was using /build which is an implementation detail and not +guaranteed. 
+ +Signed-off-by: Raito Bezarius +--- + pkgs/pve-container/default.nix | 6 +++--- + pkgs/pve-rs/default.nix | 2 +- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/pkgs/pve-container/default.nix b/pkgs/pve-container/default.nix +index 445c271..5633c0f 100644 +--- a/pkgs/pve-container/default.nix ++++ b/pkgs/pve-container/default.nix +@@ -30,7 +30,7 @@ perl536.pkgs.toPerlModule ( + postPatch = '' + sed -i Makefile \ + -e "s/pct.1 pct.conf.5 pct.bash-completion pct.zsh-completion //" \ +- -e "s,/usr/share/lxc,/build/lxc," \ ++ -e "s,/usr/share/lxc,$NIX_BUILD_TOP/lxc," \ + -e "/pve-doc-generator/d" \ + -e "/PVE_GENERATING_DOCS/d" \ + -e "/SERVICEDIR/d" \ +@@ -45,8 +45,8 @@ perl536.pkgs.toPerlModule ( + dontPatchShebangs = true; + + postConfigure = '' +- cp -r ${lxc}/share/lxc /build +- chmod -R +w /build/lxc ++ cp -r ${lxc}/share/lxc $NIX_BUILD_TOP/ ++ chmod -R +w $NIX_BUILD_TOP/lxc + ''; + + makeFlags = [ +diff --git a/pkgs/pve-rs/default.nix b/pkgs/pve-rs/default.nix +index c024287..881beab 100644 +--- a/pkgs/pve-rs/default.nix ++++ b/pkgs/pve-rs/default.nix +@@ -57,7 +57,7 @@ perl536.pkgs.toPerlModule ( + ]; + + makeFlags = [ +- "BUILDIR=/build" ++ "BUILDIR=$NIX_BUILD_TOP" + "BUILD_MODE=release" + "DESTDIR=$(out)" + "GITVERSION:=${src.rev}" +-- +2.46.0 + diff --git a/patches/default.nix b/patches/default.nix new file mode 100644 index 0000000..bec48f0 --- /dev/null +++ b/patches/default.nix @@ -0,0 +1,136 @@ +let + netboxAgent = { + id = "244549"; + hash = "sha256-SePkKEYQGDj6FpuyxZ+1ASeVPA02mCHf0G5i3koMdNw="; + }; +in + +{ + "nixos-24.05" = [ + # netbox qrcode plugin + { + _type = "commit"; + sha = "ae4bf4c110378ebacb3989c9533726859cfebbfa"; + hash = "sha256-SgHhW9HCkDQsxT3eG4P9q68c43e3sbDHRY9qs7oSt8o="; + } + + netboxAgent + + { + id = "275165"; + hash = "sha256-9a26V3Pi8yLD3N9+mC1kvJoruxRTp/qOHapnt6VX7pw="; + } + + # karla: init at 2.004 + { + _type = "commit"; + sha = "7c51104112e8ea0e2ac53bf7d535e677f7686a9e"; + hash = 
"sha256-1TBLzZkvkFhCL8RYVVIUhTyrH3+X1iJIMkyHffmrOWc="; + } + + # Crabfit: don't depend on all google-fonts + { + _type = "static"; + path = ./04-crabfit-karla.patch; + } + ]; + "nixos-23.11" = [ + # netbox module + { + _type = "commit"; + sha = "163fed297ed65a24241f190d8e954ce1877f9020"; + includes = [ "nixos/modules/services/web-apps/netbox.nix" ]; + hash = "sha256-mjeRxtZozgLNzHkCxcTs3xnonNPkmPoaGxawixC9jfo="; + } + + # netbox qrcode plugin + { + _type = "commit"; + sha = "ae4bf4c110378ebacb3989c9533726859cfebbfa"; + hash = "sha256-SgHhW9HCkDQsxT3eG4P9q68c43e3sbDHRY9qs7oSt8o="; + } + + netboxAgent + + # missing jsonargparse deps for netbox-agent + { + # json arg parse dep: typesched-client + _type = "commit"; + sha = "b1770afd23a7a1ebb4e5bccd8d34dfb3a58a4341"; + hash = "sha256-PNUOPgmNFQr+bwG8MQiwlI4+zkDKLL3e1GgHHskX/Dk="; + } + { + # json arg parse dep: reconplogger + _type = "commit"; + sha = "3835e47059eee32eaf5ef0eb03fce90e8c9b8ab4"; + hash = "sha256-owJL1nmYfEXSHekBGgbJrTkl3OpX6dG9oq3mjN4Xgj8="; + } + { + # json arg parse dep: logmatic + _type = "commit"; + sha = "b0de35af031b4282ac99919384f029a18704d819"; + hash = "sha256-0nJzF2WMPNs2/zI65T1j9DQ0kORDKCu3H2PWjd/bXuo="; + } + { + # init at 4.27.2 + _type = "commit"; + sha = "aa724f81163725b54a06623d191de6d4c064c5ea"; + hash = "sha256-ZWOVpVVsmjBCCiMe/j4YKz6YP4m5I8TTbqWNqBaMtKA="; + } + { + # 4.27.2 -> 4.27.3 + id = "284460"; + hash = "sha256-di9F8d5l0QDWt9yNpamHMyMm4AVxnsRko8+/Ruo7G3I="; + } + { + # 4.27.3 -> 4.27.4 + id = "285873"; + hash = "sha256-GRytcYvIsMP2RfHlmOJIH3po2/KmfZ3fDwo3YUyXGEU="; + } + { + # 4.27.4 -> 4.27.5 + id = "288546"; + hash = "sha256-iHXwZQKlaWmbd9kJbm6YyjIc3bPFHGbIghYRCPsSGHo="; + } + { + # 4.27.5 -> 4.27.6 + id = "296359"; + hash = "sha256-BHevlu8eqkoRgxwFMoS9bkT/9+w0Hfp3JXWh6UKovUk="; + } + { + # 4.27.6 -> 4.27.7 + id = "297825"; + hash = "sha256-tu91+T1G/sHfCLfLL4Jk/zLU/QwnHLbpxiYzRBJXFXw="; + } + + # castopod: 1.6.4 -> 1.7.0 + ajout du support de loadcredentials + { + _type = 
"static"; + path = ./01-castopod.patch; + } + + # nixos/nextcloud: Rename autocreate (a no-op) to verify_bucket_exists + { + _type = "static"; + path = ./03-nextcloud.patch; + } + ]; + + "nixos-unstable" = [ + netboxAgent + + # netbox qrcode plugin + { + _type = "commit"; + sha = "ae4bf4c110378ebacb3989c9533726859cfebbfa"; + hash = "sha256-SgHhW9HCkDQsxT3eG4P9q68c43e3sbDHRY9qs7oSt8o="; + } + ]; + + "proxmox-nixos" = [ + { + _type = "static"; + path = ./05-pmnos-correctness-build-directory.patch; + } + ]; +} diff --git a/scripts/cache-node.sh b/scripts/cache-node.sh new file mode 100644 index 0000000..38a2e6c --- /dev/null +++ b/scripts/cache-node.sh @@ -0,0 +1,20 @@ +set -eu -o pipefail + +cat <.netrc +default +login $STORE_USER +password $STORE_PASSWORD +EOF + +drv=$("@colmena@/bin/colmena" eval --instantiate -E "{ nodes, ... }: nodes.${BUILD_NODE}.config.system.build.toplevel") + +# Build the derivation and send it to the great beyond +nix-store --query --requisites --force-realise --include-outputs "$drv" | grep -v '.*\.drv' >paths.txt + +nix copy \ + --extra-experimental-features nix-command \ + --to "$STORE_ENDPOINT?compression=none" \ + --netrc-file .netrc \ + "$(nix-store --realise "$drv")" + +rm .netrc diff --git a/scripts/check-deployment.sh b/scripts/check-deployment.sh new file mode 100644 index 0000000..96371f7 --- /dev/null +++ b/scripts/check-deployment.sh @@ -0,0 +1,125 @@ +#!/usr/bin/env bash +#!@bash@/bin/bash +# shellcheck shell=bash + +set -o errexit +set -o nounset +set -o pipefail +shopt -s lastpipe + +usage="$(basename "$0") [-h] [--diff] [NODE] +Check if deployed config is actually the one on master +By default check all nodes + +where: + -h Show this help text + --diff Show diff with nvd + +Exemple: + check-deployment web01" + +while [[ $# -gt 0 ]]; do + case "$1" in + --help|-h) + echo "$usage" + exit 0 + ;; + + --diff) + diff=y + ;; + + *) + if [[ -z ${node-} ]]; then + node="$1" + else + echo "Too many arguments. 
Help:" + echo "$usage" + exit 1 + fi + ;; + esac + shift +done + +############# +# go to tmp # +############# + +TMP=$(mktemp -d) +GIT_TOP_LEVEL=$(git rev-parse --show-toplevel) + +echo "Cloning local main..." +git clone -q --branch main --single-branch "$GIT_TOP_LEVEL" "$TMP" +pushd "$TMP" > /dev/null || exit 2 + +#################### +# Evaluate configs # +#################### + +colmena_failed () { + >&2 echo "Colmena failed. Check your config. Logs:" + >&2 cat "$COLMENA_LOGS" + exit 3 +} + +COLMENA_LOGS=$(mktemp) + +echo "Evaluating configs..." +# Disable warning because of '${}' +# shellcheck disable=SC2016 +RESULTS=$(colmena eval -E '{ nodes, lib, ...}: lib.mapAttrsToList (k: v: { machine = k; path = v.config.system.build.toplevel; drv = v.config.system.build.toplevel.drvPath; domain = "${v.config.networking.hostName}.${v.config.networking.domain}"; }) nodes' 2> "$COLMENA_LOGS" || colmena_failed) + +rm "$COLMENA_LOGS" +echo "Evaluation finished" + +##################################### +# retrieve and check current-system # +##################################### + +retrieve_current_system () { + # TODO implement a less invasive method + ssh -n "root@$1" "readlink -f /run/current-system" +} + + +return_status=0 +echo "$RESULTS" | @jq@/bin/jq -c '.[]' | +while IFS=$'\n' read -r c; do + + machine=$(echo "$c" | @jq@/bin/jq -r '.machine') + if [[ -n ${node-} ]] && [[ "$machine" != "$node" ]]; then + echo "Skipping ${machine}" + continue + fi + expected_path=$(echo "$c" | @jq@/bin/jq -r '.path') + domain=$(echo "$c" | @jq@/bin/jq -r '.domain') + drv_path=$(echo "$c" | @jq@/bin/jq -r '.drv') + + err=0 + current_path=$(retrieve_current_system "$domain") || err=1 + if [[ "1" == "${err}" ]] ; then + echo "❌ failed to contact $domain !" 
+ continue + fi + + if [ "$expected_path" == "$current_path" ] ; then + echo "✅ $machine -> OK" + elif [[ -n ${diff-} ]] ; then + nix-copy-closure --from "root@$domain" "$current_path" + nix-store -r "$drv_path" + echo "$machine -> error. nvd output:" + @nvd@/bin/nvd diff "$expected_path" "$current_path" + return_status=1 + else + echo "☠️ $machine -> error:" + echo " - Expected system: $expected_path" + echo " - Current system: $current_path" + return_status=1 + fi +done + +popd > /dev/null || exit 2 +rm -r "$TMP" + +exit $return_status diff --git a/scripts/default.nix b/scripts/default.nix new file mode 100644 index 0000000..3503729 --- /dev/null +++ b/scripts/default.nix @@ -0,0 +1,39 @@ +{ pkgs, ... }: + +let + substitutions = { + inherit (pkgs) + bash + colmena + coreutils + nvd + git + jq + ; + }; + + mkShellScript = + name: + (pkgs.substituteAll ( + { + inherit name; + src = ./. + "/${name}.sh"; + dir = "/bin/"; + isExecutable = true; + + checkPhase = '' + ${pkgs.stdenv.shellDryRun} "$target" + ''; + } + // substitutions + )); + + scripts = [ + "cache-node" + "check-deployment" + "launch-vm" + "list-nodes" + ]; +in + +builtins.map mkShellScript scripts diff --git a/scripts/launch-vm.sh b/scripts/launch-vm.sh new file mode 100755 index 0000000..7f8c598 --- /dev/null +++ b/scripts/launch-vm.sh @@ -0,0 +1,37 @@ +#!@bash@/bin/bash +# shellcheck shell=bash +set -o errexit +set -o nounset +set -o pipefail + +MACHINE="" +HOSTFWD="" + +while getopts 'p:o:h' opt; do + case "$opt" in + p) + HOSTFWD=",hostfwd=tcp::$OPTARG$HOSTFWD" + ;; + + o) + MACHINE="$OPTARG" + ;; + + h|?) 
+ echo "Usage: $(basename "$0") [-p hostport-:guestport] -o MACHINE" + exit 1 + ;; + esac +done +shift "$((OPTIND -1))" + +if [ -z "$MACHINE" ]; then echo "-o option needed"; exit 1; fi + +DRV_PATH=$(@colmena@/bin/colmena eval --instantiate -E "{nodes, ...}: nodes.$MACHINE.config.system.build.vm") + +echo "Realising $DRV_PATH" +RESULT=$(nix-store -r "$DRV_PATH") + +echo "Built $RESULT, launching VM ..." + +"$RESULT/bin/run-$MACHINE-vm" -device e1000,netdev=net0 -netdev "user,id=net0$HOSTFWD" diff --git a/scripts/list-nodes.sh b/scripts/list-nodes.sh new file mode 100644 index 0000000..a6c90ff --- /dev/null +++ b/scripts/list-nodes.sh @@ -0,0 +1,6 @@ +#!@bash@/bin/bash +# shellcheck shell=bash + +cd $(@git@/bin/git rev-parse --show-toplevel) + +nix-instantiate --strict --eval --json -A nodes | @jq@/bin/jq . diff --git a/shell.nix b/shell.nix new file mode 100644 index 0000000..0076170 --- /dev/null +++ b/shell.nix @@ -0,0 +1 @@ +(import ./. { }).shells.default