Compare commits

..

16 commits

Author SHA1 Message Date
53e2108ee8
feat(kanidm): Add SuiteNumerique Drive client
All checks were successful
2025-06-07 23:08:54 +02:00
1032b3225e
feat(wordpress/npr): Add a plugin
All checks were successful
2025-05-30 16:43:31 +02:00
75ba2e4fcf
fix(netbird): Update dashboard version
All checks were successful
The daemon and dashboard versions are supposed to be somewhat coupled,
but nixpkgs does not keep them in sync: the daemon is regularly updated
while the dashboard lags behind.
2025-05-30 14:56:02 +02:00
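The commit above works around the daemon/dashboard version skew by patching nixpkgs (it pulls nixpkgs PR 403844, see the patches hunk at the end of this diff). As a rough alternative, a minimal overlay sketch of pinning the dashboard ahead of the daemon could look like the following — the rev and hash here are placeholders, not values from this commit, and a real bump may also need the package's JS dependency hash updated:

# Hypothetical sketch only; the actual change applies a nixpkgs PR patch instead.
final: prev: {
  netbird-dashboard = prev.netbird-dashboard.overrideAttrs (old: rec {
    version = "2.12.0";
    src = prev.fetchFromGitHub {
      owner = "netbirdio";
      repo = "dashboard";
      rev = "v${version}";
      hash = prev.lib.fakeHash; # replace with the real hash reported on first build
    };
  });
}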
4bc96151b2
feat(netconf): changes in Potos network
All checks were successful
* core-links in dgn-isp module
* factorize nodes/netconf.nix
2025-05-26 14:28:58 +02:00
4bbaeee232
refactor(netconf): renamed switches
netcore00 -> netcore01
netcore01 -> netcore02
netcore02 -> Jaccess01
netaccess01 -> Jaccess04
2025-05-26 14:24:07 +02:00
db195b9c0b
fix(colmena): Revert aliases
All checks were successful
This made colmena unnecessarily slow; we don't plan to use aliases, and
the implementation was a big bowl of slow spaghetti.
2025-05-26 13:55:32 +02:00
b09a0e8b10
fix(storage01/victorialogs): bump maxConcurrentInserts to keep up with log flow
All checks were successful
2025-05-26 00:37:09 +02:00
sinavir
5e731419f3 feat(agenix): Rekey
All checks were successful
2025-05-25 22:22:53 +02:00
sinavir
e37ca27064 fix(dgn-forgejo-runners): Forgejo runner secret token doesn't have to be known by all machines 2025-05-25 22:22:53 +02:00
sinavir
78fbf6cc28 fixup! fix(keys): take root age keys for mkRootSecrets 2025-05-25 22:22:53 +02:00
1c1c19487e fix(keys): take root age keys for mkRootSecrets 2025-05-25 22:22:53 +02:00
e53d46108f
feat(hypervisor03): activate SFP
All checks were successful
2025-05-25 20:53:13 +02:00
e8f4fcce60
feat(hypervisor02): activate SFP 2025-05-25 20:52:10 +02:00
c6b14fb48e
feat(hypervisor01): activate SFP 2025-05-25 20:50:44 +02:00
e96d74a726
feat(dgn-network): add metric option 2025-05-25 20:46:11 +02:00
dec93715be
feat(build01): activate SFP 2025-05-25 19:56:08 +02:00
26 changed files with 1010 additions and 313 deletions

View file

@ -1,6 +1,44 @@
###
# This file was automatically generated with nix-actions.
jobs:
Jaccess01:
runs-on: nix-infra
steps:
- uses: actions/checkout@v3
- env:
BUILD_NODE: Jaccess01
name: Eval Jaccess01
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\nDRV=$(instantiate-node)\necho \"DRV=$DRV\" >> $GITHUB_ENV\n'"
- name: Build Jaccess01
run: "STORE_PATH=\"$(nix-store --realise \"$DRV\")\"\necho \"STORE_PATH=$STORE_PATH\"\
\ >> $GITHUB_ENV\n"
- env:
STORE_ENDPOINT: https://snix-store.dgnum.eu/infra.signing/
STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }}
STORE_USER: admin
name: Cache Jaccess01
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\npush-to-cache \"$STORE_PATH\"\n'"
Jaccess04:
runs-on: nix-infra
steps:
- uses: actions/checkout@v3
- env:
BUILD_NODE: Jaccess04
name: Eval Jaccess04
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\nDRV=$(instantiate-node)\necho \"DRV=$DRV\" >> $GITHUB_ENV\n'"
- name: Build Jaccess04
run: "STORE_PATH=\"$(nix-store --realise \"$DRV\")\"\necho \"STORE_PATH=$STORE_PATH\"\
\ >> $GITHUB_ENV\n"
- env:
STORE_ENDPOINT: https://snix-store.dgnum.eu/infra.signing/
STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }}
STORE_USER: admin
name: Cache Jaccess04
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\npush-to-cache \"$STORE_PATH\"\n'"
ap01:
runs-on: nix-infra
steps:
@ -248,44 +286,6 @@ jobs:
name: Cache lab-router01
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\npush-to-cache \"$STORE_PATH\"\n'"
netaccess01:
runs-on: nix-infra
steps:
- uses: actions/checkout@v3
- env:
BUILD_NODE: netaccess01
name: Eval netaccess01
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\nDRV=$(instantiate-node)\necho \"DRV=$DRV\" >> $GITHUB_ENV\n'"
- name: Build netaccess01
run: "STORE_PATH=\"$(nix-store --realise \"$DRV\")\"\necho \"STORE_PATH=$STORE_PATH\"\
\ >> $GITHUB_ENV\n"
- env:
STORE_ENDPOINT: https://snix-store.dgnum.eu/infra.signing/
STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }}
STORE_USER: admin
name: Cache netaccess01
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\npush-to-cache \"$STORE_PATH\"\n'"
netcore00:
runs-on: nix-infra
steps:
- uses: actions/checkout@v3
- env:
BUILD_NODE: netcore00
name: Eval netcore00
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\nDRV=$(instantiate-node)\necho \"DRV=$DRV\" >> $GITHUB_ENV\n'"
- name: Build netcore00
run: "STORE_PATH=\"$(nix-store --realise \"$DRV\")\"\necho \"STORE_PATH=$STORE_PATH\"\
\ >> $GITHUB_ENV\n"
- env:
STORE_ENDPOINT: https://snix-store.dgnum.eu/infra.signing/
STORE_PASSWORD: ${{ secrets.STORE_PASSWORD }}
STORE_USER: admin
name: Cache netcore00
run: "nix-shell -A eval-nodes --run 'set -o pipefail\nset -o nounset\nset -o
errexit\npush-to-cache \"$STORE_PATH\"\n'"
netcore01:
runs-on: nix-infra
steps:

REUSE.toml generated
View file

@ -2,7 +2,7 @@ version = 1
[[annotations]]
SPDX-FileCopyrightText = "NONE"
SPDX-License-Identifier = "CC0-1.0"
path = ["**/.envrc", "**/Cargo.lock", "**/_hardware-configuration.nix", ".gitignore", "REUSE.toml", "shell.nix", "pkgs/by-name/docuseal/rubyEnv/*", "pkgs/by-name/docuseal/deps.json", "pkgs/by-name/docuseal/yarn.lock"]
path = ["**/.envrc", "**/Cargo.lock", "**/_hardware-configuration.nix", ".gitignore", "REUSE.toml", "shell.nix", "patches/colmena/0001-*", "pkgs/by-name/docuseal/rubyEnv/*", "pkgs/by-name/docuseal/deps.json", "pkgs/by-name/docuseal/yarn.lock"]
precedence = "closest"
[[annotations]]

View file

@ -75,6 +75,9 @@ let
"REUSE.toml"
"shell.nix"
# Commit revert
"patches/colmena/0001-*"
# Docuseal
"pkgs/by-name/docuseal/rubyEnv/*"
"pkgs/by-name/docuseal/deps.json"

View file

@ -33,6 +33,9 @@ rec {
# List of keys for the root group
rootKeys = getMemberKeys meta.organization.groups.root;
# List of keys for the root group (for age encryption and decryption)
rootAgeKeys = getAgeKeys meta.organization.groups.root;
# All admins for a node
getNodeAdmins = node: meta.organization.groups.root ++ meta.nodes.${node}.admins;
@ -40,15 +43,15 @@ rec {
getSecretKeys = node: lib.unique (getAgeKeys (getNodeAdmins node) ++ getNodeKeys [ node ]);
# List of keys for all machines wide secrets
machineKeys = rootKeys ++ (getNodeKeys (builtins.attrNames meta.nodes));
machineKeys = rootAgeKeys ++ (getNodeKeys (builtins.attrNames meta.nodes));
mkSecrets = nodes: setDefault { publicKeys = lib.unique (builtins.concatMap getSecretKeys nodes); };
mkRootSecrets = setDefault { publicKeys = lib.unique rootKeys; };
mkRootSecrets = setDefault { publicKeys = lib.unique rootAgeKeys; };
machineKeysBySystem =
system:
rootKeys
rootAgeKeys
++ (getNodeKeys (
builtins.attrNames (lib.filterAttrs (_: v: v.nixpkgs.system == system) meta.nodes)
));

View file

@ -0,0 +1,77 @@
# SPDX-FileCopyrightText: 2024 Lubin Bailly <lubin.bailly@dgnum.eu>
#
# SPDX-License-Identifier: EUPL-1.2
{
dgn-hardware.model = "EX2300-48P";
dgn-isp = {
enable = true;
AP = [
# H1-00
"ge-0/0/0"
"ge-0/0/1"
"ge-0/0/2"
"ge-0/0/3"
"ge-0/0/4"
"ge-0/0/5"
# H1-01
"ge-0/0/6"
"ge-0/0/7"
"ge-0/0/8"
"ge-0/0/9"
"ge-0/0/10"
"ge-0/0/11"
# H1-02
"ge-0/0/12"
"ge-0/0/13"
"ge-0/0/14"
"ge-0/0/15"
"ge-0/0/16"
"ge-0/0/17"
];
admin-ip = "fd26:baf9:d250:8000::1001/64";
};
dgn-interfaces = {
# oob
"ge-0/0/42".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "all" ];
};
# ilo
"ge-0/0/47".ethernet-switching = {
interface-mode = "access";
vlans = [ "admin-core" ];
};
# router
"xe-0/1/0".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "all" ];
};
# netaccess01
"xe-0/1/1".ethernet-switching = {
interface-mode = "trunk";
vlans = [
"users"
"ap-staging"
"admin-ap"
"admin-core"
];
};
# netcore01 (Potos)
"xe-0/1/2".ethernet-switching = {
interface-mode = "trunk";
vlans = [
"all"
];
};
# uplink
"ge-0/1/3".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "uplink-cri" ];
};
# debug management
"me0".inet.addresses = [ "192.168.42.6/24" ];
};
}

View file

@ -1,28 +0,0 @@
# SPDX-FileCopyrightText: 2025 Lubin Bailly <lubin.bailly@dgnum.eu>
#
# SPDX-License-Identifier: EUPL-1.2
{
dgn-hardware = {
model = "EX4400-24X";
extensions = [ "EX4400-EM-4Y" ];
};
dgn-isp = {
enable = true;
admin-ip = "fd26:baf9:d250:8000::1010/64";
};
dgn-interfaces = {
"xe-0/2/0".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "all" ];
};
"xe-0/0/23".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "all" ];
};
# debug management
"me0".inet.addresses = [ "192.168.2.3/24" ];
};
}

View file

@ -3,50 +3,45 @@
# SPDX-License-Identifier: EUPL-1.2
{
dgn-hardware.model = "EX4100-F-48P";
dgn-hardware = {
model = "EX4400-24X";
extensions = [ "EX4400-EM-4Y" ];
};
dgn-isp = {
enable = true;
admin-ip = "fd26:baf9:d250:8000::100f/64";
};
dgn-profiles = {
"hypervisor" = {
interfaces = [
"ge-0/0/1"
"ge-0/0/3"
"ge-0/0/5"
"ge-0/0/7"
"ge-0/0/9"
];
configuration.ethernet-switching = {
interface-mode = "access";
vlans = [ "hypervisor" ];
};
};
"idrac" = {
interfaces = [
"ge-0/0/0"
"ge-0/0/2"
"ge-0/0/4"
"ge-0/0/6"
"ge-0/0/8"
# PDU and PSU
"ge-0/0/46"
"ge-0/0/47"
];
configuration.ethernet-switching = {
interface-mode = "access";
vlans = [ "admin-core" ];
};
};
admin-ip = "fd26:baf9:d250:8000::1010/64";
core-links = [
"xe-0/0/0"
"xe-0/0/3"
"xe-0/0/22"
"xe-0/0/21"
];
};
dgn-interfaces = {
"xe-0/2/0".ethernet-switching = {
"ge-0/0/23".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "all" ];
vlans = [ "uplink-cri" ];
};
"xe-0/0/0".ethernet-switching.vlans = [ "uplink-cri" ];
"xe-0/0/21".ethernet-switching.vlans = [ "all" ];
"xe-0/0/22".ethernet-switching.vlans = [ "all" ];
# debug management
"me0".inet.addresses = [ "192.168.2.2/24" ];
"me0".inet.addresses = [ "192.168.2.3/24" ];
};
dgn-profiles."hypervisor" = {
interfaces = [
"xe-0/0/4"
"xe-0/0/5"
"xe-0/0/6"
"xe-0/0/7"
"xe-0/0/8"
"xe-0/0/9"
];
configuration.ethernet-switching = {
interface-mode = "access";
vlans = [ "hypervisor" ];
};
};
}

View file

@ -1,77 +1,56 @@
# SPDX-FileCopyrightText: 2024 Lubin Bailly <lubin.bailly@dgnum.eu>
# SPDX-FileCopyrightText: 2025 Lubin Bailly <lubin.bailly@dgnum.eu>
#
# SPDX-License-Identifier: EUPL-1.2
{
dgn-hardware.model = "EX2300-48P";
dgn-hardware.model = "EX4100-F-48P";
dgn-isp = {
enable = true;
AP = [
# H1-00
"ge-0/0/0"
"ge-0/0/1"
"ge-0/0/2"
"ge-0/0/3"
"ge-0/0/4"
"ge-0/0/5"
# H1-01
"ge-0/0/6"
"ge-0/0/7"
"ge-0/0/8"
"ge-0/0/9"
"ge-0/0/10"
"ge-0/0/11"
# H1-02
"ge-0/0/12"
"ge-0/0/13"
"ge-0/0/14"
"ge-0/0/15"
"ge-0/0/16"
"ge-0/0/17"
];
admin-ip = "fd26:baf9:d250:8000::1001/64";
admin-ip = "fd26:baf9:d250:8000::100f/64";
};
dgn-profiles = {
"hypervisor" = {
interfaces = [
"ge-0/0/1"
"ge-0/0/3"
"ge-0/0/5"
"ge-0/0/7"
"ge-0/0/9"
];
configuration.ethernet-switching = {
interface-mode = "access";
vlans = [ "hypervisor" ];
};
};
"idrac" = {
interfaces = [
"ge-0/0/0"
"ge-0/0/2"
"ge-0/0/4"
"ge-0/0/6"
"ge-0/0/8"
"ge-0/0/10"
"ge-0/0/12"
"ge-0/0/14"
# PDU and PSU
"ge-0/0/45"
"ge-0/0/46"
"ge-0/0/47"
];
configuration.ethernet-switching = {
interface-mode = "access";
vlans = [ "admin-core" ];
};
};
};
dgn-interfaces = {
# oob
"ge-0/0/42".ethernet-switching = {
"xe-0/2/0".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "all" ];
};
# ilo
"ge-0/0/47".ethernet-switching = {
interface-mode = "access";
vlans = [ "admin-core" ];
};
# router
"xe-0/1/0".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "all" ];
};
# netaccess01
"xe-0/1/1".ethernet-switching = {
interface-mode = "trunk";
vlans = [
"users"
"ap-staging"
"admin-ap"
"admin-core"
];
};
# netcore01 (Potos)
"xe-0/1/2".ethernet-switching = {
interface-mode = "trunk";
vlans = [
"all"
];
};
# uplink
"ge-0/1/3".ethernet-switching = {
interface-mode = "trunk";
vlans = [ "uplink-cri" ];
};
# debug management
"me0".inet.addresses = [ "192.168.42.6/24" ];
"me0".inet.addresses = [ "192.168.2.2/24" ];
};
}

View file

@ -33,6 +33,8 @@ in
};
dashboard = {
package = nixpkgs.nixos.unstable.netbird-dashboard;
settings = {
AUTH_AUTHORITY = "https://sso.dgnum.eu/oauth2/openid/dgn_netbird";
AUTH_AUDIENCE = "dgn_netbird";

View file

@ -15,6 +15,7 @@ in
flags = {
retentionPeriod = "4w";
httpListenAddr = "${meta.network.${name}.netbirdIp}:${builtins.toString port}";
maxConcurrentInserts = "200";
};
};

View file

@ -68,7 +68,7 @@ in
};
plugins = {
inherit (wp4nix.plugins) user-role-editor;
inherit (wp4nix.plugins) pages-with-category-and-tag user-role-editor;
};
languages = [ pkgs.wordpressPackages.languages.fr_FR ];

View file

@ -23,7 +23,7 @@
build01 = {
interfaces = {
enp35s0f0np0 = {
enp35s0f1np1 = {
ipv4 = [
{
address = "10.0.254.21";
@ -31,6 +31,19 @@
}
];
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
enp66s0f1np1 = {
ipv4 = [
{
address = "10.0.254.21";
prefixLength = 24;
}
];
metric = 500;
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
@ -141,6 +154,20 @@
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
eno1 = {
ipv4 = [
{
address = "10.0.254.11";
prefixLength = 24;
}
];
metric = 500;
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
};
hostId = "4dbbd76a";
@ -160,6 +187,20 @@
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
eno1 = {
ipv4 = [
{
address = "10.0.254.12";
prefixLength = 24;
}
];
metric = 500;
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
};
hostId = "d0b48483";
@ -179,6 +220,20 @@
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
enp66s0f0 = {
ipv4 = [
{
address = "10.0.254.13";
prefixLength = 24;
}
];
metric = 500;
gateways = [ "10.0.254.1" ];
enableDefaultDNS = true;
};
};
hostId = "1c407ea8";

View file

@ -1,106 +1,61 @@
# SPDX-FileCopyrightText: 2024 Lubin Bailly <lubin.bailly@dgnum.eu>
#
# SPDX-License-Identifier: EUPL-1.2
{
nodes = {
netcore00 = {
site = "pot01";
{ lib, ... }:
let
inherit (lib) mapAttrs;
hashedPassword = "$6$BKetIIfT$JVyE0B7F4O.fJwQFu5jVrVExAZROrEMLW5HkDkhjMShJ9cRIgxSm2VM9OThDowsnLmAewqDN7eAY.EQt4UR4U0";
hashedPassword = "$2b$05$3h26h8rJ5yks1vJYXZG.fuHagYBwrVMlGR7iFmsV1FTJlMUj97fl2";
mkSwitch =
{
site,
ip,
entry,
}:
{
inherit site hashedPassword;
stateVersion = null;
adminGroups = [ "fai" ];
deployment = {
targetHost = "fd26:baf9:d250:8000::1010";
targetHost = ip;
sshOptions = [
"-J"
"root@vault01.hyp01.infra.dgnum.eu"
entry
"-p"
"830"
];
};
nixpkgs = {
version = "24.05"; # FIXME: meaningless
version = "24.05";
system = "netconf";
};
};
in
{
nodes = mapAttrs (_: mkSwitch) {
netcore01 = {
site = "pot01";
hashedPassword = "$6$BKetIIfT$JVyE0B7F4O.fJwQFu5jVrVExAZROrEMLW5HkDkhjMShJ9cRIgxSm2VM9OThDowsnLmAewqDN7eAY.EQt4UR4U0";
stateVersion = null;
adminGroups = [ "fai" ];
deployment = {
targetHost = "fd26:baf9:d250:8000::100f";
sshOptions = [
"-J"
"root@vault01.hyp01.infra.dgnum.eu"
"-p"
"830"
];
};
nixpkgs = {
version = "24.05"; # FIXME: meaningless
system = "netconf";
};
ip = "fd26:baf9:d250:8000::1010";
entry = "root@vault01.hyp01.infra.dgnum.eu";
};
netcore02 = {
site = "hyp01";
hashedPassword = "$6$BKetIIfT$JVyE0B7F4O.fJwQFu5jVrVExAZROrEMLW5HkDkhjMShJ9cRIgxSm2VM9OThDowsnLmAewqDN7eAY.EQt4UR4U0";
stateVersion = null;
adminGroups = [ "fai" ];
deployment = {
targetHost = "fd26:baf9:d250:8000::1001";
sshOptions = [
"-J"
"root@vault01.hyp01.infra.dgnum.eu"
"-p"
"830"
];
};
nixpkgs = {
version = "24.05"; # FIXME: meaningless
system = "netconf";
};
site = "pot01";
ip = "fd26:baf9:d250:8000::100f";
entry = "root@vault01.hyp01.infra.dgnum.eu";
};
netaccess01 = {
Jaccess01 = {
site = "hyp01";
ip = "fd26:baf9:d250:8000::1001";
entry = "root@vault01.hyp01.infra.dgnum.eu";
};
Jaccess04 = {
site = "hyp02";
hashedPassword = "$6$BKetIIfT$JVyE0B7F4O.fJwQFu5jVrVExAZROrEMLW5HkDkhjMShJ9cRIgxSm2VM9OThDowsnLmAewqDN7eAY.EQt4UR4U0";
stateVersion = null;
adminGroups = [ "fai" ];
deployment = {
targetHost = "fd26:baf9:d250:8000::2001";
sshOptions = [
"-J"
"root@vault01.hyp01.infra.dgnum.eu"
"-p"
"830"
];
};
nixpkgs = {
version = "24.05"; # FIXME: meaningless
system = "netconf";
};
ip = "fd26:baf9:d250:8000::2001";
entry = "root@vault01.hyp01.infra.dgnum.eu";
};
};
}

View file

@ -21,6 +21,7 @@ let
attrs
attrsOf
bool
int
enum
ints
listOf
@ -374,6 +375,14 @@ in
'';
};
metric = mkOption {
type = int;
default = 1000;
description = ''
Metric of routes created by this interface.
'';
};
DHCP = mkOption {
type = nullOr str;
default = null;

View file

@ -23,6 +23,14 @@ in
options.dgn-isp = {
enable = mkEnableOption "Common isp configuration";
core-links = mkOption {
type = listOf str;
default = [ ];
description = ''
Interfaces of link between switches
'';
};
AP = mkOption {
type = listOf str;
default = [ ];
@ -69,6 +77,18 @@ in
dgn-interfaces."irb".inet6.addresses = [ cfg.admin-ip ];
dgn-profiles = {
core = {
interfaces = cfg.core-links;
configuration.ethernet-switching = {
interface-mode = "trunk";
vlans = [
"users"
"ap-staging"
"admin-ap"
"admin-core"
];
};
};
AP = {
interfaces = cfg.AP;
configuration = {

View file

@ -1,62 +1,39 @@
age-encryption.org/v1
-> ssh-ed25519 jIXfPA IBDWE9Xn6wliGbftDIGWXHa7HKUMNefpFqPbyM1hGyE
g3RFQAd1fr04JDJWNOiA5Az+Th5y4KQsMoZtDUTChII
-> ssh-ed25519 QlRB9Q wUwww3D9NqymfeRaiTlPqSQs+d4jgeU5et7t5Bh16G0
vQMO+HmuvKUouppp1fPSVzF88WWnWtTfHm+c6WJeJNs
-> ssh-ed25519 r+nK/Q CyIT2nwckqhLB5WojCJOzKw+62qejILHOyUI91IJMyc
pStkO3bVXBwsoadaJ+FyjwY/SxNneaLjGreqlGXy7MI
-> ssh-ed25519 ZIo4kw 4Dvbj86lTQ6xC73sI8eEU6GyGo/Qzv3Htq3jZCzaWSA
5sbbvnKk6Ef6zacjMV2EK9optOKVUIqfk/IAuFd3M5o
-> ssh-ed25519 9/PCvA b5eEu4H7gu2Jxta0J9pVUJjCBqy3sa9fl4/Glwxatwk
JAcfuZyUGyEHsTrOpfcYKGlmoc6WHLQdJCm6mVc7Bq4
-> ssh-ed25519 prNEsA DC648/P8gUL0kTjuXdM34wT+lo4wkTwtk0CghxWumjU
SZaiXJXPjL+gYkPn5mVlw7vQ4otKVSKX5VIdkgsgURA
-> ssh-ed25519 jIXfPA an1P3n+ZiKaUOYMZCcWJXwkmL8ZzehP2of1DunzlugE
w0tyvbo/Dkl9ovbi2jbveuu/9fQ6JYox3711Ke+PL7c
-> ssh-ed25519 QlRB9Q iyFxHQBFLBVKMv2Uv97p2cQCizTOSrQDYMctdoafmXk
nMemf/k6qdrY91waHLxG7L4AuZCZWexPsC0pUI/dKdc
-> ssh-ed25519 r+nK/Q NRJzo86V7hd9M9mLkZS/SFv+3wyzQUIAxSop1t0NIk4
kB+sIcPPx80o276zRVwOJ1kZlrmZRagAS+J2K9pIs54
-> ssh-rsa krWCLQ
xSBrLm6xfBZC2NSVjWOlw1PdAfV4SsrUr6FuLeMa291otjMyz5o/RhxVeU6uEh2F
B6lk7WdEsYWl6lp98ha1bz4udabPJZdMHO1dj0uOx2Qnt5F5P+Dk/dOsZlbylX7n
oKLm7w+P0xKMZyy4VoK0UWuwAwNZ+Rvh0aiqRxWQK+fh6W9pii/MDssCn/cwqsP3
0ehUupmTP1GPWzXufVdvdV3EUuD6wczYMit3XhVpHWs3rh8nCY8WEw2kaF/YJ3kq
QAzT1MGn2Bd9qJxPmnThJPr669cdX0FxRT8tirskBwGD50/AGzkGHmrDxhJ+lA+a
oaAOAH2JuzxVU8eZTNzBaA
-> ssh-ed25519 /vwQcQ wnnNXgdxTHBfy1m9Q85dE6qiQ1SGk40NSXEKFhJhoUg
VRpUiwOot1uuTyp9zoBdoHVH/Sk/ZQvXZ/OlJrFLJNw
-> ssh-ed25519 0R97PA 3NnOUAVFDA6jGIImHHaY5neOksXHY5W3SRvQjSMHIhE
0fpC7PKc2Ih/ZaUamsiXMya+cRHFo47pu+/+VPPh1mY
-> ssh-ed25519 JGx7Ng wjLJcV9WaZzaXL9G4+gWLrJwlQHpu2wNauNGCKHlcjE
KXtlcgvCx4IjWKZYcdrc/+ptN89TYdztcKx3D8SOGJQ
-> ssh-ed25519 bUjjig 1anXdq+bgCUIgtwdb3r107avA454AmvzCZ/9Gg6KJRY
jZ+57yUb5S2o4jYIfle2Cosnr244lyT6OovC6pki3aM
-> ssh-ed25519 DqHxWQ IRL5LMBRyHEuerrJLXwmFLn60w9kPLFfdwJ+B98B4ys
+WLwLTWwswxa6q/okI+h8lMjAk8qEiOT/eODA8K6o4Q
-> ssh-ed25519 IxxZqA susvyhJg3BFg25/w0u8HlbyuuUVBUkKt4OYdDyrs3SY
kShLE/m+WjpHFe9rp1u1myVVT92wTSX9/LbBCBBEDGM
-> ssh-ed25519 /fsvPA 8v8ltxt9mHugEPxI6+oIrVhV5z63QExUl2FF81hvoDY
3UyQnrt9ZUQgoFWa+sOhUyfW8ayE1hT6iJktRUqsPCs
-> ssh-ed25519 tDqJRg 0TyPeokIZ2VEJIefa771p/mgPKXNn9N1gFfJZNzJ5g4
9c+48/+JkSNjIpbdLS1gMqCJsVR++aTd2fAtq+Vf5mU
-> ssh-ed25519 9pVK7Q zGCbrGgD9+ZJWRjOmxXG+NTK3sXaRtVa5nFUd16dkgY
5KplC/lMmYROpQXez04Fek7csTAKhCDTFMvi1gTkZLg
-> ssh-ed25519 /BRpBQ BaEwBZBU4V86HQJL//Y7yZgNiIsVe14NrgHqXNdVIjI
o23RVqBoMazA5FcvvrxTsO/8YY6v/Fi2fe0vHCuMXP8
-> ssh-ed25519 t0vvHQ 3ry3IKDCwvHlya5gRl1WvWWg9eyvzoscLFIkSS+0W0M
ARJQYM8UOxL4MKYeVTm4iuqRmKKt5WVA7vyHxBeSbFE
-> ssh-ed25519 E6cGqw l8mjNJC/Rv8H8RAJJ6YPFh1giAELtq6YwOg9NGfpPEI
3LZG3xs9yWKzDiIfXK5WuT5DBoKRRZXvMW4wrvD0Yz4
-> ssh-ed25519 EEPmeQ vST8LKLp2Z5KqAVyGRedZ0UYrvTeBOh6VktNovlh+BE
h67yS6SCGgDAbaVZFGPJWSTZjyKihR4lU2WI0ddtPS0
-> ssh-ed25519 /x+F2Q MGL5T94LGLWkGMDHWgsYSklh9bNNS5wQ4UsHd2DhIAw
wN9w+g0v4Ai9/LLBp0AVFPxGlYb9TFvFMherfQ1HAII
-> ssh-ed25519 +MNHsw MmeQYG3LMRYshE1iW8c0aK+4ClW4cdmL0IRNOG4b0xE
H172Nrg6evAabW0tiPkbmtqM0pJd2V+AXqbcrqizO50
-> ssh-ed25519 rHotTw jAf0aGFc8O4Q6kFT/bvDvMgsng+P7McydHsf3XGXOU4
hVU16P3uqnBXN0VM1k0D/yUB/4qXPJHlucZYaEeotu0
-> ssh-ed25519 NaIdrw RDQ5Pt+Q3pDI7jNEFuOJj6FhyQ+rOs7tjW7aZ6juvU8
l84vMl7TqapmfSpM0n7OqSGPzpxckppENDVsH1UfUOk
-> ssh-ed25519 +mFdtQ TCPW7Vv185tMyBUD1kKUYiLCjs2BODC6/yk1bnCNLAw
uCRPJ7/vD5IsGiclvSDUccqghHpJafw6bOnuCd1hSTU
-> ssh-ed25519 0IVRbA zQ1t9aOeOZsoyG5PYe/qmQhz5zTwRaZOH1w747gcOV8
/6kyxLF7RhGo5csn1FRzutUur5dEe95L8OJ1xuUGbp0
-> ssh-ed25519 IY5FSQ fg14TlFknPaqzzhvOBj1xUPK+SXdVwE1kVrqsXguQhQ
JM2jK91iWXF4NoOimeFgYBRr5Y51C1qKGW/bQh1I2wk
-> ssh-ed25519 VQSaNw KRupcX4ff19rhTEE7T7OYbX5R7N8FN/6dKZOw1BfRCc
v5K8+gyqx8v5qp48hFCblLWdmm1dTjqr8Tnjz/PsLgs
-> )-grease >`MLFR
AzmLKWjXTSMRbDzD9U0wWgH8Llfigdh3Ks2EAHWp3Rrbb1lyfsjnTBkPGTz3oaKx
Ucpzmu+rcVhNfb7S9RGnJXAm1bcdFGj+fipRUsLqcfxtG8+7Gy9WyZkgxKdJqRcn
--- BcXdoymLtYCqMH5BALUywcLlG1MVXoAmxH4/p4BBxgw
|êamxòD[¾±OЦ*;%`³å®uŒ4eƹjÜü¸g•$Vi´LGï×~½µY®¬@žÊiƒÚ¬GÈã§Äy®äòSAÇm·<6D>)
Vu4hzL2lpYaPlT0M5Dq7i2sYZQiHtc0wx/+Ol7yX6imT+36ZYGWOr5etTWNsG5H1
r4POK1efijLJDcDXdG9H0aIRKYk0LIWu8Lc8mXQicS4sI/DAuHcwd4yEHoiqBroB
HrIEtpctjszOUwcORPPnVeuxrc7qV7dZHQOTFb37m5wB1mOWQPf9+aEZ8Pa9N3HB
MYyCiZdkjnB+GV2CRdpg3Nrn5MBYzTk6qKc/4Dt97fSjnZp7mEAUu3MwJN6xwZvt
edoxH7qH/pyjn6L4F3BOzoXi0OGp7UhZrWdSm9rpP9bQoSDl2xPlfyQltAx6F5L/
rWCxDh1tMzKTkjfLdA5yYg
-> ssh-ed25519 /vwQcQ ed+mwY1AiYksn4D74JVHIHCXdujK+T4M5fqRertiXDE
dZ27Pab3C8OesdLbHREAkQs76fk2+HQFsC5vrqHenU8
-> ssh-ed25519 0R97PA /SJIhnQqyJ30Igm3lD58ASvHDDVVHvGOVBCQPHXDyn4
N4Q7XsCTpgVDEl9BC5D+W559VZGzRBmZT7zdHH2TxVw
-> ssh-ed25519 JGx7Ng kOpYTaE79ToAfe21Y4Rj6OzarTviMcZrr/XPeORY/Gs
pTJv9SEeCj/c7WoXsS9/C7Ur0GObvNXQN82P2Vpv/ks
-> ssh-ed25519 bUjjig R+nYaUJc2bnGlu7MfwH2l4UZPCyp//JA6XIcTPQ76Vw
GkXas4s/L+cPmw9/gFi/vlfEGDvOJ4bHxpNmHx1lIXE
-> ssh-ed25519 rHotTw vR1ALL0WuY0iD3XlIbgTXDw+BlHaeqWvo0xLRfg4PA8
SzI7LrCzE14oHrnVfgUWr01iB6HFDEHDqYw4rnGu8Hs
-> ssh-ed25519 oRtTqQ ZsVQ29JKayWapAJ8Ni3p5XTtHM6V13kPI+9Y+QHPBxs
mAKmtdqpq+VhI2mCIpW8JIC4k2R8MI4ZH73yFuluWAg
-> ssh-ed25519 IxxZqA gIpd8G6cBK+KjQXbVgDWbtHdtcQhmZbruODpFyQajzk
Gi1rQGYakrkDDTlGobnv4bQhrJdWb1jF54W2jBcNPY8
-> oa~]37NU-grease &> o~hy` 5>|
Brm0NbNjpmLUrPn/REprmi00Gdsu+1VX5Oqt7jrguA4ushkDL9KbhhmLfaE1Bih4
wwtPwaAnWoYGhLEFQ/Z4IMhZ
--- KFSjNpmZ/nktxjdYS+2Cg/RztxprVrb5VRpOol5vHtM
dH<EFBFBD> -üä¯mÒ¾<C392>§ÂË <09>B'Û9š ²³Y*™1J&:§Ï¦Ü ­,g~ëiÍEÜŠ <0B>¶² àM¡zôR[)Ifûóˆ²ñu—

View file

@ -1,5 +1,8 @@
# SPDX-FileCopyrightText: 2024 La Délégation Générale Numérique <contact@dgnum.eu>
#
# SPDX-License-Identifier: EUPL-1.2
{ forgejo_runners-token_file.publicKeys = (import ../../../keys.nix).machineKeysBySystem "nixos"; }
(import ../../../keys.nix).mkSecrets
[ "storage01" "build01" ]
[
"forgejo_runners-token_file"
]

View file

@ -17,8 +17,8 @@ let
net' = meta.network.${name};
mkAddress = { address, prefixLength, ... }: "${address}/${builtins.toString prefixLength}";
mkRoute = Gateway: {
inherit Gateway;
mkRoute = Metric: Gateway: {
inherit Gateway Metric;
GatewayOnLink = true;
};
@ -27,7 +27,7 @@ let
value = {
name = interface;
address = builtins.map mkAddress (net.ipv4 ++ net.ipv6);
routes = builtins.map mkRoute net.gateways;
routes = builtins.map (mkRoute net.metric) net.gateways;
inherit (net) DHCP dns;
};

View file

@ -0,0 +1,639 @@
From f49797f5a589b88e6ad938038e53570fdaa37fa8 Mon Sep 17 00:00:00 2001
From: Tom Hubrecht <tom.hubrecht@dgnum.eu>
Date: Mon, 26 May 2025 13:47:07 +0200
Subject: [PATCH] Revert "add aliases, refactor eval logic"
This reverts commit 3ef5ba8658f68564ed268828ccee39a363200188.
---
src/error.rs | 14 +-
src/nix/hive/mod.rs | 94 +++----------
src/nix/hive/options.nix | 9 --
src/nix/mod.rs | 2 -
src/nix/node_filter.rs | 290 ++++++++++++++-------------------------
5 files changed, 122 insertions(+), 287 deletions(-)
diff --git a/src/error.rs b/src/error.rs
index cbc41ba..f0fbe11 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -6,7 +6,7 @@ use std::process::ExitStatus;
use snafu::{Backtrace, Snafu};
use validator::ValidationErrors;
-use crate::nix::{key, NodeName, Profile, StorePath};
+use crate::nix::{key, Profile, StorePath};
pub type ColmenaResult<T> = Result<T, ColmenaError>;
@@ -76,18 +76,6 @@ pub enum ColmenaError {
#[snafu(display("Filter rule cannot be empty"))]
EmptyFilterRule,
- #[snafu(display(
- "Alias \"{}\" is already taken by {} \"{}\"",
- what.as_str(),
- if *is_node_name { "node name" } else { "alias" },
- with.as_str(),
- ))]
- DuplicateAlias {
- what: NodeName,
- is_node_name: bool,
- with: NodeName,
- },
-
#[snafu(display("Deployment already executed"))]
DeploymentAlreadyExecuted,
diff --git a/src/nix/hive/mod.rs b/src/nix/hive/mod.rs
index 8ebf6de..63ae08e 100644
--- a/src/nix/hive/mod.rs
+++ b/src/nix/hive/mod.rs
@@ -3,7 +3,7 @@ mod assets;
#[cfg(test)]
mod tests;
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
use std::convert::AsRef;
use std::path::{Path, PathBuf};
use std::str::FromStr;
@@ -14,7 +14,6 @@ use tokio::sync::OnceCell;
use validator::Validate;
use super::deployment::TargetNode;
-use super::node_filter::{NeedsEval, PartialNodeConfig};
use super::{
Flake, MetaConfig, NixExpression, NixFlags, NodeConfig, NodeFilter, NodeName,
ProfileDerivation, RegistryConfig, SerializedNixExpression, StorePath,
@@ -262,66 +261,39 @@ impl Hive {
ssh_config: Option<PathBuf>,
ssh_only: bool,
) -> ColmenaResult<HashMap<NodeName, TargetNode>> {
+ let mut node_configs = None;
+
log::info!("Enumerating systems...");
let registry = self.get_registry_config().await?;
log::info!("Enumerating nodes...");
+
let all_nodes = self.node_names().await?;
-
- // try to quickly evaluate the filter without any data to see if it's trivial to evaluate
- let filter_trivial = filter.as_ref().and_then(|filter| filter.try_eval_trivial());
-
let selected_nodes = match filter {
- Some(filter) if filter_trivial.is_none() => {
- log::debug!("Retrieving deployment info for all nodes...");
+ Some(filter) => {
+ if filter.has_node_config_rules() {
+ log::debug!("Retrieving deployment info for all nodes...");
- let needs_eval = filter.needs_eval();
+ let all_node_configs = self.deployment_info().await?;
+ let filtered = filter
+ .filter_node_configs(all_node_configs.iter())
+ .into_iter()
+ .collect();
- let all_node_configs = self.deployment_info_partial(needs_eval).await?;
+ node_configs = Some(all_node_configs);
- // Check for collisions between node names and aliases
- // Returns error if:
- // - A node has an alias matching another node's name
- // - A node has an alias matching its own name
- // - A node has an alias already used by another node
- if needs_eval.aliases {
- let mut taken_aliases = HashSet::new();
- for (name, config) in all_node_configs.iter() {
- for alias in config.aliases.as_ref().unwrap().iter() {
- let overlaps_this = alias == name;
- let overlaps_names = all_node_configs.contains_key(alias);
- let overlaps_aliases = !taken_aliases.insert(alias.clone());
- if overlaps_this || overlaps_names || overlaps_aliases {
- return Err(ColmenaError::DuplicateAlias {
- what: alias.clone(),
- is_node_name: overlaps_this || overlaps_names,
- with: name.clone(),
- });
- }
- }
- }
+ filtered
+ } else {
+ filter.filter_node_names(&all_nodes)?.into_iter().collect()
}
-
- let filtered = filter
- .filter_nodes(all_node_configs.iter())
- .into_iter()
- .collect();
-
- filtered
}
- _ => match filter_trivial {
- // Filter is known to always evaluate to no nodes
- Some(false) => vec![],
- _ => all_nodes.clone(),
- },
+ None => all_nodes.clone(),
};
let n_selected = selected_nodes.len();
- log::debug!("Filtered {n_selected} node names for deployment");
- let mut node_configs = if n_selected == all_nodes.len() {
- log::debug!("Retrieving deployment info for all nodes...");
- self.deployment_info().await?
+ let mut node_configs = if let Some(configs) = node_configs {
+ configs.into_iter().filter(|(name, _)| selected_nodes.contains(name)).collect()
} else {
log::debug!("Retrieving deployment info for selected nodes...");
self.deployment_info_selected(&selected_nodes).await?
@@ -425,34 +397,6 @@ impl Hive {
Ok(configs)
}
- pub async fn deployment_info_partial(
- &self,
- needs_eval: NeedsEval,
- ) -> ColmenaResult<HashMap<NodeName, PartialNodeConfig>> {
- if !needs_eval.any() {
- // Need just the un-aliased names
- return Ok(self
- .node_names()
- .await?
- .into_iter()
- .map(|name| (name, PartialNodeConfig::default()))
- .collect());
- }
-
- let expr = format!(
- "(mapAttrs (name: attrs: {{ inherit (attrs) {} {}; }}) hive.deploymentConfig)",
- needs_eval.aliases.then_some("aliases").unwrap_or_default(),
- needs_eval.tags.then_some("tags").unwrap_or_default(),
- );
- let configs: HashMap<NodeName, PartialNodeConfig> = self
- .nix_instantiate(&expr)
- .eval_with_builders()
- .await?
- .capture_json()
- .await?;
- Ok(configs)
- }
-
/// Retrieve deployment info for a single node.
#[cfg_attr(not(target_os = "linux"), allow(dead_code))]
pub async fn deployment_info_single(
diff --git a/src/nix/hive/options.nix b/src/nix/hive/options.nix
index 0d13642..0db53c5 100644
--- a/src/nix/hive/options.nix
+++ b/src/nix/hive/options.nix
@@ -179,15 +179,6 @@ with builtins; rec {
type = types.listOf types.str;
default = [];
};
- aliases = lib.mkOption {
- description = ''
- A list of aliases for the node.
-
- Can be used to select a node with another name.
- '';
- type = types.listOf types.str;
- default = [];
- };
keys = lib.mkOption {
description = ''
A set of secrets to be deployed to the node.
diff --git a/src/nix/mod.rs b/src/nix/mod.rs
index 6728270..4823f74 100644
--- a/src/nix/mod.rs
+++ b/src/nix/mod.rs
@@ -75,8 +75,6 @@ pub struct NodeConfig {
tags: Vec<String>,
- aliases: Vec<NodeName>,
-
#[serde(rename = "replaceUnknownProfiles")]
replace_unknown_profiles: bool,
diff --git a/src/nix/node_filter.rs b/src/nix/node_filter.rs
index 434fb95..886ad50 100644
--- a/src/nix/node_filter.rs
+++ b/src/nix/node_filter.rs
@@ -7,7 +7,6 @@ use std::str::FromStr;
use clap::Args;
use glob::Pattern as GlobPattern;
-use serde::Deserialize;
use super::{ColmenaError, ColmenaResult, NodeConfig, NodeName};
@@ -29,53 +28,6 @@ The list is comma-separated and globs are supported. To match tags, prepend the
pub on: Option<NodeFilter>,
}
-/// Which fields need to be evaluated
-/// in order to execute the node filter.
-#[derive(Clone, Copy, Debug, Default)]
-pub struct NeedsEval {
- /// Need to evaluate deployment.aliases of all nodes.
- pub aliases: bool,
- /// Need to evaluate deployment.tags of all nodes.
- pub tags: bool,
-}
-
-impl NeedsEval {
- pub fn any(&self) -> bool {
- self.aliases || self.tags
- }
-}
-
-impl std::ops::BitOr for NeedsEval {
- type Output = Self;
- fn bitor(self, rhs: Self) -> Self::Output {
- Self {
- aliases: self.aliases || rhs.aliases,
- tags: self.tags || rhs.tags,
- }
- }
-}
-
-impl std::ops::BitOrAssign for NeedsEval {
- fn bitor_assign(&mut self, rhs: Self) {
- *self = *self | rhs;
- }
-}
-
-#[derive(Debug, Default, Deserialize)]
-pub struct PartialNodeConfig {
- pub aliases: Option<Vec<NodeName>>,
- pub tags: Option<Vec<String>>,
-}
-
-impl From<NodeConfig> for PartialNodeConfig {
- fn from(node_config: NodeConfig) -> Self {
- Self {
- aliases: Some(node_config.aliases),
- tags: Some(node_config.tags),
- }
- }
-}
-
/// A filter rule.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum NodeFilter {
@@ -288,47 +240,30 @@ impl NodeFilter {
}
}
- /// Returns which NodeConfig information is needed to evaluate the filter.
+ /// Returns whether the filter has any rule matching NodeConfig information.
///
/// Evaluating `config.deployment` can potentially be very expensive,
/// especially when its values (e.g., tags) depend on other parts of
/// the configuration.
- pub fn needs_eval(&self) -> NeedsEval {
- // XXX: is the hashset overkill?
+ pub fn has_node_config_rules(&self) -> bool {
match self {
- Self::MatchName(_) => NeedsEval {
- aliases: true,
- ..Default::default()
- },
- Self::MatchTag(_) => NeedsEval {
- tags: true,
- ..Default::default()
- },
- Self::Union(v) | Self::Inter(v) => v
- .iter()
- .fold(NeedsEval::default(), |acc, e| acc | e.needs_eval()),
- Self::Not(e) => e.needs_eval(),
- Self::Empty => NeedsEval::default(),
+ Self::MatchName(_) => false,
+ Self::MatchTag(_) => true,
+ Self::Union(v) => v.iter().any(|e| e.has_node_config_rules()),
+ Self::Inter(v) => v.iter().any(|e| e.has_node_config_rules()),
+ Self::Not(e) => e.has_node_config_rules(),
+ Self::Empty => false,
}
}
/// Decides whether a node is accepted by the filter or not.
- /// panic if the filter depends on tags or aliases and they're None
- fn is_accepted(&self, name: &NodeName, config: &PartialNodeConfig) -> bool {
+ /// panic if the filter depends on tags and config is None
+ fn is_accepted(&self, name: &NodeName, config: Option<&NodeConfig>) -> bool {
match self {
- Self::MatchName(pat) => {
- pat.matches(name.as_str())
- || config
- .aliases
- .as_ref()
- .expect("aliases missing")
- .iter()
- .any(|alias| pat.matches(&alias.0))
- }
+ Self::MatchName(pat) => pat.matches(name.as_str()),
Self::MatchTag(pat) => config
- .tags
- .as_ref()
- .expect("tags missing")
+ .unwrap()
+ .tags()
.iter()
.any(|tag| pat.matches(tag.as_str())),
Self::Union(v) => v.iter().any(|e| e.is_accepted(name, config)),
@@ -339,17 +274,17 @@ impl NodeFilter {
}
/// Runs the filter against a set of NodeConfigs and returns the matched ones.
- pub fn filter_nodes<'a, I>(&self, nodes: I) -> HashSet<NodeName>
+ pub fn filter_node_configs<'a, I>(&self, nodes: I) -> HashSet<NodeName>
where
- I: Iterator<Item = (&'a NodeName, &'a PartialNodeConfig)>,
+ I: Iterator<Item = (&'a NodeName, &'a NodeConfig)>,
{
if self == &Self::Empty {
return HashSet::new();
}
nodes
- .filter_map(|(name, config)| {
- if self.is_accepted(name, config) {
+ .filter_map(|(name, node)| {
+ if self.is_accepted(name, Some(node)) {
Some(name)
} else {
None
@@ -359,34 +294,26 @@ impl NodeFilter {
.collect()
}
- /// In case of trivial filters which dont actually use any node info
- /// Try to eval them immediately
- pub fn try_eval_trivial(&self) -> Option<bool> {
- match self {
- Self::MatchName(_) => None,
- Self::MatchTag(_) => None,
- Self::Union(fs) => {
- for f in fs {
- match f.try_eval_trivial() {
- None => return None,
- Some(true) => return Some(true),
- Some(false) => continue,
+ /// Runs the filter against a set of node names and returns the matched ones.
+ pub fn filter_node_names(&self, nodes: &[NodeName]) -> ColmenaResult<HashSet<NodeName>> {
+ if self.has_node_config_rules() {
+ Err(ColmenaError::Unknown {
+ message: format!(
+ "Not enough information to run rule {:?} - We only have node names",
+ self
+ ),
+ })
+ } else {
+ Ok(nodes
+ .iter()
+ .filter_map(|name| {
+ if self.is_accepted(name, None) {
+ Some(name.clone())
+ } else {
+ None
}
- }
- Some(false)
- }
- Self::Inter(fs) => {
- for f in fs {
- match f.try_eval_trivial() {
- None => return None,
- Some(true) => continue,
- Some(false) => return Some(false),
- }
- }
- Some(true)
- }
- Self::Not(f) => f.try_eval_trivial().map(|b| !b),
- Self::Empty => Some(true),
+ })
+ .collect())
}
}
}
@@ -395,36 +322,6 @@ impl NodeFilter {
mod tests {
use super::*;
- impl PartialNodeConfig {
- fn known_empty() -> Self {
- Self {
- aliases: Some(vec![]),
- tags: Some(vec![]),
- }
- }
-
- pub fn known_aliases_tags(
- aliases: Option<Vec<NodeName>>,
- tags: Option<Vec<String>>,
- ) -> Self {
- Self { aliases, tags }
- }
-
- fn known_tags(tags: Vec<String>) -> Self {
- Self {
- aliases: Some(vec![]),
- tags: Some(tags),
- }
- }
-
- fn known_aliases(aliases: Vec<NodeName>) -> Self {
- Self {
- aliases: Some(aliases),
- tags: Some(vec![]),
- }
- }
- }
-
use std::collections::{HashMap, HashSet};
macro_rules! node {
@@ -527,109 +424,126 @@ mod tests {
}
#[test]
- fn test_filter_nodes_names_only() {
- let nodes = vec![
- (node!("lax-alpha"), PartialNodeConfig::known_empty()),
- (node!("lax-beta"), PartialNodeConfig::known_empty()),
- (node!("sfo-gamma"), PartialNodeConfig::known_empty()),
- ];
+ fn test_filter_node_names() {
+ let nodes = vec![node!("lax-alpha"), node!("lax-beta"), node!("sfo-gamma")];
assert_eq!(
&HashSet::from_iter([node!("lax-alpha")]),
&NodeFilter::new("lax-alpha")
.unwrap()
- .filter_nodes(nodes.iter().map(|x| (&x.0, &x.1))),
+ .filter_node_names(&nodes)
+ .unwrap(),
);
assert_eq!(
&HashSet::from_iter([node!("lax-alpha"), node!("lax-beta")]),
&NodeFilter::new("lax-*")
.unwrap()
- .filter_nodes(nodes.iter().map(|x| (&x.0, &x.1))),
+ .filter_node_names(&nodes)
+ .unwrap(),
);
}
#[test]
- fn test_filter_nodes() {
- let nodes: HashMap<NodeName, PartialNodeConfig> = HashMap::from([
- (
- node!("alpha"),
- PartialNodeConfig::known_tags(vec!["web".to_string(), "infra-lax".to_string()]),
- ),
- (
- node!("beta"),
- PartialNodeConfig::known_tags(vec!["router".to_string(), "infra-sfo".to_string()]),
- ),
- (
- node!("gamma-a"),
- PartialNodeConfig::known_tags(vec!["controller".to_string()]),
- ),
- (
- node!("gamma-b"),
- PartialNodeConfig::known_tags(vec!["ewaste".to_string()]),
- ),
- (
- node!("aliases-test"),
- PartialNodeConfig::known_aliases_tags(
- Some(vec![node!("whatever-alias1"), node!("whatever-alias2")]),
- Some(vec!["testing".into()]),
- ),
- ),
- ]);
- assert_eq!(5, nodes.len());
+ fn test_filter_node_configs() {
+ // TODO: Better way to mock
+ let template = NodeConfig {
+ tags: vec![],
+ target_host: None,
+ target_user: None,
+ target_port: None,
+ allow_local_deployment: false,
+ build_on_target: false,
+ replace_unknown_profiles: false,
+ privilege_escalation_command: vec![],
+ extra_ssh_options: vec![],
+ keys: HashMap::new(),
+ system_type: None,
+ };
+
+ let mut nodes = HashMap::new();
+
+ nodes.insert(
+ node!("alpha"),
+ NodeConfig {
+ tags: vec!["web".to_string(), "infra-lax".to_string()],
+ ..template.clone()
+ },
+ );
+
+ nodes.insert(
+ node!("beta"),
+ NodeConfig {
+ tags: vec!["router".to_string(), "infra-sfo".to_string()],
+ ..template.clone()
+ },
+ );
+
+ nodes.insert(
+ node!("gamma-a"),
+ NodeConfig {
+ tags: vec!["controller".to_string()],
+ ..template.clone()
+ },
+ );
+
+ nodes.insert(
+ node!("gamma-b"),
+ NodeConfig {
+ tags: vec!["ewaste".to_string()],
+ ..template
+ },
+ );
+
+ assert_eq!(4, nodes.len());
assert_eq!(
&HashSet::from_iter([node!("alpha")]),
- &NodeFilter::new("@web").unwrap().filter_nodes(nodes.iter()),
+ &NodeFilter::new("@web")
+ .unwrap()
+ .filter_node_configs(nodes.iter()),
);
assert_eq!(
&HashSet::from_iter([node!("alpha"), node!("beta")]),
&NodeFilter::new("@infra-*")
.unwrap()
- .filter_nodes(nodes.iter()),
+ .filter_node_configs(nodes.iter()),
);
assert_eq!(
&HashSet::from_iter([node!("beta"), node!("gamma-a")]),
&NodeFilter::new("@router,@controller")
.unwrap()
- .filter_nodes(nodes.iter()),
+ .filter_node_configs(nodes.iter()),
);
assert_eq!(
&HashSet::from_iter([node!("beta"), node!("gamma-a"), node!("gamma-b")]),
&NodeFilter::new("@router,gamma-*")
.unwrap()
- .filter_nodes(nodes.iter()),
+ .filter_node_configs(nodes.iter()),
);
assert_eq!(
&HashSet::from_iter([]),
&NodeFilter::new("@router&@controller")
.unwrap()
- .filter_nodes(nodes.iter()),
+ .filter_node_configs(nodes.iter()),
);
assert_eq!(
&HashSet::from_iter([node!("beta")]),
&NodeFilter::new("@router&@infra-*")
.unwrap()
- .filter_nodes(nodes.iter()),
+ .filter_node_configs(nodes.iter()),
);
assert_eq!(
&HashSet::from_iter([node!("alpha")]),
&NodeFilter::new("!@router&@infra-*")
.unwrap()
- .filter_nodes(nodes.iter()),
- );
-
- assert_eq!(
- &HashSet::from_iter([node!("aliases-test")]),
- &NodeFilter::new("whatever-alias1")
- .unwrap()
- .filter_nodes(nodes.iter()),
+ .filter_node_configs(nodes.iter()),
);
}
}
--
2.49.0

View file

@ -12,6 +12,10 @@ with {
};
{
colmena = [
(local ./colmena/0001-Revert-add-aliases-refactor-eval-logic.patch)
];
lix = [
(local ./lix/01-disable-installChecks.patch)
(local ./lix/02-fetchGit-locked.patch)
@ -44,6 +48,9 @@ with {
"nixos-unstable" = [
# Build netbird-relay
(local ./nixpkgs/05-netbird-relay.patch)
# netbird-dashboard: 2.9.0 -> 2.12.0
(npr 403844 "sha256-oQUc/UEvWOdZ5IyemnZeFS5dVysblqdV9fm6t790Kms=")
];
"agenix" = [