forked from DGNum/colmena
Support per-node Nixpkgs overrides and local deployment
Also renamed the `network` key to `meta`.
parent b3d84703f3
commit 45b6568164
12 changed files with 312 additions and 53 deletions
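In short: the Nixpkgs pin and the new per-node overrides now live under the `meta` key, and the evaluator changes below still fall back to the old `network` key for compatibility. A minimal sketch of the new layout, based on the README example in this commit (the node names are illustrative only):

```nix
{
  meta = {
    # Hive-wide Nixpkgs: a path, a lambda, or an initialized attribute set
    nixpkgs = <nixpkgs>;

    # Per-node overrides; keys are node attribute names
    nodeNixpkgs = {
      node-b = ./another-nixos-checkout;
    };
  };

  node-a = { pkgs, ... }: {
    # Evaluated against meta.nixpkgs
  };

  node-b = { pkgs, ... }: {
    # Evaluated against ./another-nixos-checkout
  };
}
```

Each node is evaluated against its own Nixpkgs checkout, which is what makes the mixed stable/unstable setup documented in the README changes possible.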
.gitattributes (vendored): 3 changes
@@ -1,2 +1 @@
-/Cargo.nix linguist-generated
-/nix/* liguist-generated
+nix/* liguist-generated=true
Cargo.lock (generated): 19 changes
@@ -85,7 +85,9 @@ dependencies = [
  "console",
  "futures",
+ "glob",
+ "hostname",
  "indicatif",
  "libc",
  "log",
  "quit",
  "serde",
@@ -244,6 +246,17 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "hostname"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
+dependencies = [
+ "libc",
+ "match_cfg",
+ "winapi",
+]
+
 [[package]]
 name = "indicatif"
 version = "0.15.0"
@@ -301,6 +314,12 @@ dependencies = [
  "cfg-if 0.1.10",
 ]
 
+[[package]]
+name = "match_cfg"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
+
 [[package]]
 name = "memchr"
 version = "2.3.4"
Cargo.toml

@@ -12,7 +12,9 @@ clap = "2.33.3"
 console = "0.13.0"
 futures = "0.3.8"
+glob = "0.3.0"
+hostname = "0.3.1"
 indicatif = "0.15.0"
 libc = "0.2.81"
 log = "0.4.11"
 quit = "1.1.2"
 serde = { version = "1.0.118", features = ["derive"] }
README.md: 60 changes
@@ -17,13 +17,18 @@ Here is a sample `hive.nix` with two nodes, with some common configurations appl
 
 ```nix
 {
-  network = {
+  meta = {
     # Override to pin the Nixpkgs version (recommended). This option
     # accepts one of the following:
     # - A path to a Nixpkgs checkout
     # - The Nixpkgs lambda (e.g., import <nixpkgs>)
     # - An initialized Nixpkgs attribute set
     nixpkgs = <nixpkgs>;
+
+    # You can also override Nixpkgs by node!
+    nodeNixpkgs = {
+      node-b = ./another-nixos-checkout;
+    };
   };
 
   defaults = { pkgs, ... }: {
@@ -83,6 +88,59 @@ Then you can evaluate with:
 colmena introspect your-lambda.nix
 ```
 
+## `colmena apply-local`
+
+For some machines, you may still want to stick with the manual `nixos-rebuild`-type of workflow.
+Colmena allows you to build and activate configurations on the host running Colmena itself, provided that:
+
+1. The node must be running NixOS.
+1. The node must have `deployment.allowLocalDeployment` set to `true`.
+1. The node's _attribute name_ must match the hostname of the machine.
+
+If you invoke `apply-local` with `--sudo`, Colmena will attempt to elevate privileges with `sudo` if it's not run as root.
+You may also find it helpful to set `deployment.targetHost` to `null` if you don't intend to deploy to the host via SSH.
+
+As an example, the following `hive.nix` includes a node (`laptop`) that is meant to be only deployed with `apply-local`:
+
+```nix
+{
+  meta = {
+    nixpkgs = ./deps/nixpkgs-stable;
+
+    # I'd like to use the unstable version of Nixpkgs on
+    # my desktop machines.
+    nodeNixpkgs = {
+      laptop = ./deps/nixpkgs-unstable;
+    };
+  };
+
+  # This attribute name must match the output of `hostname` on your machine
+  laptop = { name, nodes, ... }: {
+    networking.hostName = "laptop";
+
+    deployment = {
+      # Allow local deployment with `colmena apply-local`
+      allowLocalDeployment = true;
+
+      # Disable SSH deployment. This node will be skipped in a
+      # normal `colmena apply`.
+      targetHost = null;
+    };
+
+    # Rest of configuration...
+  };
+
+  server-a = { pkgs, ... }: {
+    # This node will use the default Nixpkgs checkout specified
+    # in `meta.nixpkgs`.
+
+    # Rest of configuration...
+  };
+}
+```
+
+On `laptop`, run `colmena apply-local --sudo` to activate the configuration.
+
 ## Current limitations
 
 - It's required to use SSH keys to log into the remote hosts, and interactive authentication will not work.
default.nix

@@ -10,5 +10,5 @@ in rustPlatform.buildRustPackage {
   version = "0.1.0";
 
   src = ./.;
-  cargoSha256 = "06qw50wd8w9b6j7hayx75c9hvff9kxa0cllaqg8x854b1ww9pk8j";
+  cargoSha256 = "1ayfw41kaa5wcqym4sz1l44gldi0qz1pfhfsqd53hgaim4nqiwrn";
 }
src/command/apply.rs

@@ -6,7 +6,7 @@ use crate::util;
 
 pub fn subcommand() -> App<'static, 'static> {
     let command = SubCommand::with_name("apply")
-        .about("Apply the configuration")
+        .about("Apply configurations on remote machines")
         .arg(Arg::with_name("goal")
            .help("Deployment goal")
            .long_help("Same as the targets for switch-to-configuration.\n\"push\" means only copying the closures to remote nodes.")
@@ -61,7 +61,7 @@ pub async fn run(_global_args: &ArgMatches<'_>, local_args: &ArgMatches<'_>) {
     }
 
     // Some ugly argument mangling :/
-    let profiles = hive.build_selected(selected_nodes).await.unwrap();
+    let mut profiles = hive.build_selected(selected_nodes).await.unwrap();
     let goal = DeploymentGoal::from_str(local_args.value_of("goal").unwrap()).unwrap();
     let verbose = local_args.is_present("verbose");
 
@@ -72,17 +72,26 @@ pub async fn run(_global_args: &ArgMatches<'_>, local_args: &ArgMatches<'_>) {
     };
 
     let mut task_list: Vec<DeploymentTask> = Vec::new();
-    for (name, profile) in profiles.iter() {
-        let task = DeploymentTask::new(
-            name.clone(),
-            all_nodes.get(name).unwrap().to_host(),
-            profile.clone(),
-            goal,
-        );
-        task_list.push(task);
+    let mut skip_list: Vec<String> = Vec::new();
+    for (name, profile) in profiles.drain() {
+        let target = all_nodes.get(&name).unwrap().to_ssh_host();
+
+        match target {
+            Some(target) => {
+                let task = DeploymentTask::new(name, target, profile, goal);
+                task_list.push(task);
+            }
+            None => {
+                skip_list.push(name);
+            }
+        }
     }
 
-    println!("Applying configurations...");
+    if skip_list.len() != 0 {
+        println!("Applying configurations ({} skipped)...", skip_list.len());
+    } else {
+        println!("Applying configurations...");
+    }
 
     deploy(task_list, max_parallelism, !verbose).await;
 }
src/command/apply_local.rs (new file): 115 changes
@@ -0,0 +1,115 @@
+use std::env;
+
+use clap::{Arg, App, SubCommand, ArgMatches};
+use tokio::fs;
+use tokio::process::Command;
+
+use crate::nix::{Hive, DeploymentTask, DeploymentGoal, Host};
+use crate::nix::host;
+
+pub fn subcommand() -> App<'static, 'static> {
+    SubCommand::with_name("apply-local")
+        .about("Apply configurations on the local machine")
+        .arg(Arg::with_name("goal")
+            .help("Deployment goal")
+            .long_help("Same as the targets for switch-to-configuration.\n\"push\" is noop in apply-local.")
+            .default_value("switch")
+            .index(1)
+            .possible_values(&["push", "switch", "boot", "test", "dry-activate"]))
+        .arg(Arg::with_name("config")
+            .short("f")
+            .long("config")
+            .help("Path to a Hive expression")
+            .default_value("hive.nix")
+            .required(true))
+        .arg(Arg::with_name("sudo")
+            .long("sudo")
+            .help("Attempt to escalate privileges if not run as root")
+            .takes_value(false))
+        .arg(Arg::with_name("we-are-launched-by-sudo")
+            .long("we-are-launched-by-sudo")
+            .hidden(true)
+            .takes_value(false))
+}
+
+pub async fn run(_global_args: &ArgMatches<'_>, local_args: &ArgMatches<'_>) {
+    // Sanity check: Are we running NixOS?
+    if let Ok(os_release) = fs::read_to_string("/etc/os-release").await {
+        if !os_release.contains("ID=nixos\n") {
+            eprintln!("\"apply-local\" only works on NixOS machines.");
+            quit::with_code(5);
+        }
+    } else {
+        eprintln!("Could not detect the OS version from /etc/os-release.");
+        quit::with_code(5);
+    }
+
+    // Escalate privileges?
+    {
+        let euid: u32 = unsafe { libc::geteuid() };
+        if euid != 0 {
+            if local_args.is_present("we-are-launched-by-sudo") {
+                eprintln!("Failed to escalate privileges. We are still not root despite a successful sudo invocation.");
+                quit::with_code(3);
+            }
+
+            if local_args.is_present("sudo") {
+                escalate().await;
+            } else {
+                eprintln!("Colmena was not started by root. This is probably not going to work.");
+                eprintln!("Hint: Add the --sudo flag.");
+            }
+        }
+    }
+
+    let mut hive = Hive::from_config_arg(local_args).unwrap();
+    let hostname = hostname::get().expect("Could not get hostname")
+        .to_string_lossy().into_owned();
+    let goal = DeploymentGoal::from_str(local_args.value_of("goal").unwrap()).unwrap();
+
+    println!("Enumerating nodes...");
+    let all_nodes = hive.deployment_info().await.unwrap();
+
+    let target: Box<dyn Host> = {
+        if let Some(info) = all_nodes.get(&hostname) {
+            if !info.allows_local_deployment() {
+                eprintln!("Local deployment is not enabled for host {}.", hostname);
+                eprintln!("Hint: Set deployment.allowLocalDeployment to true.");
+                quit::with_code(2);
+            }
+            host::local()
+        } else {
+            eprintln!("Host {} is not present in the Hive configuration.", hostname);
+            quit::with_code(2);
+        }
+    };
+
+    println!("Building local node configuration...");
+    let profile = {
+        let selected_nodes: Vec<String> = vec![hostname.clone()];
+        let mut profiles = hive.build_selected(selected_nodes).await
+            .expect("Failed to build local configurations");
+        profiles.remove(&hostname).unwrap()
+    };
+
+    let mut task = DeploymentTask::new(hostname, target, profile, goal);
+    task.execute().await.unwrap();
+}
+
+async fn escalate() -> ! {
+    // Restart ourselves with sudo
+    let argv: Vec<String> = env::args().collect();
+
+    let exit = Command::new("sudo")
+        .arg("--")
+        .args(argv)
+        .arg("--no-sudo")
+        .spawn()
+        .expect("Failed to run sudo to escalate privileges")
+        .wait()
+        .await
+        .expect("Failed to wait on child");
+
+    // Exit with the same exit code
+    quit::with_code(exit.code().unwrap());
+}
src/command/mod.rs

@@ -1,3 +1,4 @@
 pub mod build;
 pub mod apply;
 pub mod introspect;
+pub mod apply_local;
src/main.rs: 22 changes
@@ -7,18 +7,24 @@ mod deployment;
 mod util;
 
 macro_rules! command {
-    ($name:ident, $matches:ident) => {
-        if let Some(sub_matches) = $matches.subcommand_matches(stringify!($name)) {
-            command::$name::run(&$matches, &sub_matches).await;
+    ($module:ident, $matches:ident) => {
+        if let Some(sub_matches) = $matches.subcommand_matches(stringify!($module)) {
+            command::$module::run(&$matches, &sub_matches).await;
             return;
         }
-    }
+    };
+    ($name:expr, $module:ident, $matches:ident) => {
+        if let Some(sub_matches) = $matches.subcommand_matches($name) {
+            command::$module::run(&$matches, &sub_matches).await;
+            return;
+        }
+    };
 }
 
 macro_rules! bind_command {
-    ($name:ident, $app:ident) => {
-        $app = $app.subcommand(command::$name::subcommand());
-    }
+    ($module:ident, $app:ident) => {
+        $app = $app.subcommand(command::$module::subcommand());
+    };
 }
 
 #[tokio::main(flavor = "multi_thread")]
@@ -31,12 +37,14 @@ async fn main() {
         .setting(AppSettings::ArgRequiredElseHelp);
 
     bind_command!(apply, app);
+    bind_command!(apply_local, app);
     bind_command!(build, app);
     bind_command!(introspect, app);
 
     let matches = app.get_matches();
 
     command!(apply, matches);
+    command!("apply-local", apply_local, matches);
     command!(build, matches);
     command!(introspect, matches);
 }
src/nix/eval.nix

@@ -3,7 +3,7 @@ with builtins;
 let
   defaultHive = {
     # Will be set in defaultHiveMeta
-    network = {};
+    meta = {};
 
     # Like in NixOps, there is a special host named `defaults`
     # containing configurations that will be applied to all
@@ -11,12 +11,16 @@ let
     defaults = {};
   };
 
-  defaultHiveMeta = {
+  defaultMeta = {
     name = "hive";
     description = "A Colmena Hive";
 
     # Can be a path, a lambda, or an initialized Nixpkgs attrset
     nixpkgs = <nixpkgs>;
+
+    # Per-node Nixpkgs overrides
+    # Keys are hostnames.
+    nodeNixpkgs = {};
   };
 
   # Colmena-specific options
@@ -32,9 +36,10 @@
      description = ''
        The target SSH node for deployment.
 
-       If not specified, the node's attribute name will be used.
+       By default, the node's attribute name will be used.
+       If set to null, only local deployment will be supported.
      '';
-     type = types.str;
+     type = types.nullOr types.str;
      default = name;
    };
    targetUser = lib.mkOption {
@@ -44,6 +49,23 @@
      type = types.str;
      default = "root";
    };
+   allowLocalDeployment = lib.mkOption {
+     description = ''
+       Allow the configuration to be applied locally on the host running
+       Colmena.
+
+       For local deployment to work, all of the following must be true:
+       - The node must be running NixOS.
+       - The node must have deployment.allowLocalDeployment set to true.
+       - The node's networking.hostName must match the hostname.
+
+       To apply the configurations locally, run `colmena apply-local`.
+       You can also set deployment.targetHost to null if the host is not
+       accessible over SSH (only local deployment will be possible).
+     '';
+     type = types.bool;
+     default = false;
+   };
    tags = lib.mkOption {
      description = ''
        A list of tags for the node.
@@ -57,42 +79,53 @@
    };
  };
 
-  hiveMeta = {
-    network = defaultHiveMeta // (if rawHive ? network then rawHive.network else {});
-  };
-  hive = defaultHive // rawHive // hiveMeta;
+  userMeta = if rawHive ? meta then rawHive.meta
+             else if rawHive ? network then rawHive.network
+             else {};
 
-  pkgs = let
-    pkgConf = hive.network.nixpkgs;
-  in if typeOf pkgConf == "path" then
-    import pkgConf {}
-  else if typeOf pkgConf == "lambda" then
-    pkgConf {}
-  else if typeOf pkgConf == "set" then
-    pkgConf
-  else throw ''
-    network.nixpkgs must be one of:
+  # The final hive will always have the meta key instead of network.
+  hive = let
+    mergedHive = removeAttrs (defaultHive // rawHive) [ "meta" "network" ];
+    meta = {
+      meta = lib.recursiveUpdate defaultMeta userMeta;
+    };
+  in mergedHive // meta;
 
-    - A path to Nixpkgs (e.g., <nixpkgs>)
-    - A Nixpkgs lambda (e.g., import <nixpkgs>)
-    - A Nixpkgs attribute set
-  '';
+  mkNixpkgs = configName: pkgConf:
+    if typeOf pkgConf == "path" then
+      import pkgConf {}
+    else if typeOf pkgConf == "lambda" then
+      pkgConf {}
+    else if typeOf pkgConf == "set" then
+      pkgConf
+    else throw ''
+      ${configName} must be one of:
+
+      - A path to Nixpkgs (e.g., <nixpkgs>)
+      - A Nixpkgs lambda (e.g., import <nixpkgs>)
+      - A Nixpkgs attribute set
+    '';
 
+  pkgs = mkNixpkgs "meta.nixpkgs" (defaultMeta // userMeta).nixpkgs;
   lib = pkgs.lib;
   reservedNames = [ "defaults" "network" "meta" ];
 
   evalNode = name: config: let
-    evalConfig = import (pkgs.path + "/nixos/lib/eval-config.nix");
+    npkgs =
+      if hasAttr name hive.meta.nodeNixpkgs
+      then mkNixpkgs "meta.nodeNixpkgs.${name}" hive.meta.nodeNixpkgs.${name}
+      else pkgs;
+    evalConfig = import (npkgs.path + "/nixos/lib/eval-config.nix");
   in evalConfig {
    system = currentSystem;
    modules = [
      deploymentOptions
      hive.defaults
      config
-    ] ++ (import (pkgs.path + "/nixos/modules/module-list.nix"));
+    ] ++ (import (npkgs.path + "/nixos/modules/module-list.nix"));
    specialArgs = {
      inherit name nodes;
-      modulesPath = pkgs.path + "/nixos/modules";
+      modulesPath = npkgs.path + "/nixos/modules";
    };
  };
 
|
|||
# Change in the order of the names should not cause a derivation to be created
|
||||
selected = lib.attrsets.filterAttrs (name: _: elem name names) toplevel;
|
||||
in derivation rec {
|
||||
name = "colmena-${hive.network.name}";
|
||||
name = "colmena-${hive.meta.name}";
|
||||
system = currentSystem;
|
||||
json = toJSON (lib.attrsets.mapAttrs (k: v: toString v) selected);
|
||||
builder = pkgs.writeScript "${name}.sh" ''
|
||||
|
|
|
src/nix/host.rs

@@ -84,6 +84,13 @@ impl Host for Local {
             paths.lines().map(|p| p.to_string().into()).collect()
         })
     }
+    async fn activate(&mut self, profile: &StorePath, goal: DeploymentGoal) -> NixResult<()> {
+        let activation_command = format!("{}/bin/switch-to-configuration", profile.as_path().to_str().unwrap());
+        Command::new(activation_command)
+            .arg(goal.as_str().unwrap())
+            .passthrough()
+            .await
+    }
 }
 
 /// A remote machine connected over SSH.
src/nix/mod.rs

@@ -15,7 +15,7 @@ use tempfile::{NamedTempFile, TempPath};
 use tokio::process::Command;
 use tokio::sync::Mutex;
 
-mod host;
+pub mod host;
 pub use host::{Host, CopyDirection};
 use host::SSH;
 
@@ -131,18 +131,26 @@ impl Hive {
 #[derive(Debug, Clone, Deserialize)]
 pub struct DeploymentConfig {
     #[serde(rename = "targetHost")]
-    target_host: String,
+    target_host: Option<String>,
 
     #[serde(rename = "targetUser")]
     target_user: String,
 
+    #[serde(rename = "allowLocalDeployment")]
+    allow_local_deployment: bool,
     tags: Vec<String>,
 }
 
 impl DeploymentConfig {
     pub fn tags(&self) -> &[String] { &self.tags }
-    pub fn to_host(&self) -> Box<dyn Host> {
-        let host = SSH::new(self.target_user.clone(), self.target_host.clone());
-        Box::new(host)
+    pub fn allows_local_deployment(&self) -> bool { self.allow_local_deployment }
+
+    pub fn to_ssh_host(&self) -> Option<Box<dyn Host>> {
+        self.target_host.as_ref().map(|target_host| {
+            let host = SSH::new(self.target_user.clone(), target_host.clone());
+            let host: Box<dyn Host> = Box::new(host);
+            host
+        })
     }
 }