Add basic Flakes support

Co-authored-by: Alex Zero <joseph@marsden.space>
Zhaofeng Li 2021-06-29 01:02:43 -07:00
parent 9d8153ee6d
commit e50ba82bf2
12 changed files with 294 additions and 63 deletions

@@ -7,7 +7,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: cachix/install-nix-action@v12
- uses: cachix/install-nix-action@v13
with:
install_url: https://github.com/numtide/nix-unstable-installer/releases/download/nix-2.4pre20210604_8e6ee1b/install
install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve'
extra_nix_config: |
experimental-features = nix-command flakes
- run: nix-build
tests:
runs-on: ubuntu-latest

@@ -5,6 +5,8 @@
Colmena is a simple, stateless [NixOS](https://nixos.org) deployment tool modeled after [NixOps](https://github.com/NixOS/nixops) and [Morph](https://github.com/DBCDK/morph), written in Rust.
It's a thin wrapper over Nix commands like `nix-instantiate` and `nix-copy-closure`, and supports parallel deployment.
Now with 100% more flakes! See *Tutorial with Flakes* below.
<pre>
$ <b>colmena apply --on @tag-a</b>
[INFO ] Enumerating nodes...
@@ -32,6 +34,8 @@ nix-env -if default.nix
## Tutorial
*See Tutorial with Flakes for usage with Nix Flakes.*
Colmena should work with your existing NixOps and Morph configurations with minimal modification.
Here is a sample `hive.nix` with two nodes, with some common configurations applied to both nodes:
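A minimal sketch of what such a `hive.nix` can look like (purely illustrative; the node names and options mirror the Flakes example added below, and the common configuration shared by both nodes is omitted for brevity):

```nix
{
  meta = {
    # meta.nixpkgs is optional here and defaults to <nixpkgs>;
    # it may also be the Nixpkgs lambda or an initialized attribute set.
    nixpkgs = <nixpkgs>;
  };

  host-a = { name, nodes, pkgs, ... }: {
    boot.isContainer = true;
    time.timeZone = nodes.host-b.config.time.timeZone;
  };

  host-b = {
    deployment = {
      targetHost = "somehost.tld";
      targetPort = 1234;
      targetUser = "luser";
    };
    boot.isContainer = true;
    time.timeZone = "America/Los_Angeles";
  };
}
```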
@@ -118,6 +122,45 @@ Here is a sample `hive.nix` with two nodes, with some common configurations appl
The full set of options can be found at `src/nix/eval.nix`.
Run `colmena build` in the same directory to build the configuration, or do `colmena apply` to deploy it to all nodes.
## Tutorial with Flakes
To use Colmena with Nix Flakes, create `outputs.colmena` in your `flake.nix`.
Here is a short example:
```nix
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
};
outputs = { nixpkgs, ... }: {
colmena = {
meta = {
inherit nixpkgs;
};
# Also see the non-Flakes hive.nix example above.
host-a = { name, nodes, pkgs, ... }: {
boot.isContainer = true;
time.timeZone = nodes.host-b.config.time.timeZone;
};
host-b = {
deployment = {
targetHost = "somehost.tld";
targetPort = 1234;
targetUser = "luser";
};
boot.isContainer = true;
time.timeZone = "America/Los_Angeles";
};
};
};
}
```
The full set of options can be found at `src/nix/eval.nix`.
Run `colmena build` in the same directory to build the configuration, or do `colmena apply` to deploy it to all nodes.
## `colmena introspect`
Sometimes you may want to extract values from your Hive configuration for consumption in another program (e.g., [OctoDNS](https://github.com/octodns/octodns)).
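For example, an introspection expression can map each node to its `deployment.targetHost` (a minimal sketch; the `{ nodes, ... }:` argument shape expected by `colmena introspect` is an assumption, while `deployment.targetHost` comes from the examples above):

```nix
# Hypothetical introspection expression; the { nodes, ... } argument shape
# is an assumption, not something this diff shows.
{ nodes, ... }:
builtins.mapAttrs (name: node: node.config.deployment.targetHost) nodes
```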

@@ -41,11 +41,11 @@ pub fn build_cli(include_internal: bool) -> App<'static, 'static> {
// The default value is a lie (sort of)!
//
// The default behavior is to search upwards from the
// current working directory for a file named "hive.nix".
// This behavior is disabled if --config/-f is explicitly
// supplied by the user (occurrences_of > 0).
// current working directory for a file named "flake.nix"
// or "hive.nix". This behavior is disabled if --config/-f
// is explicitly supplied by the user (occurrences_of > 0).
.default_value("hive.nix")
.long_help(r#"If this argument is not specified, Colmena will search upwards from the current working directory for a file named "hive.nix". This behavior is disabled if --config/-f is given explicitly.
.long_help(r#"If this argument is not specified, Colmena will search upwards from the current working directory for a file named "flake.nix" or "hive.nix". This behavior is disabled if --config/-f is given explicitly.
For a sample configuration, see <https://github.com/zhaofengli/colmena>.
"#)

@@ -116,7 +116,6 @@ pub fn subcommand() -> App<'static, 'static> {
pub async fn run(_global_args: &ArgMatches<'_>, local_args: &ArgMatches<'_>) {
let hive = util::hive_from_args(local_args).unwrap();
let hive_base = hive.as_path().parent().unwrap().to_owned();
log::info!("Enumerating nodes...");
let all_nodes = hive.deployment_info().await.unwrap();
@@ -197,7 +196,7 @@ pub async fn run(_global_args: &ArgMatches<'_>, local_args: &ArgMatches<'_>) {
options.set_force_replace_unknown_profiles(local_args.is_present("force-replace-unknown-profiles"));
if local_args.is_present("keep-result") {
options.set_gc_roots(hive_base.join(".gcroots"));
options.set_create_gc_roots(true);
}
deployment.set_options(options);

@@ -1,7 +1,5 @@
use std::cmp::max;
use std::collections::HashMap;
use std::convert::AsRef;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use futures::future::join_all;
@@ -388,13 +386,17 @@ impl Deployment {
}
}
if let Some(base) = &arc_self.options.gc_roots {
if arc_self.options.create_gc_roots {
// Create GC roots
if let Err(e) = profiles.create_gc_roots(base).await {
if let Some(dir) = arc_self.hive.context_dir() {
let base = dir.join(".gcroots");
if let Err(e) = profiles.create_gc_roots(&base).await {
let bar = progress.create_task_progress(BATCH_OPERATION_LABEL.to_string());
bar.failure(&format!("Failed to create GC roots: {:?}", e));
}
}
}
drop(permit);
profiles
@@ -635,8 +637,11 @@ pub struct DeploymentOptions {
/// Whether to upload keys when deploying.
upload_keys: bool,
/// Directory to create GC roots for node profiles in.
gc_roots: Option<PathBuf>,
/// Whether to create GC roots for node profiles.
///
/// If true, .gcroots will be created under the hive's context
/// directory if it exists.
create_gc_roots: bool,
/// Ignore the node-level `deployment.replaceUnknownProfiles` option.
force_replace_unknown_profiles: bool,
@@ -649,7 +654,7 @@ impl Default for DeploymentOptions {
substituters_push: true,
gzip: true,
upload_keys: true,
gc_roots: None,
create_gc_roots: false,
force_replace_unknown_profiles: false,
}
}
@@ -672,8 +677,8 @@ impl DeploymentOptions {
self.upload_keys = enable;
}
pub fn set_gc_roots<P: AsRef<Path>>(&mut self, path: P) {
self.gc_roots = Some(path.as_ref().to_owned());
pub fn set_create_gc_roots(&mut self, enable: bool) {
self.create_gc_roots = enable;
}
pub fn set_force_replace_unknown_profiles(&mut self, enable: bool) {

@@ -1,4 +1,7 @@
{ rawHive }:
{ rawHive ? null # Colmena Hive attrset
, flakeUri ? null # Nix Flake URI with `outputs.colmena`
, hermetic ? flakeUri != null # Whether we are allowed to use <nixpkgs>
}:
with builtins;
let
defaultHive = {
@@ -37,6 +40,8 @@ let
- A path to a Nixpkgs checkout
- The Nixpkgs lambda (e.g., import <nixpkgs>)
- An initialized Nixpkgs attribute set
This option must be specified when using Flakes.
'';
type = types.unspecified;
default = <nixpkgs>;
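For illustration, the accepted forms of `meta.nixpkgs` correspond to values like these (not part of the commit; with Flakes, the examples below instead pass the `nixpkgs` flake input, which the new flake-input branch of `mkNixpkgs` handles):

```nix
# Illustrative values only:
{
  meta.nixpkgs = <nixpkgs>;                 # a path to a Nixpkgs checkout (the default)
  # meta.nixpkgs = import <nixpkgs>;        # the Nixpkgs lambda
  # meta.nixpkgs = import <nixpkgs> { };    # an initialized Nixpkgs attribute set
}
```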
@@ -229,20 +234,30 @@ let
};
};
flakeToHive = flakeUri: let
flake = builtins.getFlake flakeUri;
hive = if flake.outputs ? colmena then flake.outputs.colmena else throw "Flake must define outputs.colmena.";
in hive;
uncheckedHive =
if rawHive != null then rawHive
else if flakeUri != null then flakeToHive flakeUri
else throw "Either an attribute set or a flake URI must be specified.";
uncheckedUserMeta =
if rawHive ? meta && rawHive ? network then
if uncheckedHive ? meta && uncheckedHive ? network then
throw "Only one of `network` and `meta` may be specified. `meta` should be used as `network` is for NixOps compatibility."
else if rawHive ? meta then rawHive.meta
else if rawHive ? network then rawHive.network
else if uncheckedHive ? meta then uncheckedHive.meta
else if uncheckedHive ? network then uncheckedHive.network
else {};
# The final hive will always have the meta key instead of network.
hive = let
userMeta = (lib.modules.evalModules {
modules = [ metaOptions uncheckedUserMeta ];
}).config;
# The final hive will always have the meta key instead of network.
hive = let
mergedHive = removeAttrs (defaultHive // rawHive) [ "meta" "network" ];
mergedHive = removeAttrs (defaultHive // uncheckedHive) [ "meta" "network" ];
meta = {
meta = userMeta;
};
@@ -255,7 +270,9 @@ let
else if typeOf pkgConf == "lambda" then
pkgConf {}
else if typeOf pkgConf == "set" then
pkgConf
# FIXME: Allow configuring `system`
if pkgConf ? outputs then mkNixpkgs configName pkgConf.outputs.legacyPackages.${currentSystem}.path
else pkgConf
else throw ''
${configName} must be one of:
@@ -266,7 +283,10 @@ let
pkgs = let
# Can't rely on the module system yet
nixpkgsConf = if uncheckedUserMeta ? nixpkgs then uncheckedUserMeta.nixpkgs else <nixpkgs>;
nixpkgsConf =
if uncheckedUserMeta ? nixpkgs then uncheckedUserMeta.nixpkgs
else if hermetic then throw "meta.nixpkgs must be specified in hermetic mode."
else <nixpkgs>;
in mkNixpkgs "meta.nixpkgs" nixpkgsConf;
lib = pkgs.lib;

@@ -1,6 +1,7 @@
use std::collections::HashMap;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::convert::AsRef;
use tempfile::{NamedTempFile, TempPath};
use tokio::process::Command;
@@ -19,26 +20,84 @@ use crate::progress::TaskProgress;
const HIVE_EVAL: &'static [u8] = include_bytes!("eval.nix");
#[derive(Debug)]
pub enum HivePath {
/// A Nix Flake URI.
///
/// The flake must contain the `colmena` output.
Flake(String),
/// A regular .nix file
Legacy(PathBuf),
}
impl HivePath {
pub fn from_path<P: AsRef<Path>>(path: P) -> Self {
let path = path.as_ref();
if let Some(osstr) = path.file_name() {
if osstr == "flake.nix" {
let parent = path.parent().unwrap().to_str().unwrap();
let uri = format!("path:{}", parent);
return Self::Flake(uri);
}
}
Self::Legacy(path.to_owned())
}
fn context_dir(&self) -> Option<PathBuf> {
match self {
Self::Legacy(p) => {
if let Some(parent) = p.parent() {
return Some(parent.to_owned());
}
None
}
_ => None,
}
}
}
#[derive(Debug)]
pub struct Hive {
hive: PathBuf,
/// Path to the hive.
path: HivePath,
/// Path to the context directory.
///
/// Normally this is the directory containing the "hive.nix"
/// or "flake.nix".
context_dir: Option<PathBuf>,
/// Path to temporary file containing eval.nix.
eval_nix: TempPath,
/// Whether to pass --show-trace in Nix commands.
show_trace: bool,
}
impl Hive {
pub fn new<P: AsRef<Path>>(hive: P) -> NixResult<Self> {
pub fn new(path: HivePath) -> NixResult<Self> {
let mut eval_nix = NamedTempFile::new()?;
eval_nix.write_all(HIVE_EVAL).unwrap();
let context_dir = path.context_dir();
Ok(Self {
hive: hive.as_ref().to_owned(),
path,
context_dir,
eval_nix: eval_nix.into_temp_path(),
show_trace: false,
})
}
pub fn show_trace(&mut self, value: bool) {
pub fn context_dir(&self) -> Option<&Path> {
self.context_dir.as_ref().map(|p| p.as_ref())
}
pub fn set_show_trace(&mut self, value: bool) {
self.show_trace = value;
}
@@ -59,10 +118,6 @@ impl Hive {
Ok(options)
}
pub fn as_path(&self) -> &Path {
&self.hive
}
/// Retrieve deployment info for all nodes.
pub async fn deployment_info(&self) -> NixResult<HashMap<String, NodeConfig>> {
// FIXME: Really ugly :(
@@ -145,6 +200,10 @@ impl Hive {
fn nix_instantiate(&self, expression: &str) -> NixInstantiate {
NixInstantiate::new(&self, expression.to_owned())
}
fn path(&self) -> &HivePath {
&self.path
}
}
struct NixInstantiate<'hive> {
@@ -166,15 +225,32 @@ impl<'hive> NixInstantiate<'hive> {
// but Nix may not like it...
let mut command = Command::new("nix-instantiate");
match self.hive.path() {
HivePath::Legacy(path) => {
command
.arg("--no-gc-warning")
.arg("-E")
.arg(format!(
"with builtins; let eval = import {}; hive = eval {{ rawHive = import {}; }}; in {}",
self.hive.eval_nix.to_str().unwrap(),
self.hive.as_path().to_str().unwrap(),
path.to_str().unwrap(),
self.expression,
));
}
HivePath::Flake(uri) => {
command
.args(&["--experimental-features", "flakes"])
.arg("--no-gc-warning")
.arg("-E")
.arg(format!(
"with builtins; let eval = import {}; hive = eval {{ flakeUri = \"{}\"; }}; in {}",
self.hive.eval_nix.to_str().unwrap(),
&uri,
self.expression,
));
}
}
if self.hive.show_trace {
command.arg("--show-trace");

@@ -18,7 +18,7 @@ pub use host::{Host, CopyDirection, CopyOptions};
use host::Ssh;
pub mod hive;
pub use hive::Hive;
pub use hive::{Hive, HivePath};
pub mod store;
pub use store::{StorePath, StoreDerivation};

@@ -33,7 +33,8 @@ impl TempHive {
let mut temp_file = NamedTempFile::new().unwrap();
temp_file.write_all(text.as_bytes()).unwrap();
let hive = Hive::new(temp_file.path()).unwrap();
let hive_path = HivePath::from_path(temp_file.path());
let hive = Hive::new(hive_path).unwrap();
Self {
hive,
@@ -46,7 +47,7 @@ impl TempHive {
/// Note that this _does not_ attempt to evaluate `config.toplevel`.
pub fn valid(text: &str) {
let mut hive = Self::new(text);
hive.hive.show_trace(true);
hive.hive.set_show_trace(true);
assert!(block_on(hive.deployment_info()).is_ok());
}
@@ -145,6 +146,20 @@ fn test_parse_simple() {
assert_eq!("luser", &nodes["host-b"].target_user);
}
#[test]
fn test_parse_flake() {
let hive_path = HivePath::Flake("path:./src/nix/tests/simple-flake".to_string());
let mut hive = Hive::new(hive_path).unwrap();
hive.set_show_trace(true);
let nodes = block_on(hive.deployment_info()).unwrap();
assert!(set_eq(
&["host-a", "host-b"],
&nodes.keys().map(String::as_str).collect::<Vec<&str>>(),
));
}
#[test]
fn test_parse_node_references() {
TempHive::valid(r#"

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1624626397,
"narHash": "sha256-+h0ulo5//RqStx6g6MDqD9MzgmBfeZ1VYxwEaSmw/Zs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e1f8852faac7638e88d5e8a5b9ee2a7568685e3f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

@@ -0,0 +1,26 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
};
outputs = { nixpkgs, ... }: {
colmena = {
meta = {
inherit nixpkgs;
};
host-a = { name, nodes, pkgs, ... }: {
boot.isContainer = true;
time.timeZone = nodes.host-b.config.time.timeZone;
};
host-b = {
deployment = {
targetHost = "somehost.tld";
targetPort = 1234;
targetUser = "luser";
};
boot.isContainer = true;
time.timeZone = "America/Los_Angeles";
};
};
};
}

@@ -9,7 +9,7 @@ use glob::Pattern as GlobPattern;
use tokio::io::{AsyncRead, AsyncBufReadExt, BufReader};
use tokio::process::Command;
use super::nix::{NodeConfig, Hive, NixResult};
use super::nix::{NodeConfig, Hive, HivePath, NixResult};
use super::progress::TaskProgress;
enum NodeFilter {
@@ -79,13 +79,12 @@ impl CommandExecution {
}
}
pub fn hive_from_args(args: &ArgMatches<'_>) -> NixResult<Hive> {
let path = match args.occurrences_of("config") {
0 => {
// traverse upwards until we find hive.nix or flake.nix
let mut cur = std::env::current_dir()?;
let mut hive_path = None;
let mut file_path = None;
loop {
let mut listing = match fs::read_dir(&cur) {
@@ -106,7 +105,7 @@ pub fn hive_from_args(args: &ArgMatches<'_>) -> NixResult<Hive> {
match rdirent {
Err(e) => Some(Err(e)),
Ok(f) => {
if f.file_name() == "hive.nix" {
if f.file_name() == "flake.nix" || f.file_name() == "hive.nix" {
Some(Ok(f))
} else {
None
@@ -117,7 +116,7 @@ pub fn hive_from_args(args: &ArgMatches<'_>) -> NixResult<Hive> {
if let Some(rdirent) = found {
let dirent = rdirent?;
hive_path = Some(dirent.path());
file_path = Some(dirent.path());
break;
}
@@ -131,22 +130,37 @@ pub fn hive_from_args(args: &ArgMatches<'_>) -> NixResult<Hive> {
}
}
if hive_path.is_none() {
log::error!("Could not find `hive.nix` in {:?} or any parent directory", std::env::current_dir()?);
if file_path.is_none() {
log::error!("Could not find `hive.nix` or `flake.nix` in {:?} or any parent directory", std::env::current_dir()?);
}
hive_path.unwrap()
file_path.unwrap()
}
_ => {
let path = args.value_of("config").expect("The config arg should exist").to_owned();
canonicalize_cli_path(path)
let fpath = canonicalize_cli_path(&path);
if !fpath.exists() && path.contains(":") {
// Treat as flake URI
let hive_path = HivePath::Flake(path);
let mut hive = Hive::new(hive_path)?;
if args.is_present("show-trace") {
hive.set_show_trace(true);
}
return Ok(hive);
}
fpath
}
};
let mut hive = Hive::new(path)?;
let hive_path = HivePath::from_path(path);
let mut hive = Hive::new(hive_path)?;
if args.is_present("show-trace") {
hive.show_trace(true);
hive.set_show_trace(true);
}
Ok(hive)
@@ -207,7 +221,7 @@ The list is comma-separated and globs are supported. To match tags, prepend the
.takes_value(true))
}
fn canonicalize_cli_path(path: String) -> PathBuf {
fn canonicalize_cli_path(path: &str) -> PathBuf {
if !path.starts_with("/") {
format!("./{}", path).into()
} else {