forked from DGNum/colmena
Convert apply arguments to type-safe clap derive
parent b80b57cb48
commit 87f4e3a676
6 changed files with 210 additions and 187 deletions
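The change swaps clap's untyped builder calls (`Arg::new`, `get_one`, `get_flag`) for `#[derive(Args)]` structs that are grafted onto an existing `Command` with `augment_args` and read back as typed fields with `from_arg_matches`. A minimal, self-contained sketch of that pattern (hypothetical `DemoOpts` names, clap 4 with the `derive` feature assumed), not the actual Colmena code:

// Sketch only: augment_args + from_arg_matches on a typed options struct.
use clap::{Args, Command, FromArgMatches};

#[derive(Debug, Args)]
struct DemoOpts {
    /// Deploy parallelism limit.
    #[arg(short, long, default_value_t = 10)]
    parallel: usize,

    /// Be verbose.
    #[arg(short, long)]
    verbose: bool,
}

fn main() {
    // Attach the derived arguments to a hand-built subcommand, mirroring
    // `Opts::augment_args(ClapCommand::new("apply"))` in the diff below.
    let cmd = DemoOpts::augment_args(Command::new("apply"));
    let matches = cmd.get_matches_from(["apply", "--parallel", "4", "-v"]);

    // Recover the typed struct instead of stringly-typed get_one/get_flag calls.
    let DemoOpts { parallel, verbose } = DemoOpts::from_arg_matches(&matches).expect("parse");
    assert_eq!(parallel, 4);
    assert!(verbose);
}

The same round trip is what the new `Opts` and `DeployOpts` structs in the diff do for `colmena apply`.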
@@ -1,135 +1,126 @@
use std::env;
use std::path::PathBuf;
use std::str::FromStr;

use clap::{
    builder::{ArgPredicate, PossibleValuesParser, ValueParser},
    value_parser, Arg, ArgMatches, Command as ClapCommand, FromArgMatches,
};
use clap::{builder::ArgPredicate, ArgMatches, Args, Command as ClapCommand, FromArgMatches};

use crate::nix::deployment::{
    Deployment, EvaluationNodeLimit, EvaluatorType, Goal, Options, ParallelismLimit,
use crate::nix::{
    deployment::{Deployment, EvaluationNodeLimit, EvaluatorType, Goal, Options, ParallelismLimit},
    node_filter::NodeFilterOpts,
};
use crate::nix::NodeFilter;
use crate::progress::SimpleProgressOutput;
use crate::util;

use crate::{error::ColmenaError, nix::hive::HiveArgs};

pub fn register_deploy_args(command: ClapCommand) -> ClapCommand {
    command
        .arg(Arg::new("eval-node-limit")
            .long("eval-node-limit")
            .value_name("LIMIT")
            .help("Evaluation node limit")
            .long_help(r#"Limits the maximum number of hosts to be evaluated at once.
#[derive(Debug, Args)]
pub struct DeployOpts {
    #[arg(
        value_name = "LIMIT",
        default_value_t,
        long,
        help = "Evaluation node limit",
        long_help = r#"Limits the maximum number of hosts to be evaluated at once.

The evaluation process is RAM-intensive. The default behavior is to limit the maximum number of host evaluated at the same time based on naive heuristics.

Set to 0 to disable the limit.
"#)
            .default_value("auto")
            .num_args(1)
            .value_parser(ValueParser::new(|s: &str| -> Result<EvaluationNodeLimit, String> {
                if s == "auto" {
                    return Ok(EvaluationNodeLimit::Heuristic);
                }
"#
    )]
    eval_node_limit: EvaluationNodeLimit,
    #[arg(
        value_name = "LIMIT",
        default_value_t = 10,
        long,
        short,
        help = "Deploy parallelism limit",
        long_help = r#"Limits the maximum number of hosts to be deployed in parallel.

                match s.parse::<usize>() {
                    Ok(0) => Ok(EvaluationNodeLimit::None),
                    Ok(n) => Ok(EvaluationNodeLimit::Manual(n)),
                    Err(_) => Err(String::from("The value must be a valid number")),
                }
            })))
        .arg(Arg::new("parallel")
            .short('p')
            .long("parallel")
            .value_name("LIMIT")
            .help("Deploy parallelism limit")
            .long_help(r#"Limits the maximum number of hosts to be deployed in parallel.

Set to 0 to disable parallemism limit.
"#)
            .default_value("10")
            .num_args(1)
            .value_parser(value_parser!(usize)))
        .arg(Arg::new("keep-result")
            .long("keep-result")
            .help("Create GC roots for built profiles")
            .long_help(r#"Create GC roots for built profiles.
Set to 0 to disable parallelism limit.
"#
    )]
    parallel: usize,
    #[arg(
        long,
        help = "Create GC roots for built profiles",
        long_help = r#"Create GC roots for built profiles.

The built system profiles will be added as GC roots so that they will not be removed by the garbage collector.
The links will be created under .gcroots in the directory the Hive configuration is located.
"#)
            .num_args(0))
        .arg(Arg::new("verbose")
            .short('v')
            .long("verbose")
            .help("Be verbose")
            .long_help("Deactivates the progress spinner and prints every line of output.")
            .num_args(0))
        .arg(Arg::new("no-keys")
            .long("no-keys")
            .help("Do not upload keys")
            .long_help(r#"Do not upload secret keys set in `deployment.keys`.
"#
    )]
    keep_result: bool,
    #[arg(
        short,
        long,
        help = "Be verbose",
        long_help = "Deactivates the progress spinner and prints every line of output."
    )]
    verbose: bool,
    #[arg(
        long,
        help = "Do not upload keys",
        long_help = r#"Do not upload secret keys set in `deployment.keys`.

By default, Colmena will upload keys set in `deployment.keys` before deploying the new profile on a node.
To upload keys without building or deploying the rest of the configuration, use `colmena upload-keys`.
"#)
            .num_args(0))
        .arg(Arg::new("reboot")
            .long("reboot")
            .help("Reboot nodes after activation")
            .long_help("Reboots nodes after activation and waits for them to come back up.")
            .num_args(0))
        .arg(Arg::new("no-substitute")
            .long("no-substitute")
            .alias("no-substitutes")
            .help("Do not use substitutes")
            .long_help("Disables the use of substituters when copying closures to the remote host.")
            .num_args(0))
        .arg(Arg::new("no-gzip")
            .long("no-gzip")
            .help("Do not use gzip")
            .long_help("Disables the use of gzip when copying closures to the remote host.")
            .num_args(0))
        .arg(Arg::new("build-on-target")
            .long("build-on-target")
            .help("Build the system profiles on the target nodes")
            .long_help(r#"Build the system profiles on the target nodes themselves.
"#
    )]
    no_keys: bool,
    #[arg(
        long,
        help = "Reboot nodes after activation",
        long_help = "Reboots nodes after activation and waits for them to come back up."
    )]
    reboot: bool,
    #[arg(
        long,
        alias = "no-substitutes",
        help = "Do not use substitutes",
        long_help = "Disables the use of substituters when copying closures to the remote host."
    )]
    no_substitute: bool,
    #[arg(
        long,
        help = "Do not use gzip",
        long_help = "Disables the use of gzip when copying closures to the remote host."
    )]
    no_gzip: bool,
    #[arg(
        long,
        help = "Build the system profiles on the target nodes",
        long_help = r#"Build the system profiles on the target nodes themselves.

If enabled, the system profiles will be built on the target nodes themselves, not on the host running Colmena itself.
This overrides per-node perferences set in `deployment.buildOnTarget`.
To temporarily disable remote build on all nodes, use `--no-build-on-target`.
"#)
            .num_args(0))
        .arg(Arg::new("no-build-on-target")
            .long("no-build-on-target")
            .hide(true)
            .num_args(0))
        .arg(Arg::new("force-replace-unknown-profiles")
            .long("force-replace-unknown-profiles")
            .help("Ignore all targeted nodes deployment.replaceUnknownProfiles setting")
            .long_help(r#"If `deployment.replaceUnknownProfiles` is set for a target, using this switch
will treat deployment.replaceUnknownProfiles as though it was set true and perform unknown profile replacement."#)
            .num_args(0))
        .arg(Arg::new("evaluator")
            .long("evaluator")
            .help("The evaluator to use (experimental)")
            .long_help(r#"If set to `chunked` (default), evaluation of nodes will happen in batches. If set to `streaming`, the experimental streaming evaluator (nix-eval-jobs) will be used and nodes will be evaluated in parallel.
"#
    )]
    build_on_target: bool,
    #[arg(long, hide = true)]
    no_build_on_target: bool,
    #[arg(
        long,
        help = "Ignore all targeted nodes deployment.replaceUnknownProfiles setting",
        long_help = r#"If `deployment.replaceUnknownProfiles` is set for a target, using this switch
will treat deployment.replaceUnknownProfiles as though it was set true and perform unknown profile replacement."#
    )]
    force_replace_unknown_profiles: bool,
    #[arg(
        long,
        default_value_t,
        help = "The evaluator to use (experimental)",
        long_help = r#"If set to `chunked` (default), evaluation of nodes will happen in batches. If set to `streaming`, the experimental streaming evaluator (nix-eval-jobs) will be used and nodes will be evaluated in parallel.

This is an experimental feature."#)
            .default_value("chunked")
            .value_parser(value_parser!(EvaluatorType)))
This is an experimental feature."#
    )]
    evaluator: EvaluatorType,
}

pub fn subcommand() -> ClapCommand {
    let command = ClapCommand::new("apply")
        .about("Apply configurations on remote machines")
        .arg(
            Arg::new("goal")
                .help("Deployment goal")
                .long_help(
                    r#"The goal of the deployment.
#[derive(Debug, Args)]
#[command(name = "apply", about = "Apply configurations on remote machines")]
struct Opts {
    #[arg(
        help = "Deployment goal",
        long_help = r#"The goal of the deployment.

Same as the targets for switch-to-configuration, with the following extra pseudo-goals:

@@ -139,24 +130,17 @@ Same as the targets for switch-to-configuration, with the following extra pseudo
`switch` is the default goal unless `--reboot` is passed, in which case `boot` is the default.
"#,
                )
                .default_value("switch")
                .default_value_if("reboot", ArgPredicate::IsPresent, Some("boot"))
                .default_value("switch")
                .index(1)
                .value_parser(PossibleValuesParser::new([
                    "build",
                    "push",
                    "switch",
                    "boot",
                    "test",
                    "dry-activate",
                    "keys",
                ])),
        );
    let command = register_deploy_args(command);
        default_value_if("reboot", ArgPredicate::IsPresent, Some("boot"))
    )]
    goal: Goal,
    #[command(flatten)]
    deploy: DeployOpts,
    #[command(flatten)]
    node_filter: NodeFilterOpts,
}

    util::register_selector_args(command)
pub fn subcommand() -> ClapCommand {
    Opts::augment_args(ClapCommand::new("apply"))
}

pub async fn run(_global_args: &ArgMatches, local_args: &ArgMatches) -> Result<(), ColmenaError> {
@@ -168,13 +152,27 @@ pub async fn run(_global_args: &ArgMatches, local_args: &ArgMatches) -> Result<(
    let ssh_config = env::var("SSH_CONFIG_FILE").ok().map(PathBuf::from);

    // FIXME: Just get_one::<Goal>
    let goal_arg = local_args.get_one::<String>("goal").unwrap();
    let goal = Goal::from_str(goal_arg).unwrap();
    let Opts {
        goal,
        deploy:
            DeployOpts {
                eval_node_limit,
                parallel,
                keep_result,
                verbose,
                no_keys,
                reboot,
                no_substitute,
                no_gzip,
                build_on_target,
                no_build_on_target,
                force_replace_unknown_profiles,
                evaluator,
            },
        node_filter,
    } = Opts::from_arg_matches(local_args).expect("Failed to parse `apply` args");

    let filter = local_args.get_one::<NodeFilter>("on");

    if filter.is_none() && goal != Goal::Build {
    if node_filter.on.is_none() && goal != Goal::Build {
        // User did not specify node, we should check meta and see rules
        let meta = hive.get_meta_config().await?;
        if !meta.allow_apply_all {
@@ -185,11 +183,15 @@ pub async fn run(_global_args: &ArgMatches, local_args: &ArgMatches) -> Result<(
    }

    let targets = hive
        .select_nodes(filter.cloned(), ssh_config, goal.requires_target_host())
        .select_nodes(
            node_filter.on.clone(),
            ssh_config,
            goal.requires_target_host(),
        )
        .await?;
    let n_targets = targets.len();

    let verbose = local_args.get_flag("verbose") || goal == Goal::DryActivate;
    let verbose = verbose || goal == Goal::DryActivate;
    let mut output = SimpleProgressOutput::new(verbose);
    let progress = output.get_sender();

@@ -198,27 +200,20 @@ pub async fn run(_global_args: &ArgMatches, local_args: &ArgMatches) -> Result<(
    // FIXME: Configure limits
    let options = {
        let mut options = Options::default();
        options.set_substituters_push(!local_args.get_flag("no-substitute"));
        options.set_gzip(!local_args.get_flag("no-gzip"));
        options.set_upload_keys(!local_args.get_flag("no-keys"));
        options.set_reboot(local_args.get_flag("reboot"));
        options.set_force_replace_unknown_profiles(
            local_args.get_flag("force-replace-unknown-profiles"),
        );
        options.set_evaluator(
            local_args
                .get_one::<EvaluatorType>("evaluator")
                .unwrap()
                .to_owned(),
        );
        options.set_substituters_push(!no_substitute);
        options.set_gzip(!no_gzip);
        options.set_upload_keys(!no_keys);
        options.set_reboot(reboot);
        options.set_force_replace_unknown_profiles(force_replace_unknown_profiles);
        options.set_evaluator(evaluator);

        if local_args.get_flag("keep-result") {
        if keep_result {
            options.set_create_gc_roots(true);
        }

        if local_args.get_flag("no-build-on-target") {
        if no_build_on_target {
            options.set_force_build_on_target(false);
        } else if local_args.get_flag("build-on-target") {
        } else if build_on_target {
            options.set_force_build_on_target(true);
        }

@@ -227,7 +222,7 @@ pub async fn run(_global_args: &ArgMatches, local_args: &ArgMatches) -> Result<(
    deployment.set_options(options);

    if local_args.get_flag("no-keys") && goal == Goal::UploadKeys {
    if no_keys && goal == Goal::UploadKeys {
        log::error!("--no-keys cannot be used when the goal is to upload keys");
        quit::with_code(1);
    }
@@ -235,23 +230,17 @@ pub async fn run(_global_args: &ArgMatches, local_args: &ArgMatches) -> Result<(
    let parallelism_limit = {
        let mut limit = ParallelismLimit::default();
        limit.set_apply_limit({
            let limit = local_args.get_one::<usize>("parallel").unwrap().to_owned();
            if limit == 0 {
            if parallel == 0 {
                n_targets
            } else {
                limit
                parallel
            }
        });
        limit
    };

    let evaluation_node_limit = local_args
        .get_one::<EvaluationNodeLimit>("eval-node-limit")
        .unwrap()
        .to_owned();

    deployment.set_parallelism_limit(parallelism_limit);
    deployment.set_evaluation_node_limit(evaluation_node_limit);
    deployment.set_evaluation_node_limit(eval_node_limit);

    let (deployment, output) = tokio::join!(deployment.execute(), output.run_until_completion(),);

@@ -1,9 +1,9 @@
use clap::{builder::PossibleValuesParser, Arg, Command as ClapCommand};
use clap::{builder::PossibleValuesParser, Arg, Args, Command as ClapCommand};

use crate::util;

use super::apply;
pub use super::apply::run;
use super::apply::DeployOpts;

pub fn subcommand() -> ClapCommand {
    let command = ClapCommand::new("build")
@@ -21,7 +21,5 @@ This subcommand behaves as if you invoked `apply` with the `build` goal."#,
            .num_args(1),
        );

    let command = apply::register_deploy_args(command);

    util::register_selector_args(command)
    util::register_selector_args(DeployOpts::augment_args_for_update(command))
}

@@ -1,9 +1,9 @@
use clap::{builder::PossibleValuesParser, Arg, Command as ClapCommand};
use clap::{builder::PossibleValuesParser, Arg, Args, Command as ClapCommand};

use crate::util;

use super::apply;
pub use super::apply::run;
use super::apply::DeployOpts;

pub fn subcommand() -> ClapCommand {
    let command = ClapCommand::new("upload-keys")
@@ -21,7 +21,5 @@ This subcommand behaves as if you invoked `apply` with the pseudo `keys` goal."#
            .num_args(1),
        );

    let command = apply::register_deploy_args(command);

    util::register_selector_args(command)
    util::register_selector_args(DeployOpts::augment_args_for_update(command))
}

@@ -1,5 +1,7 @@
//! Parallelism limits.

use std::str::FromStr;

use tokio::sync::Semaphore;

/// Amount of RAM reserved for the system, in MB.
@@ -55,9 +57,10 @@ impl ParallelismLimit {
/// - A simple heuristic based on remaining memory in the system
/// - A supplied number
/// - No limit at all
#[derive(Copy, Clone, Debug)]
#[derive(Copy, Clone, Debug, Default)]
pub enum EvaluationNodeLimit {
    /// Use a naive heuristic based on available memory.
    #[default]
    Heuristic,

    /// Supply the maximum number of nodes.
@@ -67,9 +70,28 @@ pub enum EvaluationNodeLimit {
    None,
}

impl Default for EvaluationNodeLimit {
    fn default() -> Self {
        Self::Heuristic
impl FromStr for EvaluationNodeLimit {
    type Err = &'static str;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "auto" {
            return Ok(EvaluationNodeLimit::Heuristic);
        }

        match s.parse::<usize>() {
            Ok(0) => Ok(EvaluationNodeLimit::None),
            Ok(n) => Ok(EvaluationNodeLimit::Manual(n)),
            Err(_) => Err("The value must be a valid number or `auto`"),
        }
    }
}

impl std::fmt::Display for EvaluationNodeLimit {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Heuristic => write!(f, "auto"),
            Self::None => write!(f, "0"),
            Self::Manual(n) => write!(f, "{n}"),
        }
    }
}

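The new `Default`, `FromStr`, and `Display` impls above are what allow `eval_node_limit` to be declared with `#[arg(long, default_value_t)]`: `Default` supplies the implicit default, `Display` renders it in `--help`, and clap's value parser falls back to `FromStr` to parse user input. A compressed, hypothetical stand-in (`NodeLimit`/`Cli` names are illustrative, clap 4 derive assumed) showing the same contract:

// Sketch only: the three traits behind `#[arg(long, default_value_t)]`.
use std::fmt;
use std::str::FromStr;

use clap::Parser;

#[derive(Copy, Clone, Debug, Default)]
enum NodeLimit {
    #[default]
    Auto,
    Manual(usize),
}

impl FromStr for NodeLimit {
    type Err = &'static str;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "auto" {
            return Ok(Self::Auto);
        }
        s.parse::<usize>()
            .map(Self::Manual)
            .map_err(|_| "expected a number or `auto`")
    }
}

impl fmt::Display for NodeLimit {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Auto => write!(f, "auto"),
            Self::Manual(n) => write!(f, "{n}"),
        }
    }
}

#[derive(Parser)]
struct Cli {
    /// Evaluation node limit (`auto` or a number).
    #[arg(long, default_value_t)]
    eval_node_limit: NodeLimit,
}

fn main() {
    // With no flag given, the `Default` variant is used and rendered as "auto".
    let cli = Cli::parse_from(["demo"]);
    assert!(matches!(cli.eval_node_limit, NodeLimit::Auto));

    // User input is parsed through the FromStr impl.
    let cli = Cli::parse_from(["demo", "--eval-node-limit", "3"]);
    assert!(matches!(cli.eval_node_limit, NodeLimit::Manual(3)));
}
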
@@ -1,6 +1,6 @@
//! Deployment options.

use clap::{builder::PossibleValue, ValueEnum};
use clap::ValueEnum;

use crate::nix::CopyOptions;

@@ -36,12 +36,22 @@ pub struct Options {
}

/// Which evaluator to use.
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, Default, ValueEnum)]
pub enum EvaluatorType {
    #[default]
    Chunked,
    Streaming,
}

impl std::fmt::Display for EvaluatorType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Chunked => "chunked",
            Self::Streaming => "streaming",
        })
    }
}

impl Options {
    pub fn set_substituters_push(&mut self, value: bool) {
        self.substituters_push = value;
@@ -98,16 +108,3 @@ impl Default for Options {
    }
}
}

impl ValueEnum for EvaluatorType {
    fn value_variants<'a>() -> &'a [Self] {
        &[Self::Chunked, Self::Streaming]
    }

    fn to_possible_value<'a>(&self) -> Option<PossibleValue> {
        match self {
            Self::Chunked => Some(PossibleValue::new("chunked")),
            Self::Streaming => Some(PossibleValue::new("streaming")),
        }
    }
}

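Deriving `ValueEnum` replaces the hand-written `impl ValueEnum for EvaluatorType` removed above: the possible values (`chunked`, `streaming`) are generated from the variant names, while the `Display` impl kept in the diff is only needed because the field uses `default_value_t`. A small stand-in sketch (hypothetical `Evaluator`/`Cli` names, clap 4 derive assumed):

// Sketch only: derived ValueEnum generates the possible values.
use clap::{Parser, ValueEnum};

#[derive(Copy, Clone, Debug, PartialEq, Eq, ValueEnum)]
enum Evaluator {
    Chunked,
    Streaming,
}

#[derive(Parser)]
struct Cli {
    /// The evaluator to use.
    #[arg(long, default_value = "chunked")]
    evaluator: Evaluator,
}

fn main() {
    let cli = Cli::parse_from(["demo", "--evaluator", "streaming"]);
    assert_eq!(cli.evaluator, Evaluator::Streaming);

    // Anything outside the derived set is rejected with a
    // "possible values: chunked, streaming" style error.
    assert!(Cli::try_parse_from(["demo", "--evaluator", "bogus"]).is_err());
}
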
@@ -5,12 +5,31 @@ use std::convert::AsRef;
use std::iter::{FromIterator, Iterator};
use std::str::FromStr;

use clap::Args;
use glob::Pattern as GlobPattern;

use super::{ColmenaError, ColmenaResult, NodeConfig, NodeName};

#[derive(Debug, Args)]
pub struct NodeFilterOpts {
    #[arg(
        long,
        value_name = "NODES",
        help = "Node selector",
        long_help = r#"Select a list of nodes to deploy to.

The list is comma-separated and globs are supported. To match tags, prepend the filter by @. Valid examples:

- host1,host2,host3
- edge-*
- edge-*,core-*
- @a-tag,@tags-can-have-*"#
    )]
    pub on: Option<NodeFilter>,
}

/// A node filter containing a list of rules.
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct NodeFilter {
    rules: Vec<Rule>,
}

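`NodeFilterOpts` exists so the `--on` selector can be reused by flattening it into any subcommand's option struct with `#[command(flatten)]`, as `apply`'s `Opts` does earlier in this diff; the `Option<NodeFilter>` field relies on `NodeFilter` being parseable from a string, presumably via a `FromStr` impl given the import in this file. A trimmed-down, hypothetical sketch of the flattening pattern (stand-in types, not the real structs):

// Sketch only: composing option groups with #[command(flatten)].
use clap::{Args, Parser};

#[derive(Debug, Args)]
struct FilterOpts {
    /// Node selector, e.g. `host1,edge-*,@some-tag`.
    #[arg(long, value_name = "NODES")]
    on: Option<String>,
}

#[derive(Debug, Args)]
struct DeployFlags {
    /// Do not upload keys.
    #[arg(long)]
    no_keys: bool,
}

#[derive(Debug, Parser)]
#[command(name = "apply")]
struct ApplyOpts {
    #[command(flatten)]
    filter: FilterOpts,
    #[command(flatten)]
    deploy: DeployFlags,
}

fn main() {
    // Flattened groups contribute their flags directly to `apply`.
    let opts = ApplyOpts::parse_from(["apply", "--on", "edge-*", "--no-keys"]);
    assert_eq!(opts.filter.on.as_deref(), Some("edge-*"));
    assert!(opts.deploy.no_keys);
}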