use super::DirectoryPutter;
use super::DirectoryService;
use crate::proto;
use crate::B3Digest;
use crate::Error;
use async_stream::stream;
use futures::Stream;
use std::collections::{HashSet, VecDeque};
use std::pin::Pin;
use tonic::async_trait;
use tracing::warn;

/// Traverses a [proto::Directory] from the root to the children.
///
/// This is mostly BFS, but directories are only returned once.
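///
/// A minimal consumption sketch (hypothetical names: assumes a
/// [DirectoryService] handle `directory_service` and a known `root_digest`):
///
/// ```ignore
/// use futures::StreamExt;
///
/// let mut stream = traverse_directory(directory_service, &root_digest);
/// while let Some(item) = stream.next().await {
///     let directory = item?; // parent directories are yielded before their children
/// }
/// ```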
pub fn traverse_directory<DS: DirectoryService + 'static>(
    directory_service: DS,
    root_directory_digest: &B3Digest,
) -> Pin<Box<dyn Stream<Item = Result<proto::Directory, Error>> + Send>> {
    // The list of all directories that still need to be traversed. The next
    // element is picked from the front, new elements are enqueued at the
    // back.
    let mut worklist_directory_digests: VecDeque<B3Digest> =
        VecDeque::from([root_directory_digest.clone()]);
    // The list of directory digests already sent to the consumer.
    // We omit sending the same directories multiple times.
    let mut sent_directory_digests: HashSet<B3Digest> = HashSet::new();

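    // The stream is lazy: the body below only runs as the consumer polls it.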
    let stream = stream! {
        while let Some(current_directory_digest) = worklist_directory_digests.pop_front() {
            match directory_service.get(&current_directory_digest).await {
                // if it's not there, we have an inconsistent store!
                Ok(None) => {
                    warn!("directory {} does not exist", current_directory_digest);
                    yield Err(Error::StorageError(format!(
                        "directory {} does not exist",
                        current_directory_digest
                    )));
                }
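                // if the lookup fails, yield the error and carry on with the
                // rest of the worklist.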
                Err(e) => {
                    warn!("failed to look up directory {}: {}", current_directory_digest, e);
                    yield Err(Error::StorageError(format!(
                        "unable to look up directory {}: {}",
                        current_directory_digest, e
                    )));
                }

                // if we got it
                Ok(Some(current_directory)) => {
                    // validate, we don't want to send invalid directories.
                    if let Err(e) = current_directory.validate() {
                        warn!("directory failed validation: {}", e.to_string());
                        yield Err(Error::StorageError(format!(
                            "invalid directory: {}",
                            current_directory_digest
                        )));
                        // don't traverse into or send an invalid directory.
                        continue;
                    }

                    // We're about to send this directory, so let's avoid sending
                    // it again if a descendant also references it.
                    sent_directory_digests.insert(current_directory_digest);

                    // enqueue all child directory digests to the work queue, as
                    // long as they're not part of the worklist or already sent.
                    // This panics if the digest looks invalid; it's supposed to be checked first.
                    for child_directory_node in &current_directory.directories {
                        // TODO: propagate error
                        let child_digest: B3Digest =
                            child_directory_node.digest.clone().try_into().unwrap();

                        if worklist_directory_digests.contains(&child_digest)
                            || sent_directory_digests.contains(&child_digest)
                        {
                            continue;
                        }
                        worklist_directory_digests.push_back(child_digest);
                    }

                    yield Ok(current_directory);
                }
            };
        }
    };

    Box::pin(stream)
}

/// This is a simple implementation of a Directory uploader.
/// TODO: verify connectivity? Factor out these checks into generic helpers?
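///
/// A sketch of the intended call pattern (hypothetical names: `ds` is any
/// [DirectoryService]; directories are put leaves-first, the root last):
///
/// ```ignore
/// let mut putter = SimplePutter::new(ds);
/// putter.put(leaf_directory).await?;
/// putter.put(root_directory).await?;
/// let root_digest = putter.close().await?; // digest of the last directory put
/// ```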
pub struct SimplePutter<DS: DirectoryService> {
    directory_service: DS,
    last_directory_digest: Option<B3Digest>,
    closed: bool,
}

impl<DS: DirectoryService> SimplePutter<DS> {
    pub fn new(directory_service: DS) -> Self {
        Self {
            directory_service,
            closed: false,
            last_directory_digest: None,
        }
    }
}

#[async_trait]
impl<DS: DirectoryService> DirectoryPutter for SimplePutter<DS> {
    async fn put(&mut self, directory: proto::Directory) -> Result<(), Error> {
        if self.closed {
            return Err(Error::StorageError("already closed".to_string()));
        }

        let digest = self.directory_service.put(directory).await?;

        // track the last directory digest
        self.last_directory_digest = Some(digest);

        Ok(())
    }

    /// We need to be mutable here, as that's the signature of the trait.
    async fn close(&mut self) -> Result<B3Digest, Error> {
        if self.closed {
            return Err(Error::StorageError("already closed".to_string()));
        }

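        // Directories are put leaves-first, so the last directory put is the
        // root; its digest is what we return.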
        match &self.last_directory_digest {
            Some(last_digest) => {
                self.closed = true;
                Ok(last_digest.clone())
            }
            None => Err(Error::InvalidRequest(
                "no directories sent, can't show root digest".to_string(),
            )),
        }
    }

    fn is_closed(&self) -> bool {
        self.closed
    }
}