refactor(tvix/castore): ingest filesystem entries in parallel
Rather than carrying around a Future in IngestionEntry::Regular, simply carry the plain B3Digest.

Code reading through a non-seekable data stream has no choice but to read and upload blobs immediately, while code walking something seekable (like a filesystem) is in a better position to pick the ingestion concurrency than the consuming side.

Our only seekable source implementation, the filesystem walker, now does exactly that: it produces a stream of futures and uses [StreamExt::buffered] to process several of them concurrently, while keeping the original order so the entry stream is not shuffled.

This also cleans up walk_path_for_ingestion in castore/import, as well as ingest_dir_entries in glue/tvix_store_io.

Change-Id: I5eb70f3e1e372c74bcbfcf6b6e2653eba36e151d
Reviewed-on: https://cl.tvl.fyi/c/depot/+/11491
Autosubmit: flokli <flokli@flokli.de>
Reviewed-by: Connor Brewster <cbrewster@hey.com>
Tested-by: BuildkiteCI
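For illustration only (not part of the commit): a minimal, self-contained sketch of the buffered-stream pattern described above, assuming nothing beyond the futures crate and a tokio runtime with the macros feature. It turns an iterator into a stream of futures, drives up to 50 of them at once with StreamExt::buffered, and still yields results in the original input order.

    use futures::{stream, StreamExt};

    #[tokio::main]
    async fn main() {
        let paths = vec!["a", "b", "c"];

        // Map each item to a future; nothing runs until the stream is polled.
        let digests: Vec<String> = stream::iter(paths)
            .map(|p| async move {
                // Stand-in for hashing/uploading a blob; real I/O would happen here.
                tokio::task::yield_now().await;
                format!("digest-of-{p}")
            })
            // Poll up to 50 futures concurrently, but emit results in input order.
            .buffered(50)
            .collect()
            .await;

        assert_eq!(digests, ["digest-of-a", "digest-of-b", "digest-of-c"]);
    }

Using buffered rather than buffer_unordered is what keeps the output order stable, which is what the commit message means by not violating the stream order.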
parent 01239a4f6f
commit 5fc403587f

4 changed files with 53 additions and 82 deletions
@@ -1,8 +1,8 @@
use futures::stream::BoxStream;
use futures::StreamExt;
use std::os::unix::fs::MetadataExt;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;

use futures::stream::BoxStream;
use tracing::instrument;
use walkdir::DirEntry;
use walkdir::WalkDir;
@@ -22,6 +22,9 @@ use super::IngestionEntry;
/// [DirectoryService]. It returns the root node or an error.
///
/// It does not follow symlinks at the root, they will be ingested as actual symlinks.
///
/// This function will walk the filesystem using `walkdir` and will consume
/// `O(#number of entries)` space.
#[instrument(skip(blob_service, directory_service), fields(path), err)]
pub async fn ingest_path<BS, DS, P>(
    blob_service: BS,
@@ -33,61 +36,51 @@ where
    BS: BlobService + Clone,
    DS: AsRef<dyn DirectoryService>,
{
    let entry_stream = walk_path_for_ingestion(blob_service, path.as_ref());
    ingest_entries(directory_service, entry_stream).await
}

/// Walk the filesystem at a given path and returns a stream of ingestion entries.
///
/// This is how [`ingest_path`] assembles the set of entries to pass on [`ingest_entries`].
/// This low-level function can be used if additional filtering or processing is required on the
/// entries.
///
/// It does not follow symlinks at the root, they will be ingested as actual symlinks.
///
/// This function will walk the filesystem using `walkdir` and will consume
/// `O(#number of entries)` space.
#[instrument(fields(path), skip(blob_service))]
fn walk_path_for_ingestion<'a, BS>(
    blob_service: BS,
    path: &'a Path,
) -> BoxStream<'a, Result<IngestionEntry<'a>, Error>>
where
    BS: BlobService + Clone + 'a,
{
    let iter = WalkDir::new(path)
    let iter = WalkDir::new(path.as_ref())
        .follow_links(false)
        .follow_root_links(false)
        .contents_first(true)
        .into_iter();

    dir_entry_iter_to_ingestion_stream(blob_service, iter, path)
    let entries = dir_entries_to_ingestion_stream(blob_service, iter, path.as_ref());
    ingest_entries(directory_service, entries).await
}

/// Converts an iterator of [walkdir::DirEntry]s into a stream of ingestion entries.
/// This can then be fed into [ingest_entries] to ingest all the entries into the castore.
///
/// The produced stream is buffered, so uploads can happen concurrently.
///
/// The root is the [Path] in the filesystem that is being ingested into the castore.
pub fn dir_entry_iter_to_ingestion_stream<'a, BS, I>(
pub fn dir_entries_to_ingestion_stream<'a, BS, I>(
    blob_service: BS,
    iter: I,
    root: &'a Path,
) -> BoxStream<'a, Result<IngestionEntry<'a>, Error>>
) -> BoxStream<'a, Result<IngestionEntry, Error>>
where
    BS: BlobService + Clone + 'a,
    I: Iterator<Item = Result<DirEntry, walkdir::Error>> + Send + 'a,
{
    let prefix = root.parent().unwrap_or_else(|| Path::new(""));

    let iter = iter.map(move |entry| match entry {
        Ok(entry) => dir_entry_to_ingestion_entry(blob_service.clone(), &entry, prefix),
        Err(error) => Err(Error::UnableToStat(
            root.to_path_buf(),
            error.into_io_error().expect("walkdir err must be some"),
        )),
    });

    Box::pin(futures::stream::iter(iter))
    Box::pin(
        futures::stream::iter(iter)
            .map(move |x| {
                let blob_service = blob_service.clone();
                async move {
                    match x {
                        Ok(dir_entry) => {
                            dir_entry_to_ingestion_entry(blob_service, &dir_entry, prefix).await
                        }
                        Err(e) => Err(Error::UnableToStat(
                            prefix.to_path_buf(),
                            e.into_io_error().expect("walkdir err must be some"),
                        )),
                    }
                }
            })
            .buffered(50),
    )
}

/// Converts a [walkdir::DirEntry] into an [IngestionEntry], uploading blobs to the
@@ -95,13 +88,13 @@ where
///
/// The prefix path is stripped from the path of each entry. This is usually the parent path
/// of the path being ingested so that the last element of the stream only has one component.
fn dir_entry_to_ingestion_entry<'a, BS>(
pub async fn dir_entry_to_ingestion_entry<BS>(
    blob_service: BS,
    entry: &DirEntry,
    prefix: &Path,
) -> Result<IngestionEntry<'a>, Error>
) -> Result<IngestionEntry, Error>
where
    BS: BlobService + 'a,
    BS: BlobService,
{
    let file_type = entry.file_type();
@@ -123,11 +116,7 @@ where
            .metadata()
            .map_err(|e| Error::UnableToStat(entry.path().to_path_buf(), e.into()))?;

        // TODO: In the future, for small files, hash right away and upload async.
        let digest = Box::pin(upload_blob_at_path(
            blob_service,
            entry.path().to_path_buf(),
        ));
        let digest = upload_blob_at_path(blob_service, entry.path().to_path_buf()).await?;

        Ok(IngestionEntry::Regular {
            path,

@@ -13,10 +13,9 @@ use crate::proto::DirectoryNode;
use crate::proto::FileNode;
use crate::proto::SymlinkNode;
use crate::B3Digest;
use futures::Future;
use futures::{Stream, StreamExt};
use std::fs::FileType;
use std::pin::Pin;

use tracing::Level;

#[cfg(target_family = "unix")]
@@ -52,10 +51,10 @@ pub mod fs;
///
/// On success, returns the root node.
#[instrument(skip_all, ret(level = Level::TRACE), err)]
pub async fn ingest_entries<'a, DS, S>(directory_service: DS, mut entries: S) -> Result<Node, Error>
pub async fn ingest_entries<DS, S>(directory_service: DS, mut entries: S) -> Result<Node, Error>
where
    DS: AsRef<dyn DirectoryService>,
    S: Stream<Item = Result<IngestionEntry<'a>, Error>> + Send + std::marker::Unpin,
    S: Stream<Item = Result<IngestionEntry, Error>> + Send + std::marker::Unpin,
{
    // For a given path, this holds the [Directory] structs as they are populated.
    let mut directories: HashMap<PathBuf, Directory> = HashMap::default();
@@ -124,7 +123,7 @@ where
                ..
            } => Node::File(FileNode {
                name,
                digest: digest.await?.into(),
                digest: digest.to_owned().into(),
                size: *size,
                executable: *executable,
            }),
@@ -200,14 +199,12 @@ where
    Ok(digest)
}

type BlobFut<'a> = Pin<Box<dyn Future<Output = Result<B3Digest, Error>> + Send + 'a>>;

pub enum IngestionEntry<'a> {
pub enum IngestionEntry {
    Regular {
        path: PathBuf,
        size: u64,
        executable: bool,
        digest: BlobFut<'a>,
        digest: B3Digest,
    },
    Symlink {
        path: PathBuf,
@@ -222,7 +219,7 @@ pub enum IngestionEntry<'a> {
    },
}

impl<'a> IngestionEntry<'a> {
impl IngestionEntry {
    fn path(&self) -> &Path {
        match self {
            IngestionEntry::Regular { path, .. } => path,

@@ -2,6 +2,7 @@

use crate::builtins::errors::ImportError;
use std::path::Path;
use tvix_castore::import::ingest_entries;
use tvix_eval::{
    builtin_macros::builtins,
    generators::{self, GenCo},
@@ -84,15 +85,19 @@ async fn filtered_ingest(
        entries.push(entry);
    }

    let entries_iter = entries.into_iter().rev().map(Ok);
    let dir_entries = entries.into_iter().rev().map(Ok);

    state.tokio_handle.block_on(async {
        state
            .ingest_dir_entries(entries_iter, path)
        let entries = tvix_castore::import::fs::dir_entries_to_ingestion_stream(
            &state.blob_service,
            dir_entries,
            path,
        );
        ingest_entries(&state.directory_service, entries)
            .await
            .map_err(|err| ErrorKind::IO {
                path: Some(path.to_path_buf()),
                error: err.into(),
                error: Rc::new(err.into()),
            })
    })
}

@@ -20,10 +20,8 @@ use tokio_util::io::{InspectReader, SyncIoBridge};
use tracing::{error, instrument, warn, Level};
use tvix_build::buildservice::BuildService;
use tvix_castore::import::archive::ingest_archive;
use tvix_castore::import::fs::dir_entry_iter_to_ingestion_stream;
use tvix_eval::{ErrorKind, EvalIO, FileType, StdIO};
use tvix_store::utils::AsyncIoBridge;
use walkdir::DirEntry;

use tvix_castore::{
    blobservice::BlobService,
@@ -54,9 +52,9 @@ use crate::tvix_build::derivation_to_build_request;
/// implementation of "Tvix Store IO" which does not necessarily bring the concept of blob service,
/// directory service or path info service.
pub struct TvixStoreIO {
    blob_service: Arc<dyn BlobService>,
    directory_service: Arc<dyn DirectoryService>,
    // This is public so builtins can put PathInfos directly.
    // This is public so helper functions can interact with the stores directly.
    pub(crate) blob_service: Arc<dyn BlobService>,
    pub(crate) directory_service: Arc<dyn DirectoryService>,
    pub(crate) path_info_service: Arc<dyn PathInfoService>,
    std_io: StdIO,
    #[allow(dead_code)]
@@ -277,24 +275,6 @@ impl TvixStoreIO {
            .map_err(|e| std::io::Error::new(io::ErrorKind::Other, e))
    }

    /// This forwards the ingestion to the [`tvix_castore::import::ingest_entries`],
    /// passing the blob_service and directory_service that's used.
    /// The error is mapped to std::io::Error for simplicity.
    pub(crate) async fn ingest_dir_entries<'a, I>(
        &'a self,
        iter: I,
        root: &Path,
    ) -> io::Result<Node>
    where
        I: Iterator<Item = Result<DirEntry, walkdir::Error>> + Send + Sync + 'a,
    {
        let entries_stream = dir_entry_iter_to_ingestion_stream(&self.blob_service, iter, root);

        tvix_castore::import::ingest_entries(&self.directory_service, entries_stream)
            .await
            .map_err(|err| std::io::Error::new(io::ErrorKind::Other, err))
    }

    pub(crate) async fn node_to_path_info(
        &self,
        name: &str,