da6cbb4a45
We previously kept the BlobService trait sync. This had some annoying consequences:

- It became more and more complicated to track whether we were in a context with an async runtime available or not, producing bugs like https://b.tvl.fyi/issues/304
- The sync trait shielded async clients away from async workloads, requiring manual block_on code inside the gRPC client code, and spawn_blocking calls in consumers of the trait, even if they were async (like the gRPC server)
- We had to write our own custom glue code (SyncReadIntoAsyncRead) to convert a sync io::Read into a tokio::io::AsyncRead, which already existed in tokio internally, but upstream is hesitant to expose.

This now makes the BlobService trait async (via the async_trait macro, like we already do in various gRPC parts), and replaces the sync readers and writers with their async counterparts.

Tests interacting with a BlobService now need to have an async runtime available; the easiest way to get one is to mark the test functions with the tokio::test macro, allowing us to directly .await in the test function. In places where we don't have an async runtime available from context (like tvix-cli), we can pass one down explicitly.

Now that we don't provide a sync interface anymore, the (sync) FUSE library holds a handle to a tokio runtime, and needs at least 2 threads available when talking to a blob service (which is why some of the tests now use the multi_thread flavor).

The FUSE tests got a bit more verbose, as we couldn't use the setup_and_mount function accepting a callback anymore. We can hopefully move some of the test fixture setup to rstest in the future to make this less repetitive.

Co-Authored-By: Connor Brewster <cbrewster@hey.com>
Change-Id: Ia0501b606e32c852d0108de9c9016b21c94a3c05
Reviewed-on: https://cl.tvl.fyi/c/depot/+/9329
Reviewed-by: Connor Brewster <cbrewster@hey.com>
Tested-by: BuildkiteCI
Reviewed-by: raitobezarius <tvl@lahfa.xyz>
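The resulting trait shape can be sketched roughly as below. This is a minimal sketch of an async trait using the async_trait macro, not the exact tvix-store definition: the open_read signature, the digest parameter type, and the boxed reader/writer return types are assumptions for illustration (only open_write is visible in the tests further down).

// Minimal sketch: an async blob service trait via the async_trait crate.
// Method signatures and error types are illustrative only.
use async_trait::async_trait;
use tokio::io::{AsyncRead, AsyncWrite};

#[async_trait]
pub trait BlobService: Send + Sync {
    /// Open a blob for reading, returning an async reader if the blob exists.
    async fn open_read(
        &self,
        digest: &[u8; 32], // placeholder digest type for this sketch
    ) -> std::io::Result<Option<Box<dyn AsyncRead + Send + Unpin>>>;

    /// Open a writer for a new blob; callers write to it asynchronously
    /// and finalize it afterwards.
    async fn open_write(&self) -> Box<dyn AsyncWrite + Send + Unpin>;
}

With this shape, async consumers like the gRPC server can .await the service directly instead of wrapping calls in spawn_blocking.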
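For the sync FUSE side, the pattern described above amounts to keeping a tokio runtime handle around and blocking on async work from the synchronous callbacks. A rough sketch, assuming tokio; the FuseBlobAccess type and read_to_end_blocking helper are made up for illustration:

// Hypothetical wrapper: holds a handle to the tokio runtime so that
// synchronous (FUSE-style) code can call into the async BlobService.
use tokio::io::AsyncReadExt;
use tokio::runtime::Handle;

struct FuseBlobAccess {
    rt: Handle,
}

impl FuseBlobAccess {
    // Called from a sync context; blocks the calling thread on the async work.
    // The runtime needs spare worker threads to drive IO and spawned tasks,
    // which is why the multi_thread flavor with at least 2 threads is used.
    fn read_to_end_blocking(
        &self,
        mut reader: impl tokio::io::AsyncRead + Unpin,
    ) -> std::io::Result<Vec<u8>> {
        self.rt.block_on(async {
            let mut buf = Vec::new();
            reader.read_to_end(&mut buf).await?;
            Ok(buf)
        })
    }
}

The test file below exercises the new async interface through write_nar and calculate_size_and_sha256.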
use crate::nar::calculate_size_and_sha256;
use crate::nar::write_nar;
use crate::proto::DirectoryNode;
use crate::proto::FileNode;
use crate::proto::SymlinkNode;
use crate::tests::fixtures::*;
use crate::tests::utils::*;
use sha2::{Digest, Sha256};
use std::io;

/// Make sure the NAR renderer can render a single symlink node
/// without talking to the stores at all.
#[test]
fn single_symlink() {
    let mut buf: Vec<u8> = vec![];

    write_nar(
        &mut buf,
        &crate::proto::node::Node::Symlink(SymlinkNode {
            name: "doesntmatter".into(),
            target: "/nix/store/somewhereelse".into(),
        }),
        // don't put anything in the stores, as we don't actually do any requests.
        gen_blob_service(),
        gen_directory_service(),
    )
    .expect("must succeed");

    assert_eq!(buf, NAR_CONTENTS_SYMLINK.to_vec());
}

/// Make sure the NARRenderer fails if a referred blob doesn't exist.
#[tokio::test]
async fn single_file_missing_blob() {
    let mut buf: Vec<u8> = vec![];

    let e = tokio::task::spawn_blocking(move || {
        write_nar(
            &mut buf,
            &crate::proto::node::Node::File(FileNode {
                name: "doesntmatter".into(),
                digest: HELLOWORLD_BLOB_DIGEST.clone().into(),
                size: HELLOWORLD_BLOB_CONTENTS.len() as u32,
                executable: false,
            }),
            // the blobservice is empty intentionally, to provoke the error.
            gen_blob_service(),
            gen_directory_service(),
        )
    })
    .await
    .unwrap()
    .expect_err("must fail");

    match e {
        crate::nar::RenderError::NARWriterError(e) => {
            assert_eq!(io::ErrorKind::NotFound, e.kind());
        }
        _ => panic!("unexpected error: {:?}", e),
    }
}

/// Make sure the NAR Renderer fails if the returned blob meta has another size
/// than specified in the proto node.
#[tokio::test]
async fn single_file_wrong_blob_size() {
    let blob_service = gen_blob_service();

    // insert blob into the store
    let mut writer = blob_service.open_write().await;
    tokio::io::copy(
        &mut io::Cursor::new(HELLOWORLD_BLOB_CONTENTS.to_vec()),
        &mut writer,
    )
    .await
    .unwrap();
    assert_eq!(
        HELLOWORLD_BLOB_DIGEST.clone(),
        writer.close().await.unwrap()
    );

    let bs = blob_service.clone();
    // Test with a root FileNode of a too big size
    {
        let mut buf: Vec<u8> = vec![];

        let e = tokio::task::spawn_blocking(move || {
            write_nar(
                &mut buf,
                &crate::proto::node::Node::File(FileNode {
                    name: "doesntmatter".into(),
                    digest: HELLOWORLD_BLOB_DIGEST.clone().into(),
                    size: 42, // <- note the wrong size here!
                    executable: false,
                }),
                bs,
                gen_directory_service(),
            )
        })
        .await
        .unwrap()
        .expect_err("must fail");

        match e {
            crate::nar::RenderError::NARWriterError(e) => {
                assert_eq!(io::ErrorKind::UnexpectedEof, e.kind());
            }
            _ => panic!("unexpected error: {:?}", e),
        }
    }

    let bs = blob_service.clone();
    // Test with a root FileNode of a too small size
    {
        let mut buf: Vec<u8> = vec![];

        let e = tokio::task::spawn_blocking(move || {
            write_nar(
                &mut buf,
                &crate::proto::node::Node::File(FileNode {
                    name: "doesntmatter".into(),
                    digest: HELLOWORLD_BLOB_DIGEST.clone().into(),
                    size: 2, // <- note the wrong size here!
                    executable: false,
                }),
                bs,
                gen_directory_service(),
            )
            .expect_err("must fail")
        })
        .await
        .unwrap();

        match e {
            crate::nar::RenderError::NARWriterError(e) => {
                assert_eq!(io::ErrorKind::InvalidInput, e.kind());
            }
            _ => panic!("unexpected error: {:?}", e),
        }
    }
}

/// Make sure the NAR renderer can render a single file node
/// whose blob is present in the blob service.
#[tokio::test]
async fn single_file() {
    let blob_service = gen_blob_service();

    // insert blob into the store
    let mut writer = blob_service.open_write().await;
    tokio::io::copy(
        &mut io::Cursor::new(HELLOWORLD_BLOB_CONTENTS.clone()),
        &mut writer,
    )
    .await
    .unwrap();

    assert_eq!(
        HELLOWORLD_BLOB_DIGEST.clone(),
        writer.close().await.unwrap()
    );

    let mut buf: Vec<u8> = vec![];

    let buf = tokio::task::spawn_blocking(move || {
        write_nar(
            &mut buf,
            &crate::proto::node::Node::File(FileNode {
                name: "doesntmatter".into(),
                digest: HELLOWORLD_BLOB_DIGEST.clone().into(),
                size: HELLOWORLD_BLOB_CONTENTS.len() as u32,
                executable: false,
            }),
            blob_service,
            gen_directory_service(),
        )
        .expect("must succeed");

        buf
    })
    .await
    .unwrap();

    assert_eq!(buf, NAR_CONTENTS_HELLOWORLD.to_vec());
}

/// Make sure the NAR renderer can render a directory with subdirectories and
/// blobs, and that calculate_size_and_sha256 agrees with the rendered output.
#[tokio::test]
async fn test_complicated() {
    let blob_service = gen_blob_service();
    let directory_service = gen_directory_service();

    // put all data into the stores.
    // insert blob into the store
    let mut writer = blob_service.open_write().await;
    tokio::io::copy(
        &mut io::Cursor::new(EMPTY_BLOB_CONTENTS.clone()),
        &mut writer,
    )
    .await
    .unwrap();
    assert_eq!(EMPTY_BLOB_DIGEST.clone(), writer.close().await.unwrap());

    directory_service.put(DIRECTORY_WITH_KEEP.clone()).unwrap();
    directory_service
        .put(DIRECTORY_COMPLICATED.clone())
        .unwrap();

    let mut buf: Vec<u8> = vec![];

    let bs = blob_service.clone();
    let ds = directory_service.clone();

    let buf = tokio::task::spawn_blocking(move || {
        write_nar(
            &mut buf,
            &crate::proto::node::Node::Directory(DirectoryNode {
                name: "doesntmatter".into(),
                digest: DIRECTORY_COMPLICATED.digest().into(),
                size: DIRECTORY_COMPLICATED.size(),
            }),
            bs,
            ds,
        )
        .expect("must succeed");
        buf
    })
    .await
    .unwrap();

    assert_eq!(buf, NAR_CONTENTS_COMPLICATED.to_vec());

    // ensure calculate_size_and_sha256 returns the correct size and sha256 digest.
    let bs = blob_service.clone();
    let ds = directory_service.clone();
    let (nar_size, nar_digest) = tokio::task::spawn_blocking(move || {
        calculate_size_and_sha256(
            &crate::proto::node::Node::Directory(DirectoryNode {
                name: "doesntmatter".into(),
                digest: DIRECTORY_COMPLICATED.digest().into(),
                size: DIRECTORY_COMPLICATED.size(),
            }),
            bs,
            ds,
        )
    })
    .await
    .unwrap()
    .expect("must succeed");

    assert_eq!(NAR_CONTENTS_COMPLICATED.len() as u64, nar_size);
    let d = Sha256::digest(NAR_CONTENTS_COMPLICATED.clone());
    assert_eq!(d.as_slice(), nar_digest);
}