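//! Tests for the NAR renderer: rendering symlink, file and directory nodes
//! into NAR byte streams, checking error handling, and verifying NAR size and
//! sha256 calculation.
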
use crate::nar::calculate_size_and_sha256;
use crate::nar::write_nar;
use crate::proto::DirectoryNode;
use crate::proto::FileNode;
use crate::proto::SymlinkNode;
use crate::tests::fixtures::*;
use crate::tests::utils::*;
use sha2::{Digest, Sha256};
use std::io;
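
/// Rendering a single symlink node does not need any data from the stores.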
#[test]
fn single_symlink() {
    let mut buf: Vec<u8> = vec![];

    write_nar(
        &mut buf,
        &crate::proto::node::Node::Symlink(SymlinkNode {
            name: "doesntmatter".to_string(),
            target: "/nix/store/somewhereelse".to_string(),
        }),
        // don't put anything in the stores, as we don't actually do any requests.
        &gen_blob_service(),
        &gen_directory_service(),
    )
    .expect("must succeed");

    assert_eq!(buf, NAR_CONTENTS_SYMLINK.to_vec());
}

/// Make sure the NAR renderer fails if a referred blob doesn't exist.
#[test]
fn single_file_missing_blob() {
    let mut buf: Vec<u8> = vec![];

    let e = write_nar(
        &mut buf,
        &crate::proto::node::Node::File(FileNode {
            name: "doesntmatter".to_string(),
            digest: HELLOWORLD_BLOB_DIGEST.to_vec(),
            size: HELLOWORLD_BLOB_CONTENTS.len() as u32,
            executable: false,
        }),
        // the blobservice is empty intentionally, to provoke the error.
        &gen_blob_service(),
        &gen_directory_service(),
    )
    .expect_err("must fail");

    match e {
        crate::nar::RenderError::NARWriterError(e) => {
            assert_eq!(io::ErrorKind::NotFound, e.kind());
        }
        _ => panic!("unexpected error: {:?}", e),
    }
}

/// Make sure the NAR renderer fails if the blob in the blob service has a
/// different size than specified in the proto node.
#[test]
fn single_file_wrong_blob_size() {
    let blob_service = gen_blob_service();

    // insert blob into the store
    let mut writer = blob_service.open_write().unwrap();
    io::copy(
        &mut io::Cursor::new(HELLOWORLD_BLOB_CONTENTS.to_vec()),
        &mut writer,
    )
    .unwrap();
    assert_eq!(HELLOWORLD_BLOB_DIGEST.clone(), writer.close().unwrap());

    // Test with a root FileNode of a too big size
    {
        let mut buf: Vec<u8> = vec![];

        let e = write_nar(
            &mut buf,
            &crate::proto::node::Node::File(FileNode {
                name: "doesntmatter".to_string(),
                digest: HELLOWORLD_BLOB_DIGEST.to_vec(),
                size: 42, // <- note the wrong size here!
                executable: false,
            }),
            &blob_service,
            &gen_directory_service(),
        )
        .expect_err("must fail");
        match e {
            crate::nar::RenderError::NARWriterError(e) => {
                assert_eq!(io::ErrorKind::UnexpectedEof, e.kind());
            }
            _ => panic!("unexpected error: {:?}", e),
        }
    }

    // Test with a root FileNode of a too small size
    {
        let mut buf: Vec<u8> = vec![];

        let e = write_nar(
            &mut buf,
            &crate::proto::node::Node::File(FileNode {
                name: "doesntmatter".to_string(),
                digest: HELLOWORLD_BLOB_DIGEST.to_vec(),
                size: 2, // <- note the wrong size here!
                executable: false,
            }),
            &blob_service,
            &gen_directory_service(),
        )
        .expect_err("must fail");
        match e {
            crate::nar::RenderError::NARWriterError(e) => {
                assert_eq!(io::ErrorKind::InvalidInput, e.kind());
            }
            _ => panic!("unexpected error: {:?}", e),
        }
    }
}
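
/// Rendering a single file node reads its contents from the blob service.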
#[test]
fn single_file() {
    let blob_service = gen_blob_service();

    // insert blob into the store
    let mut writer = blob_service.open_write().unwrap();
    io::copy(
        &mut io::Cursor::new(HELLOWORLD_BLOB_CONTENTS.to_vec()),
        &mut writer,
    )
    .unwrap();
    assert_eq!(HELLOWORLD_BLOB_DIGEST.clone(), writer.close().unwrap());

    let mut buf: Vec<u8> = vec![];

    write_nar(
        &mut buf,
        &crate::proto::node::Node::File(FileNode {
            name: "doesntmatter".to_string(),
            digest: HELLOWORLD_BLOB_DIGEST.to_vec(),
            size: HELLOWORLD_BLOB_CONTENTS.len() as u32,
            executable: false,
        }),
        &blob_service,
        &gen_directory_service(),
    )
    .expect("must succeed");

    assert_eq!(buf, NAR_CONTENTS_HELLOWORLD.to_vec());
}
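
/// Renders a more complicated directory structure (with a subdirectory and
/// files), and also verifies calculate_size_and_sha256 against the rendered NAR.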
#[test]
fn test_complicated() {
    let blob_service = gen_blob_service();
    let directory_service = gen_directory_service();

    // put all data into the stores.
    // insert blob into the store
    let mut writer = blob_service.open_write().unwrap();
    io::copy(
        &mut io::Cursor::new(EMPTY_BLOB_CONTENTS.to_vec()),
        &mut writer,
    )
    .unwrap();
    assert_eq!(EMPTY_BLOB_DIGEST.clone(), writer.close().unwrap());
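
    // DIRECTORY_COMPLICATED references DIRECTORY_WITH_KEEP, so both need to be
    // present in the directory service for the renderer to traverse the tree.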
    directory_service.put(DIRECTORY_WITH_KEEP.clone()).unwrap();
    directory_service
        .put(DIRECTORY_COMPLICATED.clone())
        .unwrap();

    let mut buf: Vec<u8> = vec![];

    write_nar(
        &mut buf,
        &crate::proto::node::Node::Directory(DirectoryNode {
            name: "doesntmatter".to_string(),
            digest: DIRECTORY_COMPLICATED.digest().to_vec(),
            size: DIRECTORY_COMPLICATED.size(),
        }),
        &blob_service,
        &directory_service,
    )
    .expect("must succeed");

    assert_eq!(buf, NAR_CONTENTS_COMPLICATED.to_vec());

    // ensure calculate_size_and_sha256 returns the correct NAR size and sha256 digest.
    let (nar_size, nar_digest) = calculate_size_and_sha256(
        &crate::proto::node::Node::Directory(DirectoryNode {
            name: "doesntmatter".to_string(),
            digest: DIRECTORY_COMPLICATED.digest().to_vec(),
            size: DIRECTORY_COMPLICATED.size(),
        }),
        &blob_service,
        &directory_service,
    )
    .expect("must succeed");

    assert_eq!(NAR_CONTENTS_COMPLICATED.len() as u64, nar_size);
    let d = Sha256::digest(NAR_CONTENTS_COMPLICATED.clone());
    assert_eq!(d.as_slice(), nar_digest);
}