fix(tvix/castore): B3Digest::{to_vec -> as_slice}

Not a single call site actually makes use of the Vec.

Change-Id: I6cf31073c9f443d1702a21937a0c3938c2c643b8
Reviewed-on: https://cl.tvl.fyi/c/depot/+/9988
Tested-by: BuildkiteCI
Reviewed-by: flokli <flokli@flokli.de>
Author: edef
Date: 2023-11-07 12:09:37 +00:00
Parent: 9f5b1213f9
Commit: a8e7f4eadb
7 changed files with 17 additions and 19 deletions
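For context, the change swaps an allocating accessor for a borrowing one. A minimal sketch of the before/after shape (the inner type here is an assumption for illustration; the diff below only shows it being copied with to_vec() and indexed with [..]):

    // Sketch, not the real definition: any byte container that
    // supports to_vec() and [..] indexing fits the diff below.
    pub struct B3Digest(Vec<u8>);

    impl B3Digest {
        // before: allocates and copies the digest bytes on every call
        pub fn to_vec(&self) -> Vec<u8> {
            self.0.to_vec()
        }

        // after: hands out a borrow of the existing bytes
        pub fn as_slice(&self) -> &[u8] {
            &self.0[..]
        }
    }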


@@ -61,7 +61,7 @@ impl BlobService for SledBlobService {
     #[instrument(skip(self), fields(blob.digest=%digest))]
     async fn has(&self, digest: &B3Digest) -> Result<bool, Error> {
-        match self.db.contains_key(digest.to_vec()) {
+        match self.db.contains_key(digest.as_slice()) {
             Ok(has) => Ok(has),
             Err(e) => Err(Error::StorageError(e.to_string())),
         }
@@ -69,7 +69,7 @@ impl BlobService for SledBlobService {
     #[instrument(skip(self), fields(blob.digest=%digest))]
     async fn open_read(&self, digest: &B3Digest) -> Result<Option<Box<dyn BlobReader>>, Error> {
-        match self.db.get(digest.to_vec()) {
+        match self.db.get(digest.as_slice()) {
             Ok(None) => Ok(None),
             Ok(Some(data)) => Ok(Some(Box::new(Cursor::new(data[..].to_vec())))),
             Err(e) => Err(Error::StorageError(e.to_string())),
@@ -158,12 +158,12 @@ impl BlobWriter for SledBlobWriter {
             let digest: B3Digest = hasher.finalize().as_bytes().into();
             // Only insert if the blob doesn't already exist.
-            if !self.db.contains_key(digest.to_vec()).map_err(|e| {
+            if !self.db.contains_key(digest.as_slice()).map_err(|e| {
                 Error::StorageError(format!("Unable to check if we have blob {}: {}", digest, e))
             })? {
                 // put buf in there. This will move buf out.
                 self.db
-                    .insert(digest.to_vec(), buf)
+                    .insert(digest.as_slice(), buf)
                     .map_err(|e| Error::StorageError(format!("unable to insert blob: {}", e)))?;
             }
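These sled call sites compile unchanged with a borrowed key because sled's lookup and insert methods are generic over AsRef<[u8]>, which &[u8] implements. A self-contained sketch against sled's public API (the helper function and its arguments are hypothetical):

    fn have_blob(db: &sled::Db, digest: &[u8]) -> sled::Result<bool> {
        // sled::Tree::contains_key<K: AsRef<[u8]>> takes the key by
        // value, but &[u8] satisfies AsRef<[u8]>, so no Vec<u8> is
        // allocated just to perform the lookup.
        db.contains_key(digest)
    }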


@@ -15,9 +15,8 @@ pub enum Error {
 pub const B3_LEN: usize = 32;
 impl B3Digest {
-    // returns a copy of the inner [Vec<u8>].
-    pub fn to_vec(&self) -> Vec<u8> {
-        self.0.to_vec()
+    pub fn as_slice(&self) -> &[u8] {
+        &self.0[..]
     }
 }
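Reusing the sketch above: a caller that genuinely does need an owned copy can still get one, with the allocation made explicit at the call site rather than hidden inside the accessor (hypothetical helper, for illustration only):

    fn digest_bytes_owned(digest: &B3Digest) -> Vec<u8> {
        // the one-off copy is now visible exactly where it is paid for
        digest.as_slice().to_vec()
    }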


@@ -64,7 +64,7 @@ impl DirectoryService for SledDirectoryService {
     #[instrument(skip(self, digest), fields(directory.digest = %digest))]
     async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
-        match self.db.get(digest.to_vec()) {
+        match self.db.get(digest.as_slice()) {
             // The directory was not found, return
             Ok(None) => Ok(None),
@@ -114,7 +114,7 @@ impl DirectoryService for SledDirectoryService {
             )));
         }
         // store it
-        let result = self.db.insert(digest.to_vec(), directory.encode_to_vec());
+        let result = self.db.insert(digest.as_slice(), directory.encode_to_vec());
         if let Err(e) = result {
             return Err(Error::StorageError(e.to_string()));
         }


@@ -167,8 +167,7 @@ impl super::blob_service_server::BlobService for GRPCBlobServiceWrapper {
                 warn!("error closing stream: {}", e);
                 Status::internal("error closing stream")
             })
-            .await?
-            .to_vec();
+            .await?;
         Ok(Response::new(super::PutBlobResponse {
             digest: digest.into(),


@@ -54,7 +54,7 @@ async fn put_read_stat() {
         .expect("must succeed")
         .into_inner();
-    assert_eq!(BLOB_A_DIGEST.to_vec(), put_resp.digest);
+    assert_eq!(BLOB_A_DIGEST.as_slice(), put_resp.digest);
     // Stat for the digest of A.
     // We currently don't ask for more granular chunking data, as we don't
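The test asserts can put a &[u8] on either side of an owned byte buffer because the standard library provides symmetric PartialEq impls between Vec<u8> and &[u8]. A standalone illustration (not project code):

    fn main() {
        let owned: Vec<u8> = vec![1, 2, 3];
        let borrowed: &[u8] = &[1, 2, 3];
        // std implements PartialEq<&[B]> for Vec<A> and
        // PartialEq<Vec<B>> for &[A], so both orderings compile.
        assert_eq!(owned, borrowed);
        assert_eq!(borrowed, owned);
    }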


@@ -74,7 +74,7 @@ async fn put_get() {
     };
     // the sent root_digest should match the calculated digest
-    assert_eq!(put_resp.root_digest, DIRECTORY_A.digest().to_vec());
+    assert_eq!(put_resp.root_digest, DIRECTORY_A.digest().as_slice());
     // get it back
     let items = get_directories(
@@ -117,7 +117,7 @@ async fn put_get_multiple() {
         .into_inner()
     };
-    assert_eq!(DIRECTORY_B.digest().to_vec(), put_resp.root_digest);
+    assert_eq!(DIRECTORY_B.digest().as_slice(), put_resp.root_digest);
     // now, request b, first in non-recursive mode.
     let items = get_directories(
@@ -167,7 +167,7 @@ async fn put_get_dedup() {
     };
     assert_eq!(
-        DIRECTORY_C.digest().to_vec(),
+        DIRECTORY_C.digest().as_slice(),
         put_resp.into_inner().root_digest
     );


@@ -199,8 +199,8 @@ async fn populate_directory_with_keep(
     // upload empty blob
     let mut bw = blob_service.open_write().await;
     assert_eq!(
-        fixtures::EMPTY_BLOB_DIGEST.to_vec(),
-        bw.close().await.expect("must succeed closing").to_vec(),
+        fixtures::EMPTY_BLOB_DIGEST.as_slice(),
+        bw.close().await.expect("must succeed closing").as_slice(),
     );
     // upload directory
@@ -282,8 +282,8 @@ async fn populate_directory_complicated(
     // upload empty blob
     let mut bw = blob_service.open_write().await;
     assert_eq!(
-        fixtures::EMPTY_BLOB_DIGEST.to_vec(),
-        bw.close().await.expect("must succeed closing").to_vec(),
+        fixtures::EMPTY_BLOB_DIGEST.as_slice(),
+        bw.close().await.expect("must succeed closing").as_slice(),
     );
     // upload inner directory