refactor(tvix/castore/blobservice): use io::Result in trait
For all these calls, the caller has enough context about what it did,
so it should be fine to use io::Result here. We pretty much only
constructed crate::Error::StorageError before anyways, so this conveys
*more* information.

Change-Id: I5cabb3769c9c2314bab926d34dda748fda9d3ccc
Reviewed-on: https://cl.tvl.fyi/c/depot/+/10328
Reviewed-by: raitobezarius <tvl@lahfa.xyz>
Tested-by: BuildkiteCI
Autosubmit: flokli <flokli@flokli.de>
parent 91456c3520
commit 30d82efa77

7 changed files with 52 additions and 56 deletions
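The whole change is one conversion pattern applied per backend: instead of flattening errors into crate::Error::StorageError(e.to_string()), wrap them in a std::io::Error. A minimal self-contained sketch of that pattern (BackendError is a hypothetical stand-in for something like tonic::Status; io::Error::new accepts any error convertible to Box<dyn Error + Send + Sync>, so the wrapped error stays reachable instead of being reduced to a string):

use std::{error::Error, fmt, io};

// Hypothetical stand-in for a backend error type such as tonic::Status.
#[derive(Debug)]
struct BackendError(String);

impl fmt::Display for BackendError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "backend error: {}", self.0)
    }
}

impl Error for BackendError {}

fn main() {
    // The pattern the diff applies: wrap the error, don't stringify it.
    let err = io::Error::new(io::ErrorKind::Other, BackendError("boom".into()));

    // Callers that care can still get at the original error...
    assert!(err.get_ref().is_some());
    // ...while Display keeps working for logging.
    println!("{}", err);
}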
--- a/tvix/castore/src/blobservice/grpc.rs
+++ b/tvix/castore/src/blobservice/grpc.rs
@@ -1,7 +1,6 @@
 use super::{naive_seeker::NaiveSeeker, BlobReader, BlobService, BlobWriter};
 use crate::{proto, B3Digest};
 use futures::sink::SinkExt;
-use futures::TryFutureExt;
 use std::{
     collections::VecDeque,
     io::{self},
@@ -39,7 +38,7 @@ impl GRPCBlobService {
 #[async_trait]
 impl BlobService for GRPCBlobService {
     #[instrument(skip(self, digest), fields(blob.digest=%digest))]
-    async fn has(&self, digest: &B3Digest) -> Result<bool, crate::Error> {
+    async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
         let mut grpc_client = self.grpc_client.clone();
         let resp = grpc_client
             .stat(proto::StatBlobRequest {
@@ -51,16 +50,13 @@ impl BlobService for GRPCBlobService {
         match resp {
             Ok(_blob_meta) => Ok(true),
             Err(e) if e.code() == Code::NotFound => Ok(false),
-            Err(e) => Err(crate::Error::StorageError(e.to_string())),
+            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
         }
     }

     // On success, this returns a Ok(Some(io::Read)), which can be used to read
     // the contents of the Blob, identified by the digest.
-    async fn open_read(
-        &self,
-        digest: &B3Digest,
-    ) -> Result<Option<Box<dyn BlobReader>>, crate::Error> {
+    async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
         // Get a stream of [proto::BlobChunk], or return an error if the blob
         // doesn't exist.
         let resp = self
@@ -90,7 +86,7 @@ impl BlobService for GRPCBlobService {
                 Ok(Some(Box::new(NaiveSeeker::new(data_reader))))
             }
             Err(e) if e.code() == Code::NotFound => Ok(None),
-            Err(e) => Err(crate::Error::StorageError(e.to_string())),
+            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
         }
     }

@@ -141,25 +137,20 @@ pub struct GRPCBlobWriter<W: tokio::io::AsyncWrite> {

 #[async_trait]
 impl<W: tokio::io::AsyncWrite + Send + Sync + Unpin + 'static> BlobWriter for GRPCBlobWriter<W> {
-    async fn close(&mut self) -> Result<B3Digest, crate::Error> {
+    async fn close(&mut self) -> io::Result<B3Digest> {
         if self.task_and_writer.is_none() {
             // if we're already closed, return the b3 digest, which must exist.
             // If it doesn't, we already closed and failed once, and didn't handle the error.
             match &self.digest {
                 Some(digest) => Ok(digest.clone()),
-                None => Err(crate::Error::StorageError(
-                    "previously closed with error".to_string(),
-                )),
+                None => Err(io::Error::new(io::ErrorKind::BrokenPipe, "already closed")),
             }
         } else {
             let (task, mut writer) = self.task_and_writer.take().unwrap();

             // invoke shutdown, so the inner writer closes its internal tx side of
             // the channel.
-            writer
-                .shutdown()
-                .map_err(|e| crate::Error::StorageError(e.to_string()))
-                .await?;
+            writer.shutdown().await?;

             // block on the RPC call to return.
             // This ensures all chunks are sent out, and have been received by the
@@ -168,15 +159,17 @@ impl<W: tokio::io::AsyncWrite + Send + Sync + Unpin + 'static> BlobWriter for GRPCBlobWriter<W> {
             match task.await? {
                 Ok(resp) => {
                     // return the digest from the response, and store it in self.digest for subsequent closes.
+                    let digest_len = resp.digest.len();
                     let digest: B3Digest = resp.digest.try_into().map_err(|_| {
-                        crate::Error::StorageError(
-                            "invalid root digest length in response".to_string(),
+                        io::Error::new(
+                            io::ErrorKind::Other,
+                            format!("invalid root digest length {} in response", digest_len),
                         )
                     })?;
                     self.digest = Some(digest.clone());
                     Ok(digest)
                 }
-                Err(e) => Err(crate::Error::StorageError(e.to_string())),
+                Err(e) => Err(io::Error::new(io::ErrorKind::Other, e.to_string())),
             }
         }
     }
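A detail worth noting in close() above: tokio's AsyncWriteExt::shutdown already returns io::Result<()>, so once close() itself returns io::Result, the map_err dance disappears, and with it the futures::TryFutureExt import dropped in the first hunk. A minimal sketch assuming only tokio (close_writer is a hypothetical helper, not code from the commit):

use tokio::io::{AsyncWrite, AsyncWriteExt};

// `?` now propagates shutdown()'s io::Error directly; previously the future
// had to be mapped through crate::Error::StorageError before awaiting.
async fn close_writer<W: AsyncWrite + Unpin>(mut w: W) -> std::io::Result<()> {
    w.shutdown().await?;
    Ok(())
}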
--- a/tvix/castore/src/blobservice/memory.rs
+++ b/tvix/castore/src/blobservice/memory.rs
@@ -8,7 +8,7 @@ use tonic::async_trait;
 use tracing::instrument;

 use super::{BlobReader, BlobService, BlobWriter};
-use crate::{B3Digest, Error};
+use crate::B3Digest;

 #[derive(Clone, Default)]
 pub struct MemoryBlobService {
@@ -18,13 +18,13 @@ pub struct MemoryBlobService {
 #[async_trait]
 impl BlobService for MemoryBlobService {
     #[instrument(skip_all, ret, err, fields(blob.digest=%digest))]
-    async fn has(&self, digest: &B3Digest) -> Result<bool, Error> {
+    async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
         let db = self.db.read().unwrap();
         Ok(db.contains_key(digest))
     }

     #[instrument(skip_all, err, fields(blob.digest=%digest))]
-    async fn open_read(&self, digest: &B3Digest) -> Result<Option<Box<dyn BlobReader>>, Error> {
+    async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
         let db = self.db.read().unwrap();

         match db.get(digest).map(|x| Cursor::new(x.clone())) {
@@ -100,13 +100,11 @@ impl tokio::io::AsyncWrite for MemoryBlobWriter {

 #[async_trait]
 impl BlobWriter for MemoryBlobWriter {
-    async fn close(&mut self) -> Result<B3Digest, Error> {
+    async fn close(&mut self) -> io::Result<B3Digest> {
         if self.writers.is_none() {
             match &self.digest {
                 Some(digest) => Ok(digest.clone()),
-                None => Err(crate::Error::StorageError(
-                    "previously closed with error".to_string(),
-                )),
+                None => Err(io::Error::new(io::ErrorKind::BrokenPipe, "already closed")),
             }
         } else {
             let (buf, hasher) = self.writers.take().unwrap();
@@ -115,13 +113,17 @@ impl BlobWriter for MemoryBlobWriter {
             let digest: B3Digest = hasher.finalize().as_bytes().into();

             // Only insert if the blob doesn't already exist.
-            let db = self.db.read()?;
+            let db = self.db.read().map_err(|e| {
+                io::Error::new(io::ErrorKind::BrokenPipe, format!("RwLock poisoned: {}", e))
+            })?;
             if !db.contains_key(&digest) {
                 // drop the read lock, so we can open for writing.
                 drop(db);

                 // open the database for writing.
-                let mut db = self.db.write()?;
+                let mut db = self.db.write().map_err(|e| {
+                    io::Error::new(io::ErrorKind::BrokenPipe, format!("RwLock poisoned: {}", e))
+                })?;

                 // and put buf in there. This will move buf out.
                 db.insert(digest.clone(), buf);
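The memory backend writes the RwLock-poisoning conversion out inline twice, because `?` can no longer rely on a From<PoisonError<_>> impl for the new error type. If it grew a third call site, a small helper could centralize it; a sketch under the same assumptions as the diff (the poisoned() helper and the toy database are hypothetical, not part of the commit):

use std::io;
use std::sync::{PoisonError, RwLock};

// Map a poisoned-lock error to io::Error the way the diff does inline.
fn poisoned<T>(e: PoisonError<T>) -> io::Error {
    io::Error::new(io::ErrorKind::BrokenPipe, format!("RwLock poisoned: {}", e))
}

// Example use against a toy database type.
fn contains(db: &RwLock<Vec<u8>>, byte: u8) -> io::Result<bool> {
    let guard = db.read().map_err(poisoned)?;
    Ok(guard.contains(&byte))
}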
--- a/tvix/castore/src/blobservice/mod.rs
+++ b/tvix/castore/src/blobservice/mod.rs
@@ -1,7 +1,7 @@
 use std::io;
 use tonic::async_trait;

-use crate::{B3Digest, Error};
+use crate::B3Digest;

 mod from_addr;
 mod grpc;
@@ -25,10 +25,10 @@ pub use self::sled::SledBlobService;
 #[async_trait]
 pub trait BlobService: Send + Sync {
     /// Check if the service has the blob, by its content hash.
-    async fn has(&self, digest: &B3Digest) -> Result<bool, Error>;
+    async fn has(&self, digest: &B3Digest) -> io::Result<bool>;

     /// Request a blob from the store, by its content hash.
-    async fn open_read(&self, digest: &B3Digest) -> Result<Option<Box<dyn BlobReader>>, Error>;
+    async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>>;

     /// Insert a new blob into the store. Returns a [BlobWriter], which
     /// implements [io::Write] and a [BlobWriter::close].
@@ -43,7 +43,7 @@ pub trait BlobWriter: tokio::io::AsyncWrite + Send + Sync + Unpin + 'static {
     /// contents written.
     ///
     /// Closing a already-closed BlobWriter is a no-op.
-    async fn close(&mut self) -> Result<B3Digest, Error>;
+    async fn close(&mut self) -> io::Result<B3Digest>;
 }

 /// A [tokio::io::AsyncRead] that also allows seeking.
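This is where the commit message's argument lands: with the trait speaking io::Result, a caller can mix blob-store calls and ordinary async IO under a single `?`. A hypothetical caller-side sketch (read_blob is not part of the commit; BlobService and B3Digest are the items defined above):

use tokio::io::AsyncReadExt;
use tvix_castore::{blobservice::BlobService, B3Digest};

// Blob lookup errors and byte-level read errors now share one error type.
async fn read_blob(
    svc: &dyn BlobService,
    digest: &B3Digest,
) -> std::io::Result<Option<Vec<u8>>> {
    match svc.open_read(digest).await? {
        None => Ok(None),
        Some(mut reader) => {
            let mut buf = Vec::new();
            reader.read_to_end(&mut buf).await?; // BlobReader: AsyncRead
            Ok(Some(buf))
        }
    }
}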
--- a/tvix/castore/src/blobservice/sled.rs
+++ b/tvix/castore/src/blobservice/sled.rs
@@ -34,19 +34,19 @@ impl SledBlobService {
 #[async_trait]
 impl BlobService for SledBlobService {
     #[instrument(skip(self), fields(blob.digest=%digest))]
-    async fn has(&self, digest: &B3Digest) -> Result<bool, Error> {
+    async fn has(&self, digest: &B3Digest) -> io::Result<bool> {
         match self.db.contains_key(digest.as_slice()) {
             Ok(has) => Ok(has),
-            Err(e) => Err(Error::StorageError(e.to_string())),
+            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e.to_string())),
         }
     }

     #[instrument(skip(self), fields(blob.digest=%digest))]
-    async fn open_read(&self, digest: &B3Digest) -> Result<Option<Box<dyn BlobReader>>, Error> {
+    async fn open_read(&self, digest: &B3Digest) -> io::Result<Option<Box<dyn BlobReader>>> {
         match self.db.get(digest.as_slice()) {
             Ok(None) => Ok(None),
             Ok(Some(data)) => Ok(Some(Box::new(Cursor::new(data[..].to_vec())))),
-            Err(e) => Err(Error::StorageError(e.to_string())),
+            Err(e) => Err(io::Error::new(io::ErrorKind::Other, e.to_string())),
         }
     }

@@ -118,12 +118,13 @@ impl tokio::io::AsyncWrite for SledBlobWriter {

 #[async_trait]
 impl BlobWriter for SledBlobWriter {
-    async fn close(&mut self) -> Result<B3Digest, Error> {
+    async fn close(&mut self) -> io::Result<B3Digest> {
         if self.writers.is_none() {
             match &self.digest {
                 Some(digest) => Ok(digest.clone()),
-                None => Err(crate::Error::StorageError(
-                    "previously closed with error".to_string(),
-                )),
+                None => Err(io::Error::new(
+                    io::ErrorKind::NotConnected,
+                    "already closed",
+                )),
             }
         } else {
--- a/tvix/castore/src/import.rs
+++ b/tvix/castore/src/import.rs
@@ -117,7 +117,10 @@ async fn process_entry<'a>(
             return Err(Error::UnableToRead(entry.path().to_path_buf(), e));
         };

-        let digest = writer.close().await?;
+        let digest = writer
+            .close()
+            .await
+            .map_err(|e| Error::UnableToRead(entry.path().to_path_buf(), e))?;

         return Ok(Node::File(FileNode {
             name: entry.file_name().as_bytes().to_vec().into(),
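The map_err above slots the writer's new io::Error straight into the importer's own error type, keeping path context at the call site. For that to typecheck, the variant presumably carries the path plus the underlying io::Error, roughly like this sketch (an assumed shape, not the actual definition from import.rs):

use std::{io, path::PathBuf};

#[derive(Debug, thiserror::Error)]
pub enum Error {
    // Assumed variant shape: the path being read plus the io::Error cause.
    #[error("unable to read {0:?}: {1}")]
    UnableToRead(PathBuf, io::Error),
}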
--- a/tvix/store/src/nar/mod.rs
+++ b/tvix/store/src/nar/mod.rs
@@ -1,5 +1,5 @@
 use data_encoding::BASE64;
-use tvix_castore::{B3Digest, Error};
+use tvix_castore::B3Digest;

 mod import;
 mod renderer;
@@ -11,7 +11,7 @@ pub use renderer::write_nar;
 #[derive(Debug, thiserror::Error)]
 pub enum RenderError {
     #[error("failure talking to a backing store client: {0}")]
-    StoreError(Error),
+    StoreError(#[source] std::io::Error),

     #[error("unable to find directory {}, referred from {:?}", .0, .1)]
     DirectoryNotFound(B3Digest, bytes::Bytes),
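Two things change in StoreError: it now holds a std::io::Error, and the added #[source] attribute exposes that inner error through std::error::Error::source() in addition to formatting it via {0}. That makes the NAR renderer's failures inspectable by generic chain-walking reporters, e.g.:

use std::error::Error;

// Walk an error chain; with #[source] in place, the io::Error inside
// RenderError::StoreError appears as one hop in this chain.
fn print_chain(mut e: &dyn Error) {
    loop {
        eprintln!("{}", e);
        match e.source() {
            Some(source) => e = source,
            None => break,
        }
    }
}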
--- a/tvix/store/src/nar/renderer.rs
+++ b/tvix/store/src/nar/renderer.rs
@@ -10,12 +10,10 @@ use std::{
 };
 use tokio::io::{self, AsyncWrite, BufReader};
 use tokio_util::compat::{TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt};
-use tracing::warn;
 use tvix_castore::{
     blobservice::BlobService,
     directoryservice::DirectoryService,
     proto::{self as castorepb, NamedNode},
-    Error,
 };

 /// Invoke [write_nar], and return the size and sha256 digest of the produced
@@ -110,14 +108,11 @@ async fn walk_node(
                 .map_err(RenderError::NARWriterError)?;
         }
         castorepb::node::Node::File(proto_file_node) => {
-            let digest = proto_file_node.digest.clone().try_into().map_err(|_e| {
-                warn!(
-                    file_node = ?proto_file_node,
-                    "invalid digest length in file node",
-                );
-
-                RenderError::StoreError(Error::StorageError(
-                    "invalid digest len in file node".to_string(),
+            let digest_len = proto_file_node.digest.len();
+            let digest = proto_file_node.digest.clone().try_into().map_err(|_| {
+                RenderError::StoreError(io::Error::new(
+                    io::ErrorKind::Other,
+                    format!("invalid digest len {} in file node", digest_len),
                 ))
             })?;

@@ -143,13 +138,15 @@ async fn walk_node(
                 .map_err(RenderError::NARWriterError)?;
         }
         castorepb::node::Node::Directory(proto_directory_node) => {
+            let digest_len = proto_directory_node.digest.len();
             let digest = proto_directory_node
                 .digest
                 .clone()
                 .try_into()
-                .map_err(|_e| {
-                    RenderError::StoreError(Error::StorageError(
-                        "invalid digest len in directory node".to_string(),
+                .map_err(|_| {
+                    RenderError::StoreError(io::Error::new(
+                        io::ErrorKind::InvalidData,
+                        format!("invalid digest len {} in directory node", digest_len),
                     ))
                 })?;

@@ -157,7 +154,7 @@ async fn walk_node(
             match directory_service
                 .get(&digest)
                 .await
-                .map_err(RenderError::StoreError)?
+                .map_err(|e| RenderError::StoreError(e.into()))?
             {
                 // if it's None, that's an error!
                 None => {
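The last hunk's .map_err(|e| RenderError::StoreError(e.into())) leans on the directory service's error type converting into io::Error. The conversion itself is not part of this diff; an impl along these lines in tvix_castore would make it typecheck (CastoreError is a stand-in name for illustration):

use std::io;

// Stand-in for the castore error type, purely for illustration.
#[derive(Debug, thiserror::Error)]
pub enum CastoreError {
    #[error("storage error: {0}")]
    StorageError(String),
}

impl From<CastoreError> for io::Error {
    fn from(value: CastoreError) -> Self {
        // Wrap rather than stringify, consistent with the rest of the commit.
        io::Error::new(io::ErrorKind::Other, value)
    }
}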