c4ee942b1c
Stat exposes metadata about a given blob, such as more granular chunking and baos. It implicitly allows checking for existence too, as asking this for a non-existing Blob will return a Status::not_found grpc error. The previous version returned a Status::not_found error on the Get request too, but there was no chance to prevent the server from starting to stream (except sending an immediate cancellation). Being able to check whether something exists in a BlobStore helps to prevent uploading it in the first place. The granular chunking bits are an optional optimization - if the BlobStore implements no more granular chunking, the Stat response can simply contain a single chunk. Read returns a stream of BlobChunk, which is just a stream of bytes - not necessarily using the chunking that's returned in the reply of a Stat() call. It can be used to read blobs or chunks. Change-Id: I4b6030ef184ace5484c84ca273b49d710433731d Reviewed-on: https://cl.tvl.fyi/c/depot/+/7652 Reviewed-by: tazjin <tazjin@tvl.su> Tested-by: BuildkiteCI
73 lines
2.4 KiB
Protocol Buffer
73 lines
2.4 KiB
Protocol Buffer
// SPDX-License-Identifier: MIT
// Copyright © 2022 The Tvix Authors

syntax = "proto3";

package tvix.store.v1;

option go_package = "code.tvl.fyi/tvix/store/protos;storev1";
// BlobService allows reading and uploading content-addressed blobs of data,
// addressed by their blake3 digest.
service BlobService {
  // Stat exposes metadata about a given blob,
  // such as more granular chunking, baos.
  // It implicitly allows checking for existence too, as asking this for a
  // non-existing Blob will return a Status::not_found grpc error.
  // If there's no more granular chunking available, the response will simply
  // contain a single chunk.
  rpc Stat(StatBlobRequest) returns (BlobMeta);

  // Read returns a stream of BlobChunk, which is just a stream of bytes -
  // not necessarily using the chunking that's returned in the reply of a
  // Stat() call.
  // It can be used to read blobs or chunks.
  rpc Read(ReadBlobRequest) returns (stream BlobChunk);

  // Put uploads a Blob, by reading a stream of bytes.
  rpc Put(stream BlobChunk) returns (PutBlobResponse);
}
// StatBlobRequest selects the blob to stat by its blake3 digest, and controls
// which optional pieces of metadata are included in the BlobMeta reply.
message StatBlobRequest {
  // The blake3 digest of the blob requested.
  bytes digest = 1;

  // Whether to include the chunks field in the reply.
  bool include_chunks = 2;

  // Whether to include the inline_bao field, containing an (outboard) bao.
  // The [bao](https://github.com/oconnor663/bao/blob/master/docs/spec.md)
  // can be used to validate chunks end up hashing to the same root digest.
  // These only really matter when only downloading parts of a blob. Some
  // caution needs to be applied when validating chunks - the bao works with
  // 1K leaf nodes, which might not align with the chunk sizes - this might
  // imply a neighboring chunk might need to be (partially) fetched to
  // validate the hash.
  bool include_bao = 3;
}
// BlobMeta provides more granular chunking information for the requested blob,
// and baos.
message BlobMeta {
  // This provides a list of chunks.
  // Concatenating their contents would produce a blob with the digest that
  // was specified in the request.
  repeated ChunkMeta chunks = 1;

  // ChunkMeta describes a single chunk of a blob.
  message ChunkMeta {
    // The blake3 digest of the chunk.
    bytes digest = 1;

    // The size of the chunk, in bytes.
    uint32 size = 2;
  }

  // An (outboard) bao for the blob. Only populated if it was requested via
  // StatBlobRequest.include_bao.
  bytes inline_bao = 2;
}
// ReadBlobRequest selects the blob (or chunk) to read by its blake3 digest.
message ReadBlobRequest {
  // The blake3 digest of the blob requested.
  bytes digest = 1;
}
// This represents some bytes of a blob.
// Blobs are sent in smaller chunks to keep message sizes manageable.
message BlobChunk {
  // Raw bytes of this chunk of the blob.
  bytes data = 1;
}
// PutBlobResponse is returned by a successful Put(), echoing back the digest
// the server computed over the uploaded data.
message PutBlobResponse {
  // The blake3 digest of the data that was sent.
  bytes digest = 1;
}