chore(tvix/castore/protos): buf format

Change-Id: Idf11de78b0d6eca69fda34a89f2c57a00ed89ad5
Reviewed-on: https://cl.tvl.fyi/c/depot/+/10237
Autosubmit: flokli <flokli@flokli.de>
Tested-by: BuildkiteCI
Reviewed-by: Adam Joseph <adam@westernsemico.com>
Authored by Florian Klink on 2023-12-09 15:22:33 +02:00; committed by clbot
parent db3ef5255f
commit 459d9e106f
5 changed files with 97 additions and 97 deletions


@@ -24,8 +24,8 @@ const (
 )
 // A Directory can contain Directory, File or Symlink nodes.
-// Each of these nodes have a name attribute, which is the basename in that directory
-// and node type specific attributes.
+// Each of these nodes have a name attribute, which is the basename in that
+// directory and node type specific attributes.
 // The name attribute:
 // - MUST not contain slashes or null bytes
 // - MUST not be '.' or '..'
@@ -108,14 +108,14 @@ type DirectoryNode struct {
 	Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"`
 	// Number of child elements in the Directory referred to by `digest`.
 	// Calculated by summing up the numbers of `directories`, `files` and
-	// `symlinks`, and for each directory, its size field. Used for inode
-	// number calculation.
+	// `symlinks`, and for each directory, its size field. Used for inode number
+	// calculation.
 	// This field is precisely as verifiable as any other Merkle tree edge.
-	// Resolve `digest`, and you can compute it incrementally. Resolve the
-	// entire tree, and you can fully compute it from scratch.
+	// Resolve `digest`, and you can compute it incrementally. Resolve the entire
+	// tree, and you can fully compute it from scratch.
 	// A credulous implementation won't reject an excessive size, but this is
-	// harmless: you'll have some ordinals without nodes. Undersizing is
-	// obvious and easy to reject: you won't have an ordinal for some nodes.
+	// harmless: you'll have some ordinals without nodes. Undersizing is obvious
+	// and easy to reject: you won't have an ordinal for some nodes.
 	Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
 }
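The size semantics documented here compose bottom-up: a parent only needs the size each child DirectoryNode already reports, not the resolved subtree. A minimal sketch of that calculation, assuming the generated castore-go types use the usual protoc-gen-go field names (Directories, Files, Symlinks, Size); directorySize is a hypothetical helper, not part of the generated code:

package example // sketch only, not part of this commit

import castorev1 "code.tvl.fyi/tvix/castore-go"

// directorySize computes the value the `size` field is documented to carry
// for a DirectoryNode pointing at d: the direct children of d, plus the
// already-computed sizes of its child directories.
func directorySize(d *castorev1.Directory) uint64 {
	size := uint64(len(d.Directories) + len(d.Files) + len(d.Symlinks))
	for _, child := range d.Directories {
		// Each child DirectoryNode carries the element count of its own
		// subtree, so no further resolution is needed here.
		size += child.Size
	}
	return size
}

A consumer can, for example, reserve a contiguous range of size ordinals (inode numbers) for a subtree before resolving it; as the comment notes, an oversized value only leaves some ordinals unused, while an undersized one is immediately detectable.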


@@ -8,8 +8,8 @@ package tvix.castore.v1;
 option go_package = "code.tvl.fyi/tvix/castore-go;castorev1";
 // A Directory can contain Directory, File or Symlink nodes.
-// Each of these nodes have a name attribute, which is the basename in that directory
-// and node type specific attributes.
+// Each of these nodes have a name attribute, which is the basename in that
+// directory and node type specific attributes.
 // The name attribute:
 // - MUST not contain slashes or null bytes
 // - MUST not be '.' or '..'
@@ -17,56 +17,55 @@ option go_package = "code.tvl.fyi/tvix/castore-go;castorev1";
 // Elements in each list need to be lexicographically ordered by the name
 // attribute.
 message Directory {
-    repeated DirectoryNode directories = 1;
-    repeated FileNode files = 2;
-    repeated SymlinkNode symlinks = 3;
+  repeated DirectoryNode directories = 1;
+  repeated FileNode files = 2;
+  repeated SymlinkNode symlinks = 3;
 }
 // A DirectoryNode represents a directory in a Directory.
 message DirectoryNode {
-    // The (base)name of the directory
-    bytes name = 1;
-    // The blake3 hash of a Directory message, serialized in protobuf canonical form.
-    bytes digest = 2;
-    // Number of child elements in the Directory referred to by `digest`.
-    // Calculated by summing up the numbers of `directories`, `files` and
-    // `symlinks`, and for each directory, its size field. Used for inode
-    // number calculation.
-    // This field is precisely as verifiable as any other Merkle tree edge.
-    // Resolve `digest`, and you can compute it incrementally. Resolve the
-    // entire tree, and you can fully compute it from scratch.
-    // A credulous implementation won't reject an excessive size, but this is
-    // harmless: you'll have some ordinals without nodes. Undersizing is
-    // obvious and easy to reject: you won't have an ordinal for some nodes.
-    uint64 size = 3;
+  // The (base)name of the directory
+  bytes name = 1;
+  // The blake3 hash of a Directory message, serialized in protobuf canonical form.
+  bytes digest = 2;
+  // Number of child elements in the Directory referred to by `digest`.
+  // Calculated by summing up the numbers of `directories`, `files` and
+  // `symlinks`, and for each directory, its size field. Used for inode number
+  // calculation.
+  // This field is precisely as verifiable as any other Merkle tree edge.
+  // Resolve `digest`, and you can compute it incrementally. Resolve the entire
+  // tree, and you can fully compute it from scratch.
+  // A credulous implementation won't reject an excessive size, but this is
+  // harmless: you'll have some ordinals without nodes. Undersizing is obvious
+  // and easy to reject: you won't have an ordinal for some nodes.
+  uint64 size = 3;
 }
 // A FileNode represents a regular or executable file in a Directory.
 message FileNode {
-    // The (base)name of the file
-    bytes name = 1;
-    // The blake3 digest of the file contents
-    bytes digest = 2;
-    // The file content size
-    uint64 size = 3;
-    // Whether the file is executable
-    bool executable = 4;
+  // The (base)name of the file
+  bytes name = 1;
+  // The blake3 digest of the file contents
+  bytes digest = 2;
+  // The file content size
+  uint64 size = 3;
+  // Whether the file is executable
+  bool executable = 4;
 }
 // A SymlinkNode represents a symbolic link in a Directory.
 message SymlinkNode {
-    // The (base)name of the symlink
-    bytes name = 1;
-    // The target of the symlink.
-    bytes target = 2;
+  // The (base)name of the symlink
+  bytes name = 1;
+  // The target of the symlink.
+  bytes target = 2;
 }
 // A Node is either a DirectoryNode, FileNode or SymlinkNode.
 message Node {
-    oneof node {
-        DirectoryNode directory = 1;
-        FileNode file = 2;
-        SymlinkNode symlink = 3;
-    }
+  oneof node {
+    DirectoryNode directory = 1;
+    FileNode file = 2;
+    SymlinkNode symlink = 3;
+  }
 }
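The name and ordering rules quoted in this file can be checked in isolation, without resolving any digests. A rough sketch of such a check against the raw name bytes; validName and sortedByName are hypothetical helpers, and the file's full rule set may contain more constraints than the bullets visible in this hunk:

package example // sketch only, not part of this commit

import "bytes"

// validName checks the constraints quoted above: no slashes or null bytes,
// and neither "." nor "..".
func validName(name []byte) bool {
	return !bytes.ContainsAny(name, "/\x00") &&
		!bytes.Equal(name, []byte(".")) &&
		!bytes.Equal(name, []byte(".."))
}

// sortedByName checks that a list of basenames is lexicographically ordered,
// as required for the directories, files and symlinks lists of a Directory.
func sortedByName(names [][]byte) bool {
	for i := 1; i < len(names); i++ {
		if bytes.Compare(names[i-1], names[i]) > 0 {
			return false
		}
	}
	return true
}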


@@ -24,6 +24,7 @@
   buildPhase = ''
     export HOME=$TMPDIR
     buf lint
+    buf format -d --exit-code
     buf generate
     mkdir -p $out


@@ -11,75 +11,75 @@ option go_package = "code.tvl.fyi/tvix/castore-go;castorev1";
 // return the BLAKE3 digest of it, and that's the identifier used to Read/Stat
 // them too.
 service BlobService {
-    // Stat can be used to check for the existence of a blob, as well as
-    // gathering more data about it, like more granular chunking information
-    // or baos.
-    // Server implementations are not required to provide more granular chunking
-    // information, especially if the digest specified in [StatBlobRequest] is
-    // already a chunk of a blob.
-    rpc Stat(StatBlobRequest) returns (StatBlobResponse);
+  // Stat can be used to check for the existence of a blob, as well as
+  // gathering more data about it, like more granular chunking information
+  // or baos.
+  // Server implementations are not required to provide more granular chunking
+  // information, especially if the digest specified in [StatBlobRequest] is
+  // already a chunk of a blob.
+  rpc Stat(StatBlobRequest) returns (StatBlobResponse);
-    // Read allows reading (all) data of a blob/chunk by the BLAKE3 digest of
-    // its contents.
-    // If the backend communicated more granular chunks in the `Stat` request,
-    // this can also be used to read chunks.
-    // This request returns a stream of BlobChunk, which is just a container for
-    // a stream of bytes.
-    // The server may decide on whatever chunking it may seem fit as a size for
-    // the individual BlobChunk sent in the response stream, this is mostly to
-    // keep individual messages at a manageable size.
-    rpc Read(ReadBlobRequest) returns (stream BlobChunk);
+  // Read allows reading (all) data of a blob/chunk by the BLAKE3 digest of
+  // its contents.
+  // If the backend communicated more granular chunks in the `Stat` request,
+  // this can also be used to read chunks.
+  // This request returns a stream of BlobChunk, which is just a container for
+  // a stream of bytes.
+  // The server may decide on whatever chunking it may seem fit as a size for
+  // the individual BlobChunk sent in the response stream, this is mostly to
+  // keep individual messages at a manageable size.
+  rpc Read(ReadBlobRequest) returns (stream BlobChunk);
-    // Put uploads a Blob, by reading a stream of bytes.
-    //
-    // The way the data is chunked up in individual BlobChunk messages sent in
-    // the stream has no effect on how the server ends up chunking blobs up, if
-    // it does at all.
-    rpc Put(stream BlobChunk) returns (PutBlobResponse);
+  // Put uploads a Blob, by reading a stream of bytes.
+  //
+  // The way the data is chunked up in individual BlobChunk messages sent in
+  // the stream has no effect on how the server ends up chunking blobs up, if
+  // it does at all.
+  rpc Put(stream BlobChunk) returns (PutBlobResponse);
 }
 message StatBlobRequest {
-    // The blake3 digest of the blob requested
-    bytes digest = 1;
+  // The blake3 digest of the blob requested
+  bytes digest = 1;
-    // Whether the server should reply with a list of more granular chunks.
-    bool send_chunks = 2;
+  // Whether the server should reply with a list of more granular chunks.
+  bool send_chunks = 2;
-    // Whether the server should reply with a bao.
-    bool send_bao = 3;
+  // Whether the server should reply with a bao.
+  bool send_bao = 3;
 }
 message StatBlobResponse {
-    // If `send_chunks` was set to true, this MAY contain a list of more
-    // granular chunks, which then may be read individually via the `Read`
-    // method.
-    repeated ChunkMeta chunks = 2;
+  // If `send_chunks` was set to true, this MAY contain a list of more
+  // granular chunks, which then may be read individually via the `Read`
+  // method.
+  repeated ChunkMeta chunks = 2;
-    message ChunkMeta {
-        // Digest of that specific chunk
-        bytes digest = 1;
+  message ChunkMeta {
+    // Digest of that specific chunk
+    bytes digest = 1;
-        // Length of that chunk, in bytes.
-        uint64 size = 2;
-    }
+    // Length of that chunk, in bytes.
+    uint64 size = 2;
+  }
-    // If `send_bao` was set to true, this MAY contain a outboard bao.
-    // The exact format and message types here will still be fleshed out.
-    bytes bao = 3;
+  // If `send_bao` was set to true, this MAY contain a outboard bao.
+  // The exact format and message types here will still be fleshed out.
+  bytes bao = 3;
 }
 message ReadBlobRequest {
-    // The blake3 digest of the blob or chunk requested
-    bytes digest = 1;
+  // The blake3 digest of the blob or chunk requested
+  bytes digest = 1;
 }
 // This represents some bytes of a blob.
 // Blobs are sent in smaller chunks to keep message sizes manageable.
 message BlobChunk {
-    bytes data = 1;
+  bytes data = 1;
 }
 message PutBlobResponse {
-    // The blake3 digest of the data that was sent.
-    bytes digest = 1;
+  // The blake3 digest of the data that was sent.
+  bytes digest = 1;
 }
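From the client side, the Stat/Read contract above means a caller may ask for more granular chunks but has to be prepared for an empty answer and fall back to reading the blob as a whole. A rough sketch, assuming the usual protoc-gen-go / protoc-gen-go-grpc names generated from this file into the castore-go package (BlobServiceClient, StatBlobRequest, ReadBlobRequest, BlobChunk); readBlob itself is a hypothetical helper:

package example // sketch only, not part of this commit

import (
	"bytes"
	"context"
	"io"

	castorev1 "code.tvl.fyi/tvix/castore-go"
)

// readBlob asks Stat for more granular chunks and falls back to reading the
// whole blob by its digest when none are advertised.
func readBlob(ctx context.Context, c castorev1.BlobServiceClient, digest []byte) ([]byte, error) {
	stat, err := c.Stat(ctx, &castorev1.StatBlobRequest{Digest: digest, SendChunks: true})
	if err != nil {
		return nil, err
	}
	// Servers are not required to reply with chunks, even if send_chunks is set.
	var digests [][]byte
	if chunks := stat.GetChunks(); len(chunks) > 0 {
		for _, ch := range chunks {
			digests = append(digests, ch.GetDigest())
		}
	} else {
		digests = [][]byte{digest}
	}
	var buf bytes.Buffer
	for _, d := range digests {
		rd, err := c.Read(ctx, &castorev1.ReadBlobRequest{Digest: d})
		if err != nil {
			return nil, err
		}
		for {
			chunk, err := rd.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				return nil, err
			}
			buf.Write(chunk.GetData())
		}
	}
	return buf.Bytes(), nil
}

Note that the framing of the returned BlobChunk stream is chosen by the server purely to keep message sizes manageable; only the ChunkMeta entries from Stat describe chunks that can be addressed and read individually.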


@@ -30,10 +30,10 @@ service DirectoryService {
 message GetDirectoryRequest {
   oneof by_what {
-        // The blake3 hash of the (root) Directory message, serialized in
-        // protobuf canonical form.
-        // Keep in mind this can be a subtree of another root.
-        bytes digest = 1;
+    // The blake3 hash of the (root) Directory message, serialized in
+    // protobuf canonical form.
+    // Keep in mind this can be a subtree of another root.
+    bytes digest = 1;
   }
 // If set to true, recursively resolve all child Directory messages.