refactor(tvix/store/protos): have Export accept root node

We don't need the full PathInfo message, only the root node.

Change-Id: I667045ed766875dfbf8ac126a50b02baa2df67a4
Reviewed-on: https://cl.tvl.fyi/c/depot/+/9604
Tested-by: BuildkiteCI
Reviewed-by: edef <edef@edef.eu>
This commit is contained in:
Florian Klink 2023-10-10 21:24:36 +02:00 committed by flokli
parent fe963ae0a3
commit d94749ac22
2 changed files with 34 additions and 43 deletions

View file

@ -12,12 +12,12 @@ import (
type DirectoryLookupFn func([]byte) (*castorev1pb.Directory, error) type DirectoryLookupFn func([]byte) (*castorev1pb.Directory, error)
type BlobLookupFn func([]byte) (io.ReadCloser, error) type BlobLookupFn func([]byte) (io.ReadCloser, error)
// Export will traverse a given PathInfo structure, and write the contents // Export will traverse a given root node, and write the contents in NAR format
// in NAR format to the passed Writer. // to the passed Writer.
// It uses directoryLookupFn and blobLookupFn to resolve references. // It uses directoryLookupFn and blobLookupFn to resolve references.
func Export( func Export(
w io.Writer, w io.Writer,
pathInfo *PathInfo, rootNode *castorev1pb.Node,
directoryLookupFn DirectoryLookupFn, directoryLookupFn DirectoryLookupFn,
blobLookupFn BlobLookupFn, blobLookupFn BlobLookupFn,
) error { ) error {
@ -43,18 +43,17 @@ func Export(
// peek at the pathInfo root and assemble the root node and write to writer // peek at the pathInfo root and assemble the root node and write to writer
// in the case of a regular file, we retrieve and write the contents, close and exit // in the case of a regular file, we retrieve and write the contents, close and exit
// in the case of a symlink, we write the symlink, close and exit // in the case of a symlink, we write the symlink, close and exit
switch v := (pathInfo.GetNode().GetNode()).(type) { if fileNode := rootNode.GetFile(); fileNode != nil {
case *castorev1pb.Node_File:
rootHeader.Type = nar.TypeRegular rootHeader.Type = nar.TypeRegular
rootHeader.Size = int64(v.File.GetSize()) rootHeader.Size = int64(fileNode.GetSize())
rootHeader.Executable = v.File.GetExecutable() rootHeader.Executable = fileNode.GetExecutable()
err := narWriter.WriteHeader(rootHeader) err := narWriter.WriteHeader(rootHeader)
if err != nil { if err != nil {
return fmt.Errorf("unable to write root header: %w", err) return fmt.Errorf("unable to write root header: %w", err)
} }
// if it's a regular file, retrieve and write the contents // if it's a regular file, retrieve and write the contents
blobReader, err := blobLookupFn(v.File.GetDigest()) blobReader, err := blobLookupFn(fileNode.GetDigest())
if err != nil { if err != nil {
return fmt.Errorf("unable to lookup blob: %w", err) return fmt.Errorf("unable to lookup blob: %w", err)
} }
@ -76,10 +75,9 @@ func Export(
} }
return nil return nil
} else if symlinkNode := rootNode.GetSymlink(); symlinkNode != nil {
case *castorev1pb.Node_Symlink:
rootHeader.Type = nar.TypeSymlink rootHeader.Type = nar.TypeSymlink
rootHeader.LinkTarget = string(v.Symlink.GetTarget()) rootHeader.LinkTarget = string(symlinkNode.GetTarget())
err := narWriter.WriteHeader(rootHeader) err := narWriter.WriteHeader(rootHeader)
if err != nil { if err != nil {
return fmt.Errorf("unable to write root header: %w", err) return fmt.Errorf("unable to write root header: %w", err)
@ -89,11 +87,9 @@ func Export(
if err != nil { if err != nil {
return fmt.Errorf("unable to close nar reader: %w", err) return fmt.Errorf("unable to close nar reader: %w", err)
} }
} else if directoryNode := rootNode.GetDirectory(); directoryNode != nil {
return nil
case *castorev1pb.Node_Directory:
// We have a directory at the root, look it up and put it on the stack. // We have a directory at the root, look it up and put it on the stack.
directory, err := directoryLookupFn(v.Directory.Digest) directory, err := directoryLookupFn(directoryNode.GetDigest())
if err != nil { if err != nil {
return fmt.Errorf("unable to lookup directory: %w", err) return fmt.Errorf("unable to lookup directory: %w", err)
} }
@ -108,6 +104,8 @@ func Export(
if err != nil { if err != nil {
return fmt.Errorf("error writing header: %w", err) return fmt.Errorf("error writing header: %w", err)
} }
} else {
panic("invalid type") // unreachable
} }
// as long as the stack is not empty, we keep running. // as long as the stack is not empty, we keep running.

View file

@ -30,21 +30,18 @@ func mustBlobDigest(r io.Reader) []byte {
} }
func TestSymlink(t *testing.T) { func TestSymlink(t *testing.T) {
pathInfo := &storev1pb.PathInfo{ node := &castorev1pb.Node{
Node: &castorev1pb.Node_Symlink{
Node: &castorev1pb.Node{ Symlink: &castorev1pb.SymlinkNode{
Node: &castorev1pb.Node_Symlink{ Name: []byte("doesntmatter"),
Symlink: &castorev1pb.SymlinkNode{ Target: []byte("/nix/store/somewhereelse"),
Name: []byte("doesntmatter"),
Target: []byte("/nix/store/somewhereelse"),
},
}, },
}, },
} }
var buf bytes.Buffer var buf bytes.Buffer
err := storev1pb.Export(&buf, pathInfo, func([]byte) (*castorev1pb.Directory, error) { err := storev1pb.Export(&buf, node, func([]byte) (*castorev1pb.Directory, error) {
panic("no directories expected") panic("no directories expected")
}, func([]byte) (io.ReadCloser, error) { }, func([]byte) (io.ReadCloser, error) {
panic("no files expected") panic("no files expected")
@ -70,22 +67,20 @@ func TestRegular(t *testing.T) {
0x65, 0x2b, 0x65, 0x2b,
} }
pathInfo := &storev1pb.PathInfo{ node := &castorev1pb.Node{
Node: &castorev1pb.Node{ Node: &castorev1pb.Node_File{
Node: &castorev1pb.Node_File{ File: &castorev1pb.FileNode{
File: &castorev1pb.FileNode{ Name: []byte("doesntmatter"),
Name: []byte("doesntmatter"), Digest: BLAKE3_DIGEST_0X01,
Digest: BLAKE3_DIGEST_0X01, Size: 1,
Size: 1, Executable: false,
Executable: false,
},
}, },
}, },
} }
var buf bytes.Buffer var buf bytes.Buffer
err := storev1pb.Export(&buf, pathInfo, func([]byte) (*castorev1pb.Directory, error) { err := storev1pb.Export(&buf, node, func([]byte) (*castorev1pb.Directory, error) {
panic("no directories expected") panic("no directories expected")
}, func(blobRef []byte) (io.ReadCloser, error) { }, func(blobRef []byte) (io.ReadCloser, error) {
if !bytes.Equal(blobRef, BLAKE3_DIGEST_0X01) { if !bytes.Equal(blobRef, BLAKE3_DIGEST_0X01) {
@ -115,21 +110,19 @@ func TestEmptyDirectory(t *testing.T) {
} }
emptyDirectoryDigest := mustDirectoryDigest(emptyDirectory) emptyDirectoryDigest := mustDirectoryDigest(emptyDirectory)
pathInfo := &storev1pb.PathInfo{ node := &castorev1pb.Node{
Node: &castorev1pb.Node{ Node: &castorev1pb.Node_Directory{
Node: &castorev1pb.Node_Directory{ Directory: &castorev1pb.DirectoryNode{
Directory: &castorev1pb.DirectoryNode{ Name: []byte("doesntmatter"),
Name: []byte("doesntmatter"), Digest: emptyDirectoryDigest,
Digest: emptyDirectoryDigest, Size: 0,
Size: 0,
},
}, },
}, },
} }
var buf bytes.Buffer var buf bytes.Buffer
err := storev1pb.Export(&buf, pathInfo, func(directoryRef []byte) (*castorev1pb.Directory, error) { err := storev1pb.Export(&buf, node, func(directoryRef []byte) (*castorev1pb.Directory, error) {
if !bytes.Equal(directoryRef, emptyDirectoryDigest) { if !bytes.Equal(directoryRef, emptyDirectoryDigest) {
panic("unexpected directoryRef") panic("unexpected directoryRef")
} }