refactor(tvix/nar-bridge): have Export return root node
… and nar size / sha256 digest. Instead of producing sparse PathInfo messages when NARs are sent to nar-bridge, the nar-bridge http server now keeps a lookup table (narsha256) -> (rootNode, narSize). This removes a whole bunch of noise, because we don't need to keep sparse fields around. A convenience function `GenPathInfo(rootNode *castorev1pb.Node, narInfo *narinfo.NarInfo)` is added, which is used to produce PathInfo messages, either when receiving a NAR file over http and uploading it to a remote PathInfoService, or to synthesize the PathInfo message to return to the client, if nar-bridge is acting as a PathInfoService for a remote Nix HTTP Binary cache. Change-Id: Ibba1ab6238a050816c4fab29cb21ae88877d8613 Reviewed-on: https://cl.tvl.fyi/c/depot/+/9651 Tested-by: BuildkiteCI Reviewed-by: Brian McGee <brian@bmcgee.ie>
This commit is contained in:
parent
ceb1674e9f
commit
98c17147c6
10 changed files with 211 additions and 297 deletions
|
@ -29,16 +29,18 @@ func renderNar(
|
|||
log *log.Entry,
|
||||
directoryServiceClient castorev1pb.DirectoryServiceClient,
|
||||
blobServiceClient castorev1pb.BlobServiceClient,
|
||||
narHashToPathInfoMu *sync.Mutex,
|
||||
narHashToPathInfo map[string]*storev1pb.PathInfo,
|
||||
narHashDbMu *sync.Mutex,
|
||||
narHashDb map[string]*narData,
|
||||
w io.Writer,
|
||||
narHash *nixhash.Hash,
|
||||
headOnly bool,
|
||||
) error {
|
||||
// look in the lookup table
|
||||
narHashToPathInfoMu.Lock()
|
||||
pathInfo, found := narHashToPathInfo[narHash.SRIString()]
|
||||
narHashToPathInfoMu.Unlock()
|
||||
narHashDbMu.Lock()
|
||||
narData, found := narHashDb[narHash.SRIString()]
|
||||
narHashDbMu.Unlock()
|
||||
|
||||
rootNode := narData.rootNode
|
||||
|
||||
// if we didn't find anything, return 404.
|
||||
if !found {
|
||||
|
@ -53,7 +55,7 @@ func renderNar(
|
|||
directories := make(map[string]*castorev1pb.Directory)
|
||||
|
||||
// If the root node is a directory, ask the directory service for all directories
|
||||
if pathInfoDirectory := pathInfo.GetNode().GetDirectory(); pathInfoDirectory != nil {
|
||||
if pathInfoDirectory := rootNode.GetDirectory(); pathInfoDirectory != nil {
|
||||
rootDirectoryDigest := pathInfoDirectory.GetDigest()
|
||||
log = log.WithField("root_directory", base64.StdEncoding.EncodeToString(rootDirectoryDigest))
|
||||
|
||||
|
@ -95,7 +97,7 @@ func renderNar(
|
|||
// render the NAR file
|
||||
err := storev1pb.Export(
|
||||
w,
|
||||
pathInfo.Node,
|
||||
rootNode,
|
||||
func(directoryDigest []byte) (*castorev1pb.Directory, error) {
|
||||
log.WithField("directory", base64.StdEncoding.EncodeToString(directoryDigest)).Debug("Get directory")
|
||||
directoryRefStr := hex.EncodeToString(directoryDigest)
|
||||
|
@ -177,7 +179,7 @@ func registerNarGet(s *Server) {
|
|||
log := log.WithField("narhash_url", narHash.SRIString())
|
||||
|
||||
// TODO: inline more of that function here?
|
||||
err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, w, narHash, isHead)
|
||||
err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narDbMu, s.narDb, w, narHash, isHead)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
|
|
|
@ -39,7 +39,7 @@ func registerNarPut(s *Server) {
|
|||
directoriesUploader := importer.NewDirectoriesUploader(ctx, s.directoryServiceClient)
|
||||
defer directoriesUploader.Done() //nolint:errcheck
|
||||
|
||||
pathInfo, err := importer.Import(
|
||||
rootNode, narSize, narSha256, err := importer.Import(
|
||||
ctx,
|
||||
// buffer the body by 10MiB
|
||||
bufio.NewReaderSize(r.Body, 10*1024*1024),
|
||||
|
@ -80,7 +80,7 @@ func registerNarPut(s *Server) {
|
|||
// This check ensures the server-side came up with the same root hash.
|
||||
|
||||
if directoriesPutResponse != nil {
|
||||
rootDigestPathInfo := pathInfo.GetNode().GetDirectory().GetDigest()
|
||||
rootDigestPathInfo := rootNode.GetDirectory().GetDigest()
|
||||
rootDigestDirectoriesPutResponse := directoriesPutResponse.GetRootDigest()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
|
@ -102,17 +102,18 @@ func registerNarPut(s *Server) {
|
|||
|
||||
// Compare the nar hash specified in the URL with the one that has been
|
||||
// calculated while processing the NAR file
|
||||
piNarHash, err := nixhash.ParseNixBase32(
|
||||
"sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().NarSha256),
|
||||
// TODO: bump go-nix and remove the parsing
|
||||
narHash, err := nixhash.ParseNixBase32(
|
||||
"sha256:" + nixbase32.EncodeToString(narSha256),
|
||||
)
|
||||
if err != nil {
|
||||
panic("must parse nixbase32")
|
||||
}
|
||||
|
||||
if !bytes.Equal(narHashFromUrl.Digest(), piNarHash.Digest()) {
|
||||
if !bytes.Equal(narHashFromUrl.Digest(), narHash.Digest()) {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"narhash_received_sha256": piNarHash.SRIString(),
|
||||
"narsize": pathInfo.GetNarinfo().GetNarSize(),
|
||||
"narhash_received_sha256": narHash.SRIString(),
|
||||
"narsize": narSize,
|
||||
})
|
||||
log.Error("received bytes don't match narhash from URL")
|
||||
|
||||
|
@ -123,7 +124,6 @@ func registerNarPut(s *Server) {
|
|||
}
|
||||
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
// Insert the partial pathinfo structs into our lookup map,
|
||||
|
@ -131,9 +131,12 @@ func registerNarPut(s *Server) {
|
|||
// The same might exist already, but it'll have the same contents (so
|
||||
// replacing will be a no-op), except maybe the root node Name field value, which
|
||||
// is safe to ignore (as not part of the NAR).
|
||||
s.narHashToPathInfoMu.Lock()
|
||||
s.narHashToPathInfo[piNarHash.SRIString()] = pathInfo
|
||||
s.narHashToPathInfoMu.Unlock()
|
||||
s.narDbMu.Lock()
|
||||
s.narDb[narHash.SRIString()] = &narData{
|
||||
rootNode: rootNode,
|
||||
narSize: narSize,
|
||||
}
|
||||
s.narDbMu.Unlock()
|
||||
|
||||
// Done!
|
||||
})
|
||||
|
|
|
@ -29,7 +29,7 @@ func renderNarinfo(
|
|||
log *log.Entry,
|
||||
pathInfoServiceClient storev1pb.PathInfoServiceClient,
|
||||
narHashToPathInfoMu *sync.Mutex,
|
||||
narHashToPathInfo map[string]*storev1pb.PathInfo,
|
||||
narHashToPathInfo map[string]*narData,
|
||||
outputHash []byte,
|
||||
w io.Writer,
|
||||
headOnly bool,
|
||||
|
@ -51,6 +51,7 @@ func renderNarinfo(
|
|||
return fmt.Errorf("unable to get pathinfo: %w", err)
|
||||
}
|
||||
|
||||
// TODO: don't parse
|
||||
narHash, err := nixhash.ParseNixBase32("sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().GetNarSha256()))
|
||||
if err != nil {
|
||||
// TODO: return proper error
|
||||
|
@ -59,7 +60,10 @@ func renderNarinfo(
|
|||
|
||||
// add things to the lookup table, in case the same process didn't handle the NAR hash yet.
|
||||
narHashToPathInfoMu.Lock()
|
||||
narHashToPathInfo[narHash.SRIString()] = pathInfo
|
||||
narHashToPathInfo[narHash.SRIString()] = &narData{
|
||||
rootNode: pathInfo.GetNode(),
|
||||
narSize: pathInfo.GetNarinfo().GetNarSize(),
|
||||
}
|
||||
narHashToPathInfoMu.Unlock()
|
||||
|
||||
if headOnly {
|
||||
|
@ -102,7 +106,7 @@ func registerNarinfoGet(s *Server) {
|
|||
return
|
||||
}
|
||||
|
||||
err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, outputHash, w, false)
|
||||
err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narDbMu, s.narDb, outputHash, w, false)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
|
|
|
@ -2,14 +2,11 @@ package http
|
|||
|
||||
import (
|
||||
"net/http"
|
||||
"path"
|
||||
|
||||
castorev1pb "code.tvl.fyi/tvix/castore/protos"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/nix-community/go-nix/pkg/narinfo"
|
||||
"github.com/nix-community/go-nix/pkg/nixbase32"
|
||||
"github.com/nix-community/go-nix/pkg/storepath"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -43,12 +40,10 @@ func registerNarinfoPut(s *Server) {
|
|||
"output_path": narInfo.StorePath,
|
||||
})
|
||||
|
||||
var pathInfo *storev1pb.PathInfo
|
||||
|
||||
// look up the narHash in our temporary map
|
||||
s.narHashToPathInfoMu.Lock()
|
||||
pathInfo, found := s.narHashToPathInfo[narInfo.NarHash.SRIString()]
|
||||
s.narHashToPathInfoMu.Unlock()
|
||||
s.narDbMu.Lock()
|
||||
narData, found := s.narDb[narInfo.NarHash.SRIString()]
|
||||
s.narDbMu.Unlock()
|
||||
if !found {
|
||||
log.Error("unable to find referred NAR")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
|
@ -60,10 +55,12 @@ func registerNarinfoPut(s *Server) {
|
|||
return
|
||||
}
|
||||
|
||||
rootNode := narData.rootNode
|
||||
|
||||
// compare fields with what we computed while receiving the NAR file
|
||||
|
||||
// NarSize needs to match
|
||||
if pathInfo.Narinfo.NarSize != narInfo.NarSize {
|
||||
if narData.narSize != narInfo.NarSize {
|
||||
log.Error("narsize mismatch")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to parse narinfo"))
|
||||
|
@ -73,90 +70,23 @@ func registerNarinfoPut(s *Server) {
|
|||
|
||||
return
|
||||
}
|
||||
// We know the narhash in the .narinfo matches one of the two narhashes in the partial pathInfo,
|
||||
// because that's how we found it.
|
||||
|
||||
// FUTUREWORK: We can't compare References yet, but it'd be a good idea to
|
||||
// do reference checking on .nar files server-side during upload.
|
||||
// We however still need to be parse them, because we store
|
||||
// the bytes in pathInfo.References, and the full strings in pathInfo.Narinfo.ReferenceNames.
|
||||
referencesBytes := make([][]byte, 0)
|
||||
for _, reference := range narInfo.References {
|
||||
storePath, err := storepath.FromString(reference)
|
||||
pathInfo, err := importer.GenPathInfo(rootNode, narInfo)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("unable to generate PathInfo")
|
||||
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := w.Write([]byte("unable to generate PathInfo"))
|
||||
if err != nil {
|
||||
log.WithField("reference", reference).WithError(err).Error("unable to parse reference")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
_, err := w.Write([]byte("unable to parse reference"))
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
|
||||
return
|
||||
log.WithError(err).Errorf("unable to write error message to client")
|
||||
}
|
||||
referencesBytes = append(referencesBytes, storePath.Digest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// assemble the []*storev1pb.NARInfo_Signature{} from narinfo.Signatures.
|
||||
pbNarinfoSignatures := make([]*storev1pb.NARInfo_Signature, 0)
|
||||
for _, narinfoSig := range narInfo.Signatures {
|
||||
log.WithField("pathInfo", pathInfo).Debug("inserted new pathInfo")
|
||||
|
||||
pbNarinfoSignatures = append(pbNarinfoSignatures, &storev1pb.NARInfo_Signature{
|
||||
Name: narinfoSig.Name,
|
||||
Data: narinfoSig.Data,
|
||||
})
|
||||
}
|
||||
|
||||
// If everything matches, We will add References, NAR signatures and the
|
||||
// output path name, and then upload to the pathinfo service.
|
||||
// We want a copy here, because we don't want to mutate the contents in the lookup table
|
||||
// until we get things back from the remote store.
|
||||
pathInfoToUpload := &storev1pb.PathInfo{
|
||||
Node: nil, // set below
|
||||
References: referencesBytes,
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: pathInfo.Narinfo.NarSize,
|
||||
NarSha256: pathInfo.Narinfo.NarSha256,
|
||||
Signatures: pbNarinfoSignatures,
|
||||
ReferenceNames: narInfo.References,
|
||||
},
|
||||
}
|
||||
|
||||
// We need to add the basename of the storepath from the .narinfo
|
||||
// to the pathInfo to be sent.
|
||||
switch v := (pathInfo.GetNode().GetNode()).(type) {
|
||||
case *castorev1pb.Node_File:
|
||||
pathInfoToUpload.Node = &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_File{
|
||||
File: &castorev1pb.FileNode{
|
||||
Name: []byte(path.Base(narInfo.StorePath)),
|
||||
Digest: v.File.Digest,
|
||||
Size: v.File.Size,
|
||||
Executable: v.File.Executable,
|
||||
},
|
||||
},
|
||||
}
|
||||
case *castorev1pb.Node_Symlink:
|
||||
pathInfoToUpload.Node = &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Symlink{
|
||||
Symlink: &castorev1pb.SymlinkNode{
|
||||
Name: []byte(path.Base(narInfo.StorePath)),
|
||||
Target: v.Symlink.Target,
|
||||
},
|
||||
},
|
||||
}
|
||||
case *castorev1pb.Node_Directory:
|
||||
pathInfoToUpload.Node = &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Directory{
|
||||
Directory: &castorev1pb.DirectoryNode{
|
||||
Name: []byte(path.Base(narInfo.StorePath)),
|
||||
Digest: v.Directory.Digest,
|
||||
Size: v.Directory.Size,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
receivedPathInfo, err := s.pathInfoServiceClient.Put(ctx, pathInfoToUpload)
|
||||
receivedPathInfo, err := s.pathInfoServiceClient.Put(ctx, pathInfo)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("unable to upload pathinfo to service")
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
|
@ -168,8 +98,6 @@ func registerNarinfoPut(s *Server) {
|
|||
return
|
||||
}
|
||||
|
||||
log.Debugf("received new pathInfo: %v+", receivedPathInfo)
|
||||
|
||||
// TODO: update the local temporary pathinfo with this?
|
||||
log.WithField("pathInfo", receivedPathInfo).Debug("got back PathInfo")
|
||||
})
|
||||
}
|
||||
|
|
|
@ -25,11 +25,17 @@ type Server struct {
|
|||
// When uploading NAR files to a HTTP binary cache, the .nar
|
||||
// files are uploaded before the .narinfo files.
|
||||
// We need *both* to be able to fully construct a PathInfo object.
|
||||
// Keep a in-memory map of narhash(es) (in SRI) to sparse PathInfo.
|
||||
// Keep a in-memory map of narhash(es) (in SRI) to (unnamed) root node and nar
|
||||
// size.
|
||||
// This is necessary until we can ask a PathInfoService for a node with a given
|
||||
// narSha256.
|
||||
narHashToPathInfoMu sync.Mutex
|
||||
narHashToPathInfo map[string]*storev1pb.PathInfo
|
||||
narDbMu sync.Mutex
|
||||
narDb map[string]*narData
|
||||
}
|
||||
|
||||
type narData struct {
|
||||
rootNode *castorev1pb.Node
|
||||
narSize uint64
|
||||
}
|
||||
|
||||
func New(
|
||||
|
@ -64,7 +70,7 @@ func New(
|
|||
directoryServiceClient: directoryServiceClient,
|
||||
blobServiceClient: blobServiceClient,
|
||||
pathInfoServiceClient: pathInfoServiceClient,
|
||||
narHashToPathInfo: make(map[string]*storev1pb.PathInfo),
|
||||
narDb: make(map[string]*narData),
|
||||
}
|
||||
|
||||
registerNarPut(s)
|
||||
|
|
62
tvix/nar-bridge/pkg/importer/gen_pathinfo.go
Normal file
62
tvix/nar-bridge/pkg/importer/gen_pathinfo.go
Normal file
|
@ -0,0 +1,62 @@
|
|||
package importer
|
||||
|
||||
import (
|
||||
castorev1pb "code.tvl.fyi/tvix/castore/protos"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"fmt"
|
||||
"github.com/nix-community/go-nix/pkg/narinfo"
|
||||
"github.com/nix-community/go-nix/pkg/storepath"
|
||||
)
|
||||
|
||||
// GenPathInfo takes a rootNode and narInfo and assembles a PathInfo.
|
||||
// The rootNode is renamed to match the StorePath in the narInfo.
|
||||
func GenPathInfo(rootNode *castorev1pb.Node, narInfo *narinfo.NarInfo) (*storev1pb.PathInfo, error) {
|
||||
// parse the storePath from the .narinfo
|
||||
storePath, err := storepath.FromAbsolutePath(narInfo.StorePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse StorePath: %w", err)
|
||||
}
|
||||
|
||||
// construct the references, by parsing ReferenceNames and extracting the digest
|
||||
references := make([][]byte, len(narInfo.References))
|
||||
for i, referenceStr := range narInfo.References {
|
||||
// parse reference as store path
|
||||
referenceStorePath, err := storepath.FromString(referenceStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse reference %s as storepath: %w", referenceStr, err)
|
||||
}
|
||||
references[i] = referenceStorePath.Digest
|
||||
}
|
||||
|
||||
// construct the narInfo.Signatures[*] from pathInfo.Narinfo.Signatures[*]
|
||||
narinfoSignatures := make([]*storev1pb.NARInfo_Signature, len(narInfo.Signatures))
|
||||
for i, narinfoSig := range narInfo.Signatures {
|
||||
narinfoSignatures[i] = &storev1pb.NARInfo_Signature{
|
||||
Name: narinfoSig.Name,
|
||||
Data: narinfoSig.Data,
|
||||
}
|
||||
}
|
||||
|
||||
// assemble the PathInfo.
|
||||
pathInfo := &storev1pb.PathInfo{
|
||||
// embed a new root node with the name set to the store path basename.
|
||||
Node: castorev1pb.RenamedNode(rootNode, storePath.String()),
|
||||
References: references,
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: narInfo.NarSize,
|
||||
NarSha256: narInfo.FileHash.Digest(),
|
||||
Signatures: narinfoSignatures,
|
||||
ReferenceNames: narInfo.References,
|
||||
},
|
||||
}
|
||||
|
||||
// run Validate on the PathInfo, more as an additional sanity check our code is sound,
|
||||
// to make sure we populated everything properly, before returning it.
|
||||
// Fail hard if we fail validation, this is a code error.
|
||||
if _, err = pathInfo.Validate(); err != nil {
|
||||
panic(fmt.Sprintf("PathInfo failed validation: %v", err))
|
||||
}
|
||||
|
||||
return pathInfo, nil
|
||||
|
||||
}
|
|
@ -10,7 +10,6 @@ import (
|
|||
"strings"
|
||||
|
||||
castorev1pb "code.tvl.fyi/tvix/castore/protos"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/nix-community/go-nix/pkg/nar"
|
||||
)
|
||||
|
||||
|
@ -20,8 +19,8 @@ type stackItem struct {
|
|||
directory *castorev1pb.Directory
|
||||
}
|
||||
|
||||
// Import reads NAR from a reader, and returns a (sparsely populated) PathInfo
|
||||
// object.
|
||||
// Import reads a NAR from a reader, and returns a the root node,
|
||||
// NAR size and NAR sha256 digest.
|
||||
func Import(
|
||||
// a context, to support cancellation
|
||||
ctx context.Context,
|
||||
|
@ -31,7 +30,7 @@ func Import(
|
|||
blobCb func(fileReader io.Reader) ([]byte, error),
|
||||
// callback function called with each finalized directory node
|
||||
directoryCb func(directory *castorev1pb.Directory) ([]byte, error),
|
||||
) (*storev1pb.PathInfo, error) {
|
||||
) (*castorev1pb.Node, uint64, []byte, error) {
|
||||
// We need to wrap the underlying reader a bit.
|
||||
// - we want to keep track of the number of bytes read in total
|
||||
// - we calculate the sha256 digest over all data read
|
||||
|
@ -42,7 +41,7 @@ func Import(
|
|||
multiW := io.MultiWriter(narCountW, sha256W)
|
||||
narReader, err := nar.NewReader(io.TeeReader(r, multiW))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to instantiate nar reader: %w", err)
|
||||
return nil, 0, nil, fmt.Errorf("failed to instantiate nar reader: %w", err)
|
||||
}
|
||||
defer narReader.Close()
|
||||
|
||||
|
@ -98,7 +97,7 @@ func Import(
|
|||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
return nil, 0, nil, ctx.Err()
|
||||
default:
|
||||
// call narReader.Next() to get the next element
|
||||
hdr, err := narReader.Next()
|
||||
|
@ -108,60 +107,49 @@ func Import(
|
|||
if err != nil {
|
||||
// if this returns no EOF, bail out
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return nil, fmt.Errorf("failed getting next nar element: %w", err)
|
||||
return nil, 0, nil, fmt.Errorf("failed getting next nar element: %w", err)
|
||||
}
|
||||
|
||||
// The NAR has been read all the way to the end…
|
||||
// Make sure we close the nar reader, which might read some final trailers.
|
||||
if err := narReader.Close(); err != nil {
|
||||
return nil, fmt.Errorf("unable to close nar reader: %w", err)
|
||||
return nil, 0, nil, fmt.Errorf("unable to close nar reader: %w", err)
|
||||
}
|
||||
|
||||
// Check the stack. While it's not empty, we need to pop things off the stack.
|
||||
for len(stack) > 0 {
|
||||
err := popFromStack()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to pop from stack: %w", err)
|
||||
return nil, 0, nil, fmt.Errorf("unable to pop from stack: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Stack is empty. We now either have a regular or symlink root node,
|
||||
// or we encountered at least one directory assemble pathInfo with these and
|
||||
// return.
|
||||
pi := &storev1pb.PathInfo{
|
||||
Node: nil,
|
||||
References: [][]byte{},
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: narCountW.BytesWritten(),
|
||||
NarSha256: sha256W.Sum(nil),
|
||||
Signatures: []*storev1pb.NARInfo_Signature{},
|
||||
ReferenceNames: []string{},
|
||||
},
|
||||
}
|
||||
// Stack is empty.
|
||||
// Now either root{File,Symlink,Directory} is not nil,
|
||||
// and we can return the root node.
|
||||
narSize := narCountW.BytesWritten()
|
||||
narSha256 := sha256W.Sum(nil)
|
||||
|
||||
if rootFile != nil {
|
||||
pi.Node = &castorev1pb.Node{
|
||||
return &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_File{
|
||||
File: rootFile,
|
||||
},
|
||||
}
|
||||
}
|
||||
if rootSymlink != nil {
|
||||
pi.Node = &castorev1pb.Node{
|
||||
}, narSize, narSha256, nil
|
||||
} else if rootSymlink != nil {
|
||||
return &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Symlink{
|
||||
Symlink: rootSymlink,
|
||||
},
|
||||
}
|
||||
}
|
||||
if stackDirectory != nil {
|
||||
}, narSize, narSha256, nil
|
||||
} else if stackDirectory != nil {
|
||||
// calculate directory digest (i.e. after we received all its contents)
|
||||
dgst, err := stackDirectory.Digest()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to calculate root directory digest: %w", err)
|
||||
return nil, 0, nil, fmt.Errorf("unable to calculate root directory digest: %w", err)
|
||||
}
|
||||
|
||||
pi.Node = &castorev1pb.Node{
|
||||
return &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Directory{
|
||||
Directory: &castorev1pb.DirectoryNode{
|
||||
Name: []byte{},
|
||||
|
@ -169,9 +157,10 @@ func Import(
|
|||
Size: stackDirectory.Size(),
|
||||
},
|
||||
},
|
||||
}
|
||||
}, narSize, narSha256, nil
|
||||
} else {
|
||||
return nil, 0, nil, fmt.Errorf("no root set")
|
||||
}
|
||||
return pi, nil
|
||||
}
|
||||
|
||||
// Check for valid path transitions, pop from stack if needed
|
||||
|
@ -185,7 +174,7 @@ func Import(
|
|||
for len(stack) > 1 && !strings.HasPrefix(hdr.Path, stack[len(stack)-1].path+"/") {
|
||||
err := popFromStack()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to pop from stack: %w", err)
|
||||
return nil, 0, nil, fmt.Errorf("unable to pop from stack: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -209,7 +198,7 @@ func Import(
|
|||
|
||||
blobDigest, err := blobCb(blobReader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failure from blobCb: %w", err)
|
||||
return nil, 0, nil, fmt.Errorf("failure from blobCb: %w", err)
|
||||
}
|
||||
|
||||
// ensure blobCb did read all the way to the end.
|
||||
|
|
|
@ -10,7 +10,6 @@ import (
|
|||
|
||||
castorev1pb "code.tvl.fyi/tvix/castore/protos"
|
||||
"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
|
||||
storev1pb "code.tvl.fyi/tvix/store/protos"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -18,7 +17,7 @@ func TestSymlink(t *testing.T) {
|
|||
f, err := os.Open("../../testdata/symlink.nar")
|
||||
require.NoError(t, err)
|
||||
|
||||
actualPathInfo, err := importer.Import(
|
||||
rootNode, narSize, narSha256, err := importer.Import(
|
||||
context.Background(),
|
||||
f,
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -28,35 +27,25 @@ func TestSymlink(t *testing.T) {
|
|||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedPathInfo := &storev1pb.PathInfo{
|
||||
Node: &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Symlink{
|
||||
Symlink: &castorev1pb.SymlinkNode{
|
||||
Name: []byte(""),
|
||||
Target: []byte("/nix/store/somewhereelse"),
|
||||
},
|
||||
require.Equal(t, &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Symlink{
|
||||
Symlink: &castorev1pb.SymlinkNode{
|
||||
Name: []byte(""),
|
||||
Target: []byte("/nix/store/somewhereelse"),
|
||||
},
|
||||
},
|
||||
References: [][]byte{},
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: 136,
|
||||
NarSha256: []byte{
|
||||
0x09, 0x7d, 0x39, 0x7e, 0x9b, 0x58, 0x26, 0x38, 0x4e, 0xaa, 0x16, 0xc4, 0x57, 0x71, 0x5d, 0x1c, 0x1a, 0x51, 0x67, 0x03, 0x13, 0xea, 0xd0, 0xf5, 0x85, 0x66, 0xe0, 0xb2, 0x32, 0x53, 0x9c, 0xf1,
|
||||
},
|
||||
Signatures: []*storev1pb.NARInfo_Signature{},
|
||||
ReferenceNames: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
requireProtoEq(t, expectedPathInfo, actualPathInfo)
|
||||
}, rootNode)
|
||||
require.Equal(t, []byte{
|
||||
0x09, 0x7d, 0x39, 0x7e, 0x9b, 0x58, 0x26, 0x38, 0x4e, 0xaa, 0x16, 0xc4, 0x57, 0x71, 0x5d, 0x1c, 0x1a, 0x51, 0x67, 0x03, 0x13, 0xea, 0xd0, 0xf5, 0x85, 0x66, 0xe0, 0xb2, 0x32, 0x53, 0x9c, 0xf1,
|
||||
}, narSha256)
|
||||
require.Equal(t, uint64(136), narSize)
|
||||
}
|
||||
|
||||
func TestRegular(t *testing.T) {
|
||||
f, err := os.Open("../../testdata/onebyteregular.nar")
|
||||
require.NoError(t, err)
|
||||
|
||||
actualPathInfo, err := importer.Import(
|
||||
rootNode, narSize, narSha256, err := importer.Import(
|
||||
context.Background(),
|
||||
f,
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -68,7 +57,6 @@ func TestRegular(t *testing.T) {
|
|||
panic("no directories expected!")
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The blake3 digest of the 0x01 byte.
|
||||
BLAKE3_DIGEST_0X01 := []byte{
|
||||
|
@ -77,29 +65,21 @@ func TestRegular(t *testing.T) {
|
|||
0x65, 0x2b,
|
||||
}
|
||||
|
||||
expectedPathInfo := &storev1pb.PathInfo{
|
||||
Node: &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_File{
|
||||
File: &castorev1pb.FileNode{
|
||||
Name: []byte(""),
|
||||
Digest: BLAKE3_DIGEST_0X01,
|
||||
Size: 1,
|
||||
Executable: false,
|
||||
},
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_File{
|
||||
File: &castorev1pb.FileNode{
|
||||
Name: []byte(""),
|
||||
Digest: BLAKE3_DIGEST_0X01,
|
||||
Size: 1,
|
||||
Executable: false,
|
||||
},
|
||||
},
|
||||
References: [][]byte{},
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: 120,
|
||||
NarSha256: []byte{
|
||||
0x73, 0x08, 0x50, 0xa8, 0x11, 0x25, 0x9d, 0xbf, 0x3a, 0x68, 0xdc, 0x2e, 0xe8, 0x7a, 0x79, 0xaa, 0x6c, 0xae, 0x9f, 0x71, 0x37, 0x5e, 0xdf, 0x39, 0x6f, 0x9d, 0x7a, 0x91, 0xfb, 0xe9, 0x13, 0x4d,
|
||||
},
|
||||
Signatures: []*storev1pb.NARInfo_Signature{},
|
||||
ReferenceNames: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
requireProtoEq(t, expectedPathInfo, actualPathInfo)
|
||||
}, rootNode)
|
||||
require.Equal(t, []byte{
|
||||
0x73, 0x08, 0x50, 0xa8, 0x11, 0x25, 0x9d, 0xbf, 0x3a, 0x68, 0xdc, 0x2e, 0xe8, 0x7a, 0x79, 0xaa, 0x6c, 0xae, 0x9f, 0x71, 0x37, 0x5e, 0xdf, 0x39, 0x6f, 0x9d, 0x7a, 0x91, 0xfb, 0xe9, 0x13, 0x4d,
|
||||
}, narSha256)
|
||||
require.Equal(t, uint64(120), narSize)
|
||||
}
|
||||
|
||||
func TestEmptyDirectory(t *testing.T) {
|
||||
|
@ -111,7 +91,7 @@ func TestEmptyDirectory(t *testing.T) {
|
|||
Files: []*castorev1pb.FileNode{},
|
||||
Symlinks: []*castorev1pb.SymlinkNode{},
|
||||
}
|
||||
actualPathInfo, err := importer.Import(
|
||||
rootNode, narSize, narSha256, err := importer.Import(
|
||||
context.Background(),
|
||||
f,
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -122,28 +102,19 @@ func TestEmptyDirectory(t *testing.T) {
|
|||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedPathInfo := &storev1pb.PathInfo{
|
||||
Node: &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Directory{
|
||||
Directory: &castorev1pb.DirectoryNode{
|
||||
Name: []byte(""),
|
||||
Digest: mustDirectoryDigest(expectedDirectory),
|
||||
Size: expectedDirectory.Size(),
|
||||
},
|
||||
require.Equal(t, &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Directory{
|
||||
Directory: &castorev1pb.DirectoryNode{
|
||||
Name: []byte(""),
|
||||
Digest: mustDirectoryDigest(expectedDirectory),
|
||||
Size: expectedDirectory.Size(),
|
||||
},
|
||||
},
|
||||
References: [][]byte{},
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: 96,
|
||||
NarSha256: []byte{
|
||||
0xa5, 0x0a, 0x5a, 0xb6, 0xd9, 0x92, 0xf5, 0x59, 0x8e, 0xdd, 0x92, 0x10, 0x50, 0x59, 0xfa, 0xe9, 0xac, 0xfc, 0x19, 0x29, 0x81, 0xe0, 0x8b, 0xd8, 0x85, 0x34, 0xc2, 0x16, 0x7e, 0x92, 0x52, 0x6a,
|
||||
},
|
||||
Signatures: []*storev1pb.NARInfo_Signature{},
|
||||
ReferenceNames: []string{},
|
||||
},
|
||||
}
|
||||
requireProtoEq(t, expectedPathInfo, actualPathInfo)
|
||||
}, rootNode)
|
||||
require.Equal(t, []byte{
|
||||
0xa5, 0x0a, 0x5a, 0xb6, 0xd9, 0x92, 0xf5, 0x59, 0x8e, 0xdd, 0x92, 0x10, 0x50, 0x59, 0xfa, 0xe9, 0xac, 0xfc, 0x19, 0x29, 0x81, 0xe0, 0x8b, 0xd8, 0x85, 0x34, 0xc2, 0x16, 0x7e, 0x92, 0x52, 0x6a,
|
||||
}, narSha256)
|
||||
require.Equal(t, uint64(96), narSize)
|
||||
}
|
||||
|
||||
func TestFull(t *testing.T) {
|
||||
|
@ -458,7 +429,7 @@ func TestFull(t *testing.T) {
|
|||
|
||||
numDirectoriesReceived := 0
|
||||
|
||||
actualPathInfo, err := importer.Import(
|
||||
rootNode, narSize, narSha256, err := importer.Import(
|
||||
context.Background(),
|
||||
f,
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -480,28 +451,19 @@ func TestFull(t *testing.T) {
|
|||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedPathInfo := &storev1pb.PathInfo{
|
||||
Node: &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Directory{
|
||||
Directory: &castorev1pb.DirectoryNode{
|
||||
Name: []byte(""),
|
||||
Digest: mustDirectoryDigest(expectedDirectories["/"]),
|
||||
Size: expectedDirectories["/"].Size(),
|
||||
},
|
||||
require.Equal(t, &castorev1pb.Node{
|
||||
Node: &castorev1pb.Node_Directory{
|
||||
Directory: &castorev1pb.DirectoryNode{
|
||||
Name: []byte(""),
|
||||
Digest: mustDirectoryDigest(expectedDirectories["/"]),
|
||||
Size: expectedDirectories["/"].Size(),
|
||||
},
|
||||
},
|
||||
References: [][]byte{},
|
||||
Narinfo: &storev1pb.NARInfo{
|
||||
NarSize: 464152,
|
||||
NarSha256: []byte{
|
||||
0xc6, 0xe1, 0x55, 0xb3, 0x45, 0x6e, 0x30, 0xb7, 0x61, 0x22, 0x63, 0xec, 0x09, 0x50, 0x70, 0x81, 0x1c, 0xaf, 0x8a, 0xbf, 0xd5, 0x9f, 0xaa, 0x72, 0xab, 0x82, 0xa5, 0x92, 0xef, 0xde, 0xb2, 0x53,
|
||||
},
|
||||
Signatures: []*storev1pb.NARInfo_Signature{},
|
||||
ReferenceNames: []string{},
|
||||
},
|
||||
}
|
||||
requireProtoEq(t, expectedPathInfo, actualPathInfo)
|
||||
}, rootNode)
|
||||
require.Equal(t, []byte{
|
||||
0xc6, 0xe1, 0x55, 0xb3, 0x45, 0x6e, 0x30, 0xb7, 0x61, 0x22, 0x63, 0xec, 0x09, 0x50, 0x70, 0x81, 0x1c, 0xaf, 0x8a, 0xbf, 0xd5, 0x9f, 0xaa, 0x72, 0xab, 0x82, 0xa5, 0x92, 0xef, 0xde, 0xb2, 0x53,
|
||||
}, narSha256)
|
||||
require.Equal(t, uint64(464152), narSize)
|
||||
}
|
||||
|
||||
// TestCallbackErrors ensures that errors returned from the callback function
|
||||
|
@ -514,7 +476,7 @@ func TestCallbackErrors(t *testing.T) {
|
|||
|
||||
targetErr := errors.New("expected error")
|
||||
|
||||
_, err = importer.Import(
|
||||
_, _, _, err = importer.Import(
|
||||
context.Background(),
|
||||
f,
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -532,7 +494,7 @@ func TestCallbackErrors(t *testing.T) {
|
|||
|
||||
targetErr := errors.New("expected error")
|
||||
|
||||
_, err = importer.Import(
|
||||
_, _, _, err = importer.Import(
|
||||
context.Background(),
|
||||
f,
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -562,7 +524,7 @@ func TestPopDirectories(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
|
||||
_, err = importer.Import(
|
||||
_, _, _, err = importer.Import(
|
||||
context.Background(),
|
||||
f,
|
||||
func(blobReader io.Reader) ([]byte, error) { return mustBlobDigest(blobReader), nil },
|
||||
|
|
|
@ -30,7 +30,7 @@ func TestRoundtrip(t *testing.T) {
|
|||
blobsMap := make(map[string][]byte, 0)
|
||||
directoriesMap := make(map[string]*castorev1pb.Directory)
|
||||
|
||||
pathInfo, err := importer.Import(
|
||||
rootNode, _, _, err := importer.Import(
|
||||
context.Background(),
|
||||
bytes.NewBuffer(narContents),
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -56,10 +56,10 @@ func TestRoundtrip(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
// done populating everything, now actually test the export :-)
|
||||
var buf bytes.Buffer
|
||||
var narBuf bytes.Buffer
|
||||
err = storev1pb.Export(
|
||||
&buf,
|
||||
pathInfo.Node,
|
||||
&narBuf,
|
||||
rootNode,
|
||||
func(directoryDgst []byte) (*castorev1pb.Directory, error) {
|
||||
d, found := directoriesMap[base64.StdEncoding.EncodeToString(directoryDgst)]
|
||||
if !found {
|
||||
|
@ -77,5 +77,5 @@ func TestRoundtrip(t *testing.T) {
|
|||
)
|
||||
|
||||
require.NoError(t, err, "exporter shouldn't fail")
|
||||
require.Equal(t, narContents, buf.Bytes())
|
||||
require.Equal(t, narContents, narBuf.Bytes())
|
||||
}
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
mh "github.com/multiformats/go-multihash/core"
|
||||
"github.com/nix-community/go-nix/pkg/narinfo"
|
||||
"github.com/nix-community/go-nix/pkg/nixbase32"
|
||||
"github.com/nix-community/go-nix/pkg/storepath"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/ulikunitz/xz"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -172,7 +171,7 @@ func (p *PathInfoServiceServer) Get(ctx context.Context, getPathInfoRequest *sto
|
|||
|
||||
blobUploaderCb := importer.GenBlobUploaderCb(ctx, p.blobServiceClient)
|
||||
|
||||
pathInfo, err := importer.Import(
|
||||
rootNode, _, importedNarSha256, err := importer.Import(
|
||||
ctx,
|
||||
narBody,
|
||||
func(blobReader io.Reader) ([]byte, error) {
|
||||
|
@ -207,8 +206,7 @@ func (p *PathInfoServiceServer) Get(ctx context.Context, getPathInfoRequest *sto
|
|||
}
|
||||
|
||||
// Compare NAR hash in the NARInfo with the one we calculated while reading the NAR
|
||||
// We already checked above that the digest is in sha256.
|
||||
importedNarSha256 := pathInfo.GetNarinfo().GetNarSha256()
|
||||
// We don't need to additionally compare the narSize.
|
||||
if !bytes.Equal(narInfo.NarHash.Digest(), importedNarSha256) {
|
||||
log := log.WithField("imported_nar_sha256", base64.StdEncoding.EncodeToString(importedNarSha256))
|
||||
log.Error("imported digest doesn't match NARInfo digest")
|
||||
|
@ -216,51 +214,11 @@ func (p *PathInfoServiceServer) Get(ctx context.Context, getPathInfoRequest *sto
|
|||
return nil, fmt.Errorf("imported digest doesn't match NARInfo digest")
|
||||
}
|
||||
|
||||
// annotate importedPathInfo with the rest of the metadata from NARINfo.
|
||||
|
||||
// extract the output digests
|
||||
for _, referenceStr := range narInfo.References {
|
||||
referenceStorePath, err := storepath.FromString(referenceStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse %s as StorePath: %w", referenceStr, err)
|
||||
}
|
||||
|
||||
pathInfo.References = append(pathInfo.References, referenceStorePath.Digest)
|
||||
}
|
||||
|
||||
// extract narInfo.References into pathInfo.NarInfo.ReferenceNames.
|
||||
pathInfo.Narinfo.ReferenceNames = narInfo.References
|
||||
|
||||
// copy over signatures from narInfo.signatures into pathInfo.NarInfo.Signatures.
|
||||
for _, signature := range narInfo.Signatures {
|
||||
pathInfo.Narinfo.Signatures = append(pathInfo.Narinfo.Signatures, &storev1pb.NARInfo_Signature{
|
||||
Name: signature.Name,
|
||||
Data: signature.Data,
|
||||
})
|
||||
}
|
||||
|
||||
// set the root node name to the basename of the output path in the narInfo.
|
||||
// currently the root node has no name yet.
|
||||
outPath, err := storepath.FromAbsolutePath(narInfo.StorePath)
|
||||
// generate PathInfo
|
||||
pathInfo, err := importer.GenPathInfo(rootNode, narInfo)
|
||||
if err != nil {
|
||||
// unreachable due to narInfo.Check()
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pathInfo.Node = castorev1pb.RenamedNode(pathInfo.Node, outPath.String())
|
||||
|
||||
// run Validate on the PathInfo, more as an additional sanity check our code is sound,
|
||||
// to make sure we populated everything properly, before returning it.
|
||||
validatedOutPath, err := pathInfo.Validate()
|
||||
if err != nil {
|
||||
panic("pathinfo failed validation")
|
||||
}
|
||||
if narInfo.StorePath != validatedOutPath.Absolute() {
|
||||
panic(fmt.Sprintf(
|
||||
"StorePath returned from Validate() mismatches the one from .narinfo (%s vs %s)",
|
||||
validatedOutPath.Absolute(),
|
||||
narInfo.StorePath),
|
||||
)
|
||||
log.WithError(err).Error("uable to generate PathInfo")
|
||||
return nil, status.Errorf(codes.Internal, "unable to generate PathInfo")
|
||||
}
|
||||
|
||||
return pathInfo, nil
|
||||
|
|
Loading…
Reference in a new issue