refactor(tvix/nar-bridge): have Export return root node
… and nar size / sha256 digest.

Instead of producing sparse PathInfo messages when NARs are sent to nar-bridge, the nar-bridge HTTP server now keeps a lookup table (narsha256) -> (rootNode, narSize). This removes a whole bunch of noise, because we don't need to keep sparse fields around.

A convenience function `GenPathInfo(rootNode *castorev1pb.Node, narInfo *narinfo.NarInfo)` is added, which is used to produce PathInfo messages, either when receiving a NAR file over HTTP and uploading it to a remote PathInfoService, or to synthesize the PathInfo message to return to the client, if nar-bridge is acting as a PathInfoService for a remote Nix HTTP binary cache.

Change-Id: Ibba1ab6238a050816c4fab29cb21ae88877d8613
Reviewed-on: https://cl.tvl.fyi/c/depot/+/9651
Tested-by: BuildkiteCI
Reviewed-by: Brian McGee <brian@bmcgee.ie>
parent ceb1674e9f
commit 98c17147c6
10 changed files with 211 additions and 297 deletions
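The diff below is easier to follow with the shape of the new lookup table in mind: while receiving a NAR, nar-bridge only remembers the (unnamed) root node and the NAR size, keyed by the NAR sha256 in SRI form; a later .narinfo upload looks that entry up and combines it with the parsed narinfo into a full PathInfo. The following is a minimal, self-contained sketch of that flow, not the actual implementation: the `narData`/`narDb` names follow the diff, but `RootNode`, `PathInfo`, and the two handler methods are simplified stand-ins for the real castore/store protobuf types and HTTP handlers.

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"sync"
)

// Simplified stand-ins for the castore/store protobuf types used in the diff.
type RootNode struct{ Name string }

type PathInfo struct {
	Node    RootNode
	NarSize uint64
}

// narData mirrors the struct added to the HTTP server: everything learned
// while receiving a NAR, before the matching .narinfo arrives.
type narData struct {
	rootNode RootNode
	narSize  uint64
}

// sriSha256 renders a sha256 digest in SRI form, the map key used in the diff.
func sriSha256(digest []byte) string {
	return "sha256-" + base64.StdEncoding.EncodeToString(digest)
}

type server struct {
	narDbMu sync.Mutex
	narDb   map[string]*narData
}

// putNAR records the (unnamed) root node and NAR size under the NAR's sha256.
func (s *server) putNAR(narSha256 []byte, rootNode RootNode, narSize uint64) {
	s.narDbMu.Lock()
	defer s.narDbMu.Unlock()
	s.narDb[sriSha256(narSha256)] = &narData{rootNode: rootNode, narSize: narSize}
}

// putNarinfo looks up the previously received NAR and assembles a PathInfo,
// roughly what GenPathInfo does with the real narinfo and root node.
func (s *server) putNarinfo(narSha256 []byte, storePathBase string) (*PathInfo, error) {
	s.narDbMu.Lock()
	nd, found := s.narDb[sriSha256(narSha256)]
	s.narDbMu.Unlock()
	if !found {
		return nil, fmt.Errorf("unable to find referred NAR")
	}
	node := nd.rootNode
	node.Name = storePathBase // rename the root node to the store path basename
	return &PathInfo{Node: node, NarSize: nd.narSize}, nil
}

func main() {
	s := &server{narDb: make(map[string]*narData)}
	digest := sha256.Sum256([]byte("fake NAR contents"))
	s.putNAR(digest[:], RootNode{}, 120)
	pi, err := s.putNarinfo(digest[:], "abc123-hello-1.0")
	if err != nil {
		panic(err)
	}
	fmt.Printf("assembled PathInfo: %+v\n", pi)
}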
@@ -29,16 +29,18 @@ func renderNar(
 	log *log.Entry,
 	directoryServiceClient castorev1pb.DirectoryServiceClient,
 	blobServiceClient castorev1pb.BlobServiceClient,
-	narHashToPathInfoMu *sync.Mutex,
-	narHashToPathInfo map[string]*storev1pb.PathInfo,
+	narHashDbMu *sync.Mutex,
+	narHashDb map[string]*narData,
 	w io.Writer,
 	narHash *nixhash.Hash,
 	headOnly bool,
 ) error {
 	// look in the lookup table
-	narHashToPathInfoMu.Lock()
-	pathInfo, found := narHashToPathInfo[narHash.SRIString()]
-	narHashToPathInfoMu.Unlock()
+	narHashDbMu.Lock()
+	narData, found := narHashDb[narHash.SRIString()]
+	narHashDbMu.Unlock()
 
+	rootNode := narData.rootNode
+
 	// if we didn't find anything, return 404.
 	if !found {

@@ -53,7 +55,7 @@ func renderNar(
 	directories := make(map[string]*castorev1pb.Directory)
 
 	// If the root node is a directory, ask the directory service for all directories
-	if pathInfoDirectory := pathInfo.GetNode().GetDirectory(); pathInfoDirectory != nil {
+	if pathInfoDirectory := rootNode.GetDirectory(); pathInfoDirectory != nil {
 		rootDirectoryDigest := pathInfoDirectory.GetDigest()
 		log = log.WithField("root_directory", base64.StdEncoding.EncodeToString(rootDirectoryDigest))
 

@@ -95,7 +97,7 @@ func renderNar(
 	// render the NAR file
 	err := storev1pb.Export(
 		w,
-		pathInfo.Node,
+		rootNode,
 		func(directoryDigest []byte) (*castorev1pb.Directory, error) {
 			log.WithField("directory", base64.StdEncoding.EncodeToString(directoryDigest)).Debug("Get directory")
 			directoryRefStr := hex.EncodeToString(directoryDigest)

@@ -177,7 +179,7 @@ func registerNarGet(s *Server) {
 		log := log.WithField("narhash_url", narHash.SRIString())
 
 		// TODO: inline more of that function here?
-		err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, w, narHash, isHead)
+		err = renderNar(ctx, log, s.directoryServiceClient, s.blobServiceClient, &s.narDbMu, s.narDb, w, narHash, isHead)
 		if err != nil {
 			if errors.Is(err, fs.ErrNotExist) {
 				w.WriteHeader(http.StatusNotFound)
@@ -39,7 +39,7 @@ func registerNarPut(s *Server) {
 		directoriesUploader := importer.NewDirectoriesUploader(ctx, s.directoryServiceClient)
 		defer directoriesUploader.Done() //nolint:errcheck
 
-		pathInfo, err := importer.Import(
+		rootNode, narSize, narSha256, err := importer.Import(
 			ctx,
 			// buffer the body by 10MiB
 			bufio.NewReaderSize(r.Body, 10*1024*1024),

@@ -80,7 +80,7 @@ func registerNarPut(s *Server) {
 		// This check ensures the server-side came up with the same root hash.
 
 		if directoriesPutResponse != nil {
-			rootDigestPathInfo := pathInfo.GetNode().GetDirectory().GetDigest()
+			rootDigestPathInfo := rootNode.GetDirectory().GetDigest()
 			rootDigestDirectoriesPutResponse := directoriesPutResponse.GetRootDigest()
 
 			log := log.WithFields(logrus.Fields{

@@ -102,17 +102,18 @@ func registerNarPut(s *Server) {
 
 		// Compare the nar hash specified in the URL with the one that has been
 		// calculated while processing the NAR file
-		piNarHash, err := nixhash.ParseNixBase32(
-			"sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().NarSha256),
+		// TODO: bump go-nix and remove the parsing
+		narHash, err := nixhash.ParseNixBase32(
+			"sha256:" + nixbase32.EncodeToString(narSha256),
 		)
 		if err != nil {
 			panic("must parse nixbase32")
 		}
 
-		if !bytes.Equal(narHashFromUrl.Digest(), piNarHash.Digest()) {
+		if !bytes.Equal(narHashFromUrl.Digest(), narHash.Digest()) {
 			log := log.WithFields(logrus.Fields{
-				"narhash_received_sha256": piNarHash.SRIString(),
-				"narsize":                 pathInfo.GetNarinfo().GetNarSize(),
+				"narhash_received_sha256": narHash.SRIString(),
+				"narsize":                 narSize,
 			})
 			log.Error("received bytes don't match narhash from URL")
 

@@ -123,7 +124,6 @@ func registerNarPut(s *Server) {
 			}
-
 			return
 
 		}
 
 		// Insert the partial pathinfo structs into our lookup map,

@@ -131,9 +131,12 @@ func registerNarPut(s *Server) {
 		// The same might exist already, but it'll have the same contents (so
 		// replacing will be a no-op), except maybe the root node Name field value, which
 		// is safe to ignore (as not part of the NAR).
-		s.narHashToPathInfoMu.Lock()
-		s.narHashToPathInfo[piNarHash.SRIString()] = pathInfo
-		s.narHashToPathInfoMu.Unlock()
+		s.narDbMu.Lock()
+		s.narDb[narHash.SRIString()] = &narData{
+			rootNode: rootNode,
+			narSize:  narSize,
+		}
+		s.narDbMu.Unlock()
 
 		// Done!
 	})
@@ -29,7 +29,7 @@ func renderNarinfo(
 	log *log.Entry,
 	pathInfoServiceClient storev1pb.PathInfoServiceClient,
 	narHashToPathInfoMu *sync.Mutex,
-	narHashToPathInfo map[string]*storev1pb.PathInfo,
+	narHashToPathInfo map[string]*narData,
 	outputHash []byte,
 	w io.Writer,
 	headOnly bool,

@@ -51,6 +51,7 @@ func renderNarinfo(
 		return fmt.Errorf("unable to get pathinfo: %w", err)
 	}
 
+	// TODO: don't parse
 	narHash, err := nixhash.ParseNixBase32("sha256:" + nixbase32.EncodeToString(pathInfo.GetNarinfo().GetNarSha256()))
 	if err != nil {
 		// TODO: return proper error

@@ -59,7 +60,10 @@ func renderNarinfo(
 
 	// add things to the lookup table, in case the same process didn't handle the NAR hash yet.
 	narHashToPathInfoMu.Lock()
-	narHashToPathInfo[narHash.SRIString()] = pathInfo
+	narHashToPathInfo[narHash.SRIString()] = &narData{
+		rootNode: pathInfo.GetNode(),
+		narSize:  pathInfo.GetNarinfo().GetNarSize(),
+	}
 	narHashToPathInfoMu.Unlock()
 
 	if headOnly {

@@ -102,7 +106,7 @@ func registerNarinfoGet(s *Server) {
 			return
 		}
 
-		err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narHashToPathInfoMu, s.narHashToPathInfo, outputHash, w, false)
+		err = renderNarinfo(ctx, log, s.pathInfoServiceClient, &s.narDbMu, s.narDb, outputHash, w, false)
 		if err != nil {
 			if errors.Is(err, fs.ErrNotExist) {
 				w.WriteHeader(http.StatusNotFound)
@@ -2,14 +2,11 @@ package http
 
 import (
 	"net/http"
-	"path"
 
-	castorev1pb "code.tvl.fyi/tvix/castore/protos"
-	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
 	"github.com/go-chi/chi/v5"
 	"github.com/nix-community/go-nix/pkg/narinfo"
 	"github.com/nix-community/go-nix/pkg/nixbase32"
-	"github.com/nix-community/go-nix/pkg/storepath"
 	"github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
 )

@@ -43,12 +40,10 @@ func registerNarinfoPut(s *Server) {
 			"output_path": narInfo.StorePath,
 		})
 
-		var pathInfo *storev1pb.PathInfo
-
 		// look up the narHash in our temporary map
-		s.narHashToPathInfoMu.Lock()
-		pathInfo, found := s.narHashToPathInfo[narInfo.NarHash.SRIString()]
-		s.narHashToPathInfoMu.Unlock()
+		s.narDbMu.Lock()
+		narData, found := s.narDb[narInfo.NarHash.SRIString()]
+		s.narDbMu.Unlock()
 		if !found {
 			log.Error("unable to find referred NAR")
 			w.WriteHeader(http.StatusBadRequest)

@@ -60,10 +55,12 @@ func registerNarinfoPut(s *Server) {
 			return
 		}
 
+		rootNode := narData.rootNode
+
 		// compare fields with what we computed while receiving the NAR file
 
 		// NarSize needs to match
-		if pathInfo.Narinfo.NarSize != narInfo.NarSize {
+		if narData.narSize != narInfo.NarSize {
 			log.Error("narsize mismatch")
 			w.WriteHeader(http.StatusBadRequest)
 			_, err := w.Write([]byte("unable to parse narinfo"))

@@ -73,90 +70,23 @@ func registerNarinfoPut(s *Server) {
 
 			return
 		}
-		// We know the narhash in the .narinfo matches one of the two narhashes in the partial pathInfo,
-		// because that's how we found it.
 
-		// FUTUREWORK: We can't compare References yet, but it'd be a good idea to
-		// do reference checking on .nar files server-side during upload.
-		// We however still need to be parse them, because we store
-		// the bytes in pathInfo.References, and the full strings in pathInfo.Narinfo.ReferenceNames.
-		referencesBytes := make([][]byte, 0)
-		for _, reference := range narInfo.References {
-			storePath, err := storepath.FromString(reference)
-			if err != nil {
-				log.WithField("reference", reference).WithError(err).Error("unable to parse reference")
-				w.WriteHeader(http.StatusBadRequest)
-				_, err := w.Write([]byte("unable to parse reference"))
-				if err != nil {
-					log.WithError(err).Errorf("unable to write error message to client")
-				}
-
-				return
-			}
-			referencesBytes = append(referencesBytes, storePath.Digest)
-		}
-
-		// assemble the []*storev1pb.NARInfo_Signature{} from narinfo.Signatures.
-		pbNarinfoSignatures := make([]*storev1pb.NARInfo_Signature, 0)
-		for _, narinfoSig := range narInfo.Signatures {
-
-			pbNarinfoSignatures = append(pbNarinfoSignatures, &storev1pb.NARInfo_Signature{
-				Name: narinfoSig.Name,
-				Data: narinfoSig.Data,
-			})
-		}
-
-		// If everything matches, We will add References, NAR signatures and the
-		// output path name, and then upload to the pathinfo service.
-		// We want a copy here, because we don't want to mutate the contents in the lookup table
-		// until we get things back from the remote store.
-		pathInfoToUpload := &storev1pb.PathInfo{
-			Node:       nil, // set below
-			References: referencesBytes,
-			Narinfo: &storev1pb.NARInfo{
-				NarSize:        pathInfo.Narinfo.NarSize,
-				NarSha256:      pathInfo.Narinfo.NarSha256,
-				Signatures:     pbNarinfoSignatures,
-				ReferenceNames: narInfo.References,
-			},
-		}
-
-		// We need to add the basename of the storepath from the .narinfo
-		// to the pathInfo to be sent.
-		switch v := (pathInfo.GetNode().GetNode()).(type) {
-		case *castorev1pb.Node_File:
-			pathInfoToUpload.Node = &castorev1pb.Node{
-				Node: &castorev1pb.Node_File{
-					File: &castorev1pb.FileNode{
-						Name:       []byte(path.Base(narInfo.StorePath)),
-						Digest:     v.File.Digest,
-						Size:       v.File.Size,
-						Executable: v.File.Executable,
-					},
-				},
-			}
-		case *castorev1pb.Node_Symlink:
-			pathInfoToUpload.Node = &castorev1pb.Node{
-				Node: &castorev1pb.Node_Symlink{
-					Symlink: &castorev1pb.SymlinkNode{
-						Name:   []byte(path.Base(narInfo.StorePath)),
-						Target: v.Symlink.Target,
-					},
-				},
-			}
-		case *castorev1pb.Node_Directory:
-			pathInfoToUpload.Node = &castorev1pb.Node{
-				Node: &castorev1pb.Node_Directory{
-					Directory: &castorev1pb.DirectoryNode{
-						Name:   []byte(path.Base(narInfo.StorePath)),
-						Digest: v.Directory.Digest,
-						Size:   v.Directory.Size,
-					},
-				},
-			}
-		}
-
-		receivedPathInfo, err := s.pathInfoServiceClient.Put(ctx, pathInfoToUpload)
+		pathInfo, err := importer.GenPathInfo(rootNode, narInfo)
+		if err != nil {
+			log.WithError(err).Error("unable to generate PathInfo")
+			w.WriteHeader(http.StatusInternalServerError)
+			_, err := w.Write([]byte("unable to generate PathInfo"))
+			if err != nil {
+				log.WithError(err).Errorf("unable to write error message to client")
+			}
+
+			return
+		}
+
+		log.WithField("pathInfo", pathInfo).Debug("inserted new pathInfo")
+
+		receivedPathInfo, err := s.pathInfoServiceClient.Put(ctx, pathInfo)
+
 		if err != nil {
 			log.WithError(err).Error("unable to upload pathinfo to service")
 			w.WriteHeader(http.StatusInternalServerError)

@@ -168,8 +98,6 @@ func registerNarinfoPut(s *Server) {
 			return
 		}
 
-		log.Debugf("received new pathInfo: %v+", receivedPathInfo)
-
-		// TODO: update the local temporary pathinfo with this?
+		log.WithField("pathInfo", receivedPathInfo).Debug("got back PathInfo")
 	})
 }
@@ -25,11 +25,17 @@ type Server struct {
 	// When uploading NAR files to a HTTP binary cache, the .nar
 	// files are uploaded before the .narinfo files.
 	// We need *both* to be able to fully construct a PathInfo object.
-	// Keep a in-memory map of narhash(es) (in SRI) to sparse PathInfo.
+	// Keep a in-memory map of narhash(es) (in SRI) to (unnamed) root node and nar
+	// size.
 	// This is necessary until we can ask a PathInfoService for a node with a given
 	// narSha256.
-	narHashToPathInfoMu sync.Mutex
-	narHashToPathInfo   map[string]*storev1pb.PathInfo
+	narDbMu sync.Mutex
+	narDb   map[string]*narData
+}
+
+type narData struct {
+	rootNode *castorev1pb.Node
+	narSize  uint64
 }
 
 func New(

@@ -64,7 +70,7 @@ func New(
 		directoryServiceClient: directoryServiceClient,
 		blobServiceClient:      blobServiceClient,
 		pathInfoServiceClient:  pathInfoServiceClient,
-		narHashToPathInfo:      make(map[string]*storev1pb.PathInfo),
+		narDb:                  make(map[string]*narData),
 	}
 
 	registerNarPut(s)
tvix/nar-bridge/pkg/importer/gen_pathinfo.go (new file, 62 lines)

@@ -0,0 +1,62 @@
+package importer
+
+import (
+	castorev1pb "code.tvl.fyi/tvix/castore/protos"
+	storev1pb "code.tvl.fyi/tvix/store/protos"
+	"fmt"
+	"github.com/nix-community/go-nix/pkg/narinfo"
+	"github.com/nix-community/go-nix/pkg/storepath"
+)
+
+// GenPathInfo takes a rootNode and narInfo and assembles a PathInfo.
+// The rootNode is renamed to match the StorePath in the narInfo.
+func GenPathInfo(rootNode *castorev1pb.Node, narInfo *narinfo.NarInfo) (*storev1pb.PathInfo, error) {
+	// parse the storePath from the .narinfo
+	storePath, err := storepath.FromAbsolutePath(narInfo.StorePath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse StorePath: %w", err)
+	}
+
+	// construct the references, by parsing ReferenceNames and extracting the digest
+	references := make([][]byte, len(narInfo.References))
+	for i, referenceStr := range narInfo.References {
+		// parse reference as store path
+		referenceStorePath, err := storepath.FromString(referenceStr)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse reference %s as storepath: %w", referenceStr, err)
+		}
+		references[i] = referenceStorePath.Digest
+	}
+
+	// construct the narInfo.Signatures[*] from pathInfo.Narinfo.Signatures[*]
+	narinfoSignatures := make([]*storev1pb.NARInfo_Signature, len(narInfo.Signatures))
+	for i, narinfoSig := range narInfo.Signatures {
+		narinfoSignatures[i] = &storev1pb.NARInfo_Signature{
+			Name: narinfoSig.Name,
+			Data: narinfoSig.Data,
+		}
+	}
+
+	// assemble the PathInfo.
+	pathInfo := &storev1pb.PathInfo{
+		// embed a new root node with the name set to the store path basename.
+		Node:       castorev1pb.RenamedNode(rootNode, storePath.String()),
+		References: references,
+		Narinfo: &storev1pb.NARInfo{
+			NarSize:        narInfo.NarSize,
+			NarSha256:      narInfo.FileHash.Digest(),
+			Signatures:     narinfoSignatures,
+			ReferenceNames: narInfo.References,
+		},
+	}
+
+	// run Validate on the PathInfo, more as an additional sanity check our code is sound,
+	// to make sure we populated everything properly, before returning it.
+	// Fail hard if we fail validation, this is a code error.
+	if _, err = pathInfo.Validate(); err != nil {
+		panic(fmt.Sprintf("PathInfo failed validation: %v", err))
+	}
+
+	return pathInfo, nil
+
+}
@@ -10,7 +10,6 @@ import (
 	"strings"
 
 	castorev1pb "code.tvl.fyi/tvix/castore/protos"
-	storev1pb "code.tvl.fyi/tvix/store/protos"
 	"github.com/nix-community/go-nix/pkg/nar"
 )
 

@@ -20,8 +19,8 @@ type stackItem struct {
 	directory *castorev1pb.Directory
 }
 
-// Import reads NAR from a reader, and returns a (sparsely populated) PathInfo
-// object.
+// Import reads a NAR from a reader, and returns a the root node,
+// NAR size and NAR sha256 digest.
 func Import(
 	// a context, to support cancellation
 	ctx context.Context,

@@ -31,7 +30,7 @@ func Import(
 	blobCb func(fileReader io.Reader) ([]byte, error),
 	// callback function called with each finalized directory node
 	directoryCb func(directory *castorev1pb.Directory) ([]byte, error),
-) (*storev1pb.PathInfo, error) {
+) (*castorev1pb.Node, uint64, []byte, error) {
 	// We need to wrap the underlying reader a bit.
 	// - we want to keep track of the number of bytes read in total
 	// - we calculate the sha256 digest over all data read

@@ -42,7 +41,7 @@ func Import(
 	multiW := io.MultiWriter(narCountW, sha256W)
 	narReader, err := nar.NewReader(io.TeeReader(r, multiW))
 	if err != nil {
-		return nil, fmt.Errorf("failed to instantiate nar reader: %w", err)
+		return nil, 0, nil, fmt.Errorf("failed to instantiate nar reader: %w", err)
 	}
 	defer narReader.Close()
 

@@ -98,7 +97,7 @@ func Import(
 	for {
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return nil, 0, nil, ctx.Err()
 		default:
 			// call narReader.Next() to get the next element
 			hdr, err := narReader.Next()

@@ -108,60 +107,49 @@ func Import(
 			if err != nil {
 				// if this returns no EOF, bail out
 				if !errors.Is(err, io.EOF) {
-					return nil, fmt.Errorf("failed getting next nar element: %w", err)
+					return nil, 0, nil, fmt.Errorf("failed getting next nar element: %w", err)
 				}
 
 				// The NAR has been read all the way to the end…
 				// Make sure we close the nar reader, which might read some final trailers.
 				if err := narReader.Close(); err != nil {
-					return nil, fmt.Errorf("unable to close nar reader: %w", err)
+					return nil, 0, nil, fmt.Errorf("unable to close nar reader: %w", err)
 				}
 
 				// Check the stack. While it's not empty, we need to pop things off the stack.
 				for len(stack) > 0 {
 					err := popFromStack()
 					if err != nil {
-						return nil, fmt.Errorf("unable to pop from stack: %w", err)
+						return nil, 0, nil, fmt.Errorf("unable to pop from stack: %w", err)
+					}
 				}
 
-				}
-				// Stack is empty. We now either have a regular or symlink root node,
-				// or we encountered at least one directory assemble pathInfo with these and
-				// return.
-				pi := &storev1pb.PathInfo{
-					Node:       nil,
-					References: [][]byte{},
-					Narinfo: &storev1pb.NARInfo{
-						NarSize:        narCountW.BytesWritten(),
-						NarSha256:      sha256W.Sum(nil),
-						Signatures:     []*storev1pb.NARInfo_Signature{},
-						ReferenceNames: []string{},
-					},
-				}
+				// Stack is empty.
+				// Now either root{File,Symlink,Directory} is not nil,
+				// and we can return the root node.
+				narSize := narCountW.BytesWritten()
+				narSha256 := sha256W.Sum(nil)
 
 				if rootFile != nil {
-					pi.Node = &castorev1pb.Node{
+					return &castorev1pb.Node{
 						Node: &castorev1pb.Node_File{
 							File: rootFile,
 						},
-					}
-				}
-				if rootSymlink != nil {
-					pi.Node = &castorev1pb.Node{
+					}, narSize, narSha256, nil
+				} else if rootSymlink != nil {
+					return &castorev1pb.Node{
 						Node: &castorev1pb.Node_Symlink{
 							Symlink: rootSymlink,
 						},
-					}
-				}
-				if stackDirectory != nil {
+					}, narSize, narSha256, nil
+				} else if stackDirectory != nil {
 					// calculate directory digest (i.e. after we received all its contents)
 					dgst, err := stackDirectory.Digest()
 					if err != nil {
-						return nil, fmt.Errorf("unable to calculate root directory digest: %w", err)
+						return nil, 0, nil, fmt.Errorf("unable to calculate root directory digest: %w", err)
 					}
 
-					pi.Node = &castorev1pb.Node{
+					return &castorev1pb.Node{
 						Node: &castorev1pb.Node_Directory{
 							Directory: &castorev1pb.DirectoryNode{
 								Name: []byte{},

@@ -169,10 +157,11 @@ func Import(
 								Size: stackDirectory.Size(),
 							},
 						},
+					}, narSize, narSha256, nil
+				} else {
+					return nil, 0, nil, fmt.Errorf("no root set")
 				}
 			}
-			return pi, nil
-		}
 
 		// Check for valid path transitions, pop from stack if needed
 		// The nar reader already gives us some guarantees about ordering and illegal transitions,

@@ -185,7 +174,7 @@ func Import(
 			for len(stack) > 1 && !strings.HasPrefix(hdr.Path, stack[len(stack)-1].path+"/") {
 				err := popFromStack()
 				if err != nil {
-					return nil, fmt.Errorf("unable to pop from stack: %w", err)
+					return nil, 0, nil, fmt.Errorf("unable to pop from stack: %w", err)
 				}
 			}
 

@@ -209,7 +198,7 @@ func Import(
 
 				blobDigest, err := blobCb(blobReader)
 				if err != nil {
-					return nil, fmt.Errorf("failure from blobCb: %w", err)
+					return nil, 0, nil, fmt.Errorf("failure from blobCb: %w", err)
 				}
 
 				// ensure blobCb did read all the way to the end.
@@ -10,7 +10,6 @@ import (
 
 	castorev1pb "code.tvl.fyi/tvix/castore/protos"
 	"code.tvl.fyi/tvix/nar-bridge/pkg/importer"
-	storev1pb "code.tvl.fyi/tvix/store/protos"
 	"github.com/stretchr/testify/require"
 )
 

@@ -18,7 +17,7 @@ func TestSymlink(t *testing.T) {
 	f, err := os.Open("../../testdata/symlink.nar")
 	require.NoError(t, err)
 
-	actualPathInfo, err := importer.Import(
+	rootNode, narSize, narSha256, err := importer.Import(
 		context.Background(),
 		f,
 		func(blobReader io.Reader) ([]byte, error) {

@@ -28,35 +27,25 @@ func TestSymlink(t *testing.T) {
 		},
 	)
 	require.NoError(t, err)
-	expectedPathInfo := &storev1pb.PathInfo{
-		Node: &castorev1pb.Node{
+	require.Equal(t, &castorev1pb.Node{
 		Node: &castorev1pb.Node_Symlink{
 			Symlink: &castorev1pb.SymlinkNode{
 				Name:   []byte(""),
 				Target: []byte("/nix/store/somewhereelse"),
 			},
 		},
-		},
-		References: [][]byte{},
-		Narinfo: &storev1pb.NARInfo{
-			NarSize: 136,
-			NarSha256: []byte{
+	}, rootNode)
+	require.Equal(t, []byte{
 		0x09, 0x7d, 0x39, 0x7e, 0x9b, 0x58, 0x26, 0x38, 0x4e, 0xaa, 0x16, 0xc4, 0x57, 0x71, 0x5d, 0x1c, 0x1a, 0x51, 0x67, 0x03, 0x13, 0xea, 0xd0, 0xf5, 0x85, 0x66, 0xe0, 0xb2, 0x32, 0x53, 0x9c, 0xf1,
-			},
-			Signatures:     []*storev1pb.NARInfo_Signature{},
-			ReferenceNames: []string{},
-		},
-	}
-
-	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+	}, narSha256)
+	require.Equal(t, uint64(136), narSize)
 }
 
 func TestRegular(t *testing.T) {
 	f, err := os.Open("../../testdata/onebyteregular.nar")
 	require.NoError(t, err)
 
-	actualPathInfo, err := importer.Import(
+	rootNode, narSize, narSha256, err := importer.Import(
 		context.Background(),
 		f,
 		func(blobReader io.Reader) ([]byte, error) {

@@ -68,7 +57,6 @@ func TestRegular(t *testing.T) {
 			panic("no directories expected!")
 		},
 	)
-	require.NoError(t, err)
 
 	// The blake3 digest of the 0x01 byte.
 	BLAKE3_DIGEST_0X01 := []byte{

@@ -77,8 +65,8 @@ func TestRegular(t *testing.T) {
 		0x65, 0x2b,
 	}
 
-	expectedPathInfo := &storev1pb.PathInfo{
-		Node: &castorev1pb.Node{
+	require.NoError(t, err)
+	require.Equal(t, &castorev1pb.Node{
 		Node: &castorev1pb.Node_File{
 			File: &castorev1pb.FileNode{
 				Name: []byte(""),

@@ -87,19 +75,11 @@ func TestRegular(t *testing.T) {
 				Executable: false,
 			},
 		},
-		},
-		References: [][]byte{},
-		Narinfo: &storev1pb.NARInfo{
-			NarSize: 120,
-			NarSha256: []byte{
+	}, rootNode)
+	require.Equal(t, []byte{
 		0x73, 0x08, 0x50, 0xa8, 0x11, 0x25, 0x9d, 0xbf, 0x3a, 0x68, 0xdc, 0x2e, 0xe8, 0x7a, 0x79, 0xaa, 0x6c, 0xae, 0x9f, 0x71, 0x37, 0x5e, 0xdf, 0x39, 0x6f, 0x9d, 0x7a, 0x91, 0xfb, 0xe9, 0x13, 0x4d,
-			},
-			Signatures:     []*storev1pb.NARInfo_Signature{},
-			ReferenceNames: []string{},
-		},
-	}
-
-	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+	}, narSha256)
+	require.Equal(t, uint64(120), narSize)
 }
 
 func TestEmptyDirectory(t *testing.T) {

@@ -111,7 +91,7 @@ func TestEmptyDirectory(t *testing.T) {
 		Files:    []*castorev1pb.FileNode{},
 		Symlinks: []*castorev1pb.SymlinkNode{},
 	}
-	actualPathInfo, err := importer.Import(
+	rootNode, narSize, narSha256, err := importer.Import(
 		context.Background(),
 		f,
 		func(blobReader io.Reader) ([]byte, error) {

@@ -122,9 +102,7 @@ func TestEmptyDirectory(t *testing.T) {
 		},
 	)
 	require.NoError(t, err)
-	expectedPathInfo := &storev1pb.PathInfo{
-		Node: &castorev1pb.Node{
+	require.Equal(t, &castorev1pb.Node{
 		Node: &castorev1pb.Node_Directory{
 			Directory: &castorev1pb.DirectoryNode{
 				Name: []byte(""),

@@ -132,18 +110,11 @@ func TestEmptyDirectory(t *testing.T) {
 				Size: expectedDirectory.Size(),
 			},
 		},
-		},
-		References: [][]byte{},
-		Narinfo: &storev1pb.NARInfo{
-			NarSize: 96,
-			NarSha256: []byte{
+	}, rootNode)
+	require.Equal(t, []byte{
 		0xa5, 0x0a, 0x5a, 0xb6, 0xd9, 0x92, 0xf5, 0x59, 0x8e, 0xdd, 0x92, 0x10, 0x50, 0x59, 0xfa, 0xe9, 0xac, 0xfc, 0x19, 0x29, 0x81, 0xe0, 0x8b, 0xd8, 0x85, 0x34, 0xc2, 0x16, 0x7e, 0x92, 0x52, 0x6a,
-			},
-			Signatures:     []*storev1pb.NARInfo_Signature{},
-			ReferenceNames: []string{},
-		},
-	}
-
-	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+	}, narSha256)
+	require.Equal(t, uint64(96), narSize)
 }
 
 func TestFull(t *testing.T) {

@@ -458,7 +429,7 @@ func TestFull(t *testing.T) {
 
 	numDirectoriesReceived := 0
 
-	actualPathInfo, err := importer.Import(
+	rootNode, narSize, narSha256, err := importer.Import(
 		context.Background(),
 		f,
 		func(blobReader io.Reader) ([]byte, error) {

@@ -480,9 +451,7 @@ func TestFull(t *testing.T) {
 		},
 	)
 	require.NoError(t, err)
-	expectedPathInfo := &storev1pb.PathInfo{
-		Node: &castorev1pb.Node{
+	require.Equal(t, &castorev1pb.Node{
 		Node: &castorev1pb.Node_Directory{
 			Directory: &castorev1pb.DirectoryNode{
 				Name: []byte(""),

@@ -490,18 +459,11 @@ func TestFull(t *testing.T) {
 				Size: expectedDirectories["/"].Size(),
 			},
 		},
-		},
-		References: [][]byte{},
-		Narinfo: &storev1pb.NARInfo{
-			NarSize: 464152,
-			NarSha256: []byte{
+	}, rootNode)
+	require.Equal(t, []byte{
 		0xc6, 0xe1, 0x55, 0xb3, 0x45, 0x6e, 0x30, 0xb7, 0x61, 0x22, 0x63, 0xec, 0x09, 0x50, 0x70, 0x81, 0x1c, 0xaf, 0x8a, 0xbf, 0xd5, 0x9f, 0xaa, 0x72, 0xab, 0x82, 0xa5, 0x92, 0xef, 0xde, 0xb2, 0x53,
-			},
-			Signatures:     []*storev1pb.NARInfo_Signature{},
-			ReferenceNames: []string{},
-		},
-	}
-
-	requireProtoEq(t, expectedPathInfo, actualPathInfo)
+	}, narSha256)
+	require.Equal(t, uint64(464152), narSize)
 }
 
 // TestCallbackErrors ensures that errors returned from the callback function

@@ -514,7 +476,7 @@ func TestCallbackErrors(t *testing.T) {
 
 		targetErr := errors.New("expected error")
 
-		_, err = importer.Import(
+		_, _, _, err = importer.Import(
 			context.Background(),
 			f,
 			func(blobReader io.Reader) ([]byte, error) {

@@ -532,7 +494,7 @@ func TestCallbackErrors(t *testing.T) {
 
 		targetErr := errors.New("expected error")
 
-		_, err = importer.Import(
+		_, _, _, err = importer.Import(
 			context.Background(),
 			f,
 			func(blobReader io.Reader) ([]byte, error) {

@@ -562,7 +524,7 @@ func TestPopDirectories(t *testing.T) {
 	require.NoError(t, err)
 	defer f.Close()
 
-	_, err = importer.Import(
+	_, _, _, err = importer.Import(
 		context.Background(),
 		f,
 		func(blobReader io.Reader) ([]byte, error) { return mustBlobDigest(blobReader), nil },
@@ -30,7 +30,7 @@ func TestRoundtrip(t *testing.T) {
 	blobsMap := make(map[string][]byte, 0)
 	directoriesMap := make(map[string]*castorev1pb.Directory)
 
-	pathInfo, err := importer.Import(
+	rootNode, _, _, err := importer.Import(
 		context.Background(),
 		bytes.NewBuffer(narContents),
 		func(blobReader io.Reader) ([]byte, error) {

@@ -56,10 +56,10 @@ func TestRoundtrip(t *testing.T) {
 	require.NoError(t, err)
 
 	// done populating everything, now actually test the export :-)
-	var buf bytes.Buffer
+	var narBuf bytes.Buffer
 	err = storev1pb.Export(
-		&buf,
-		pathInfo.Node,
+		&narBuf,
+		rootNode,
 		func(directoryDgst []byte) (*castorev1pb.Directory, error) {
 			d, found := directoriesMap[base64.StdEncoding.EncodeToString(directoryDgst)]
 			if !found {

@@ -77,5 +77,5 @@ func TestRoundtrip(t *testing.T) {
 	)
 
 	require.NoError(t, err, "exporter shouldn't fail")
-	require.Equal(t, narContents, buf.Bytes())
+	require.Equal(t, narContents, narBuf.Bytes())
 }
@@ -16,7 +16,6 @@ import (
 	mh "github.com/multiformats/go-multihash/core"
 	"github.com/nix-community/go-nix/pkg/narinfo"
 	"github.com/nix-community/go-nix/pkg/nixbase32"
-	"github.com/nix-community/go-nix/pkg/storepath"
 	"github.com/sirupsen/logrus"
 	"github.com/ulikunitz/xz"
 	"google.golang.org/grpc/codes"

@@ -172,7 +171,7 @@ func (p *PathInfoServiceServer) Get(ctx context.Context, getPathInfoRequest *sto
 
 	blobUploaderCb := importer.GenBlobUploaderCb(ctx, p.blobServiceClient)
 
-	pathInfo, err := importer.Import(
+	rootNode, _, importedNarSha256, err := importer.Import(
 		ctx,
 		narBody,
 		func(blobReader io.Reader) ([]byte, error) {

@@ -207,8 +206,7 @@ func (p *PathInfoServiceServer) Get(ctx context.Context, getPathInfoRequest *sto
 	}
 
 	// Compare NAR hash in the NARInfo with the one we calculated while reading the NAR
-	// We already checked above that the digest is in sha256.
-	importedNarSha256 := pathInfo.GetNarinfo().GetNarSha256()
+	// We don't need to additionally compare the narSize.
 	if !bytes.Equal(narInfo.NarHash.Digest(), importedNarSha256) {
 		log := log.WithField("imported_nar_sha256", base64.StdEncoding.EncodeToString(importedNarSha256))
 		log.Error("imported digest doesn't match NARInfo digest")

@@ -216,51 +214,11 @@ func (p *PathInfoServiceServer) Get(ctx context.Context, getPathInfoRequest *sto
 		return nil, fmt.Errorf("imported digest doesn't match NARInfo digest")
 	}
 
-	// annotate importedPathInfo with the rest of the metadata from NARINfo.
-
-	// extract the output digests
-	for _, referenceStr := range narInfo.References {
-		referenceStorePath, err := storepath.FromString(referenceStr)
-		if err != nil {
-			return nil, fmt.Errorf("unable to parse %s as StorePath: %w", referenceStr, err)
-		}
-
-		pathInfo.References = append(pathInfo.References, referenceStorePath.Digest)
-	}
-
-	// extract narInfo.References into pathInfo.NarInfo.ReferenceNames.
-	pathInfo.Narinfo.ReferenceNames = narInfo.References
-
-	// copy over signatures from narInfo.signatures into pathInfo.NarInfo.Signatures.
-	for _, signature := range narInfo.Signatures {
-		pathInfo.Narinfo.Signatures = append(pathInfo.Narinfo.Signatures, &storev1pb.NARInfo_Signature{
-			Name: signature.Name,
-			Data: signature.Data,
-		})
-	}
-
-	// set the root node name to the basename of the output path in the narInfo.
-	// currently the root node has no name yet.
-	outPath, err := storepath.FromAbsolutePath(narInfo.StorePath)
-	if err != nil {
-		// unreachable due to narInfo.Check()
-		panic(err)
-	}
-
-	pathInfo.Node = castorev1pb.RenamedNode(pathInfo.Node, outPath.String())
-
-	// run Validate on the PathInfo, more as an additional sanity check our code is sound,
-	// to make sure we populated everything properly, before returning it.
-	validatedOutPath, err := pathInfo.Validate()
-	if err != nil {
-		panic("pathinfo failed validation")
-	}
-	if narInfo.StorePath != validatedOutPath.Absolute() {
-		panic(fmt.Sprintf(
-			"StorePath returned from Validate() mismatches the one from .narinfo (%s vs %s)",
-			validatedOutPath.Absolute(),
-			narInfo.StorePath),
-		)
+	// generate PathInfo
+	pathInfo, err := importer.GenPathInfo(rootNode, narInfo)
+	if err != nil {
+		log.WithError(err).Error("uable to generate PathInfo")
+		return nil, status.Errorf(codes.Internal, "unable to generate PathInfo")
 	}
 
 	return pathInfo, nil