refactor(server): Only compress symlink forest layer once
Instead of compressing and then decompressing again to obtain the underlying tar hash, use the same mechanism as for store path layers on the symlink layer, compressing it only once while uploading.
This commit is contained in:
parent
e22ff5d176
commit
1853c74998
2 changed files with 24 additions and 16 deletions
|
@ -126,9 +126,9 @@ let
|
|||
# Image layer that contains the symlink forest created above. This
|
||||
# must be included in the image to ensure that the filesystem has a
|
||||
# useful layout at runtime.
|
||||
symlinkLayer = runCommand "symlink-layer.tar.gz" {} ''
|
||||
symlinkLayer = runCommand "symlink-layer.tar" {} ''
|
||||
cp -r ${contentsEnv}/ ./layer
|
||||
tar --transform='s|^\./||' -C layer --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 -czf $out .
|
||||
tar --transform='s|^\./||' -C layer --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 -cf $out .
|
||||
'';
|
||||
|
||||
# Metadata about the symlink layer which is required for serving it.
|
||||
|
@ -137,14 +137,11 @@ let
|
|||
symlinkLayerMeta = fromJSON (readFile (runCommand "symlink-layer-meta.json" {
|
||||
buildInputs = with pkgs; [ coreutils jq openssl ];
|
||||
}''
|
||||
gzipHash=$(sha256sum ${symlinkLayer} | cut -d ' ' -f1)
|
||||
tarHash=$(cat ${symlinkLayer} | gzip -d | sha256sum | cut -d ' ' -f1)
|
||||
tarHash=$(sha256sum ${symlinkLayer} | cut -d ' ' -f1)
|
||||
layerSize=$(stat --printf '%s' ${symlinkLayer})
|
||||
|
||||
jq -n -c --arg gzipHash $gzipHash --arg tarHash $tarHash --arg size $layerSize \
|
||||
--arg path ${symlinkLayer} \
|
||||
'{ size: ($size | tonumber), tarHash: $tarHash, gzipHash: $gzipHash, path: $path }' \
|
||||
>> $out
|
||||
jq -n -c --arg tarHash $tarHash --arg size $layerSize --arg path ${symlinkLayer} \
|
||||
'{ size: ($size | tonumber), tarHash: $tarHash, path: $path }' >> $out
|
||||
''));
|
||||
|
||||
# Final output structure returned to Nixery if the build succeeded
|
||||
|
|
|
@ -20,6 +20,7 @@ package builder
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
|
@ -117,10 +118,9 @@ type ImageResult struct {
|
|||
// These fields are populated in case of success
|
||||
Graph layers.RuntimeGraph `json:"runtimeGraph"`
|
||||
SymlinkLayer struct {
|
||||
Size int `json:"size"`
|
||||
TarHash string `json:"tarHash"`
|
||||
GzipHash string `json:"gzipHash"`
|
||||
Path string `json:"path"`
|
||||
Size int `json:"size"`
|
||||
TarHash string `json:"tarHash"`
|
||||
Path string `json:"path"`
|
||||
} `json:"symlinkLayer"`
|
||||
}
|
||||
|
||||
|
@ -309,9 +309,22 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
|
|||
|
||||
// Symlink layer (built in the first Nix build) needs to be
|
||||
// included here manually:
|
||||
slkey := result.SymlinkLayer.GzipHash
|
||||
slkey := result.SymlinkLayer.TarHash
|
||||
entry, err := uploadHashLayer(ctx, s, slkey, func(w io.Writer) error {
|
||||
f, err := os.Open(result.SymlinkLayer.Path)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(log.Fields{
|
||||
"image": image.Name,
|
||||
"tag": image.Tag,
|
||||
"layer": slkey,
|
||||
}).Error("failed to open symlink layer")
|
||||
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
gz := gzip.NewWriter(w)
|
||||
_, err = io.Copy(gz, f)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(log.Fields{
|
||||
"image": image.Name,
|
||||
|
@ -321,10 +334,8 @@ func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageRes
|
|||
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = io.Copy(w, f)
|
||||
return err
|
||||
return gz.Close()
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
|
|
Loading…
Reference in a new issue