// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0

// Package builder implements the logic for assembling container
// images. It shells out to Nix to retrieve all required Nix packages
// and assemble the symlink layer, and then creates the required
// tarballs in-process.
package builder

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"sort"
	"strings"

	"github.com/google/nixery/config"
	"github.com/google/nixery/layers"
	"github.com/google/nixery/manifest"
	"github.com/google/nixery/storage"
	"github.com/im7mortal/kmutex"
	log "github.com/sirupsen/logrus"
)

// The maximum number of layers in an image is 125. To allow for
// extensibility, the actual number of layers Nixery is "allowed" to
// use up is set at a lower point.
const LayerBudget int = 94

// State holds the runtime state that is carried around in Nixery and
// passed to builder functions.
type State struct {
	Storage storage.Backend
	Cache   *LocalCache
	Cfg     config.Config
	Pop     layers.Popularity
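
	// UploadMutex is keyed on layer hashes and serialises uploads of
	// the same layer, so that concurrent requests for identical images
	// do not race in the staging area (see uploadHashLayer).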
	UploadMutex *kmutex.Kmutex
}

// Architecture represents the possible CPU architectures for which
// container images can be built.
//
// The default architecture is amd64, but support for ARM platforms is
// available within nixpkgs and can be toggled via meta-packages.
type Architecture struct {
	// Name of the system tuple to pass to Nix
	nixSystem string

	// Name of the architecture as used in the OCI manifests
	imageArch string
}

var amd64 = Architecture{"x86_64-linux", "amd64"}
var arm64 = Architecture{"aarch64-linux", "arm64"}

// Image represents the information necessary for building a container image.
// This can be either a list of package names (corresponding to keys in the
// nixpkgs set) or a Nix expression that results in a *list* of derivations.
type Image struct {
	Name string
	Tag  string

	// Names of packages to include in the image. These must correspond
	// directly to top-level names of Nix packages in the nixpkgs tree.
	Packages []string

	// Architecture for which to build the image. Nixery defaults
	// this to amd64 if not specified via meta-packages.
	Arch *Architecture
}

// BuildResult represents the data returned from the server to the
// HTTP handlers. Error information is propagated straight from Nix
// for errors inside of the build that should be fed back to the
// client (such as missing packages).
type BuildResult struct {
	Error string   `json:"error"`
	Pkgs  []string `json:"pkgs"`

	Manifest json.RawMessage `json:"manifest"`
}

// ImageFromName parses an image name into the corresponding structure which can
// be used to invoke Nix.
//
// It will expand convenience names under the hood (see the `metaPackages`
// function below) and append packages that are always included (cacert, iana-etc).
//
// Once assembled the image structure uses a sorted representation of
// the name. This is to avoid unnecessarily cache-busting images if
// only the order of requested packages has changed.
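//
// For example, a request for "shell/git" (assuming `git` exists in the
// configured nixpkgs) expands the "shell" meta-package and results in
// an image containing git alongside bashInteractive, coreutils,
// moreutils, nano, cacert and iana-etc.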
func ImageFromName(name string, tag string) Image {
	pkgs := strings.Split(name, "/")
	arch, expanded := metaPackages(pkgs)
	expanded = append(expanded, "cacert", "iana-etc")

	sort.Strings(pkgs)
	sort.Strings(expanded)

	return Image{
		Name:     strings.Join(pkgs, "/"),
		Tag:      tag,
		Packages: expanded,
		Arch:     arch,
	}
}

// ImageResult represents the output of calling the Nix derivation
// responsible for preparing an image.
type ImageResult struct {
	// These fields are populated in case of an error
	Error string   `json:"error"`
	Pkgs  []string `json:"pkgs"`

	// These fields are populated in case of success
	Graph        layers.RuntimeGraph `json:"runtimeGraph"`
	SymlinkLayer struct {
		Size    int    `json:"size"`
		TarHash string `json:"tarHash"`
		Path    string `json:"path"`
	} `json:"symlinkLayer"`
}

// metaPackages expands package names defined by Nixery which either
// include sets of packages or trigger certain image-building
// behaviour.
//
// Meta-packages must be specified as the first packages in an image
// name.
//
// Currently defined meta-packages are:
//
// * `shell`: Includes bash, coreutils and other common command-line tools
// * `arm64`: Causes Nixery to build images for the ARM64 architecture
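//
// As an illustration of the logic below, the input
// ["arm64", "shell", "git"] selects the arm64 architecture and rewrites
// the package list to
// ["git", "bashInteractive", "coreutils", "moreutils", "nano"].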
func metaPackages(packages []string) (*Architecture, []string) {
	arch := &amd64

	var metapkgs []string
	lastMeta := 0
	for idx, p := range packages {
		if p == "shell" || p == "arm64" {
			metapkgs = append(metapkgs, p)
			lastMeta = idx + 1
		} else {
			break
		}
	}

	// Chop off the meta-packages from the front of the package
	// list
	packages = packages[lastMeta:]

	for _, p := range metapkgs {
		switch p {
		case "shell":
			packages = append(packages, "bashInteractive", "coreutils", "moreutils", "nano")
		case "arm64":
			arch = &arm64
		}
	}

	return arch, packages
}

// logNix logs each output line from Nix. It runs in a goroutine per
// output channel that should be live-logged.
func logNix(image, cmd string, r io.ReadCloser) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		log.WithFields(log.Fields{
			"image": image,
			"cmd":   cmd,
		}).Info("[nix] " + scanner.Text())
	}
}
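
// callNix invokes the given Nix wrapper program with the supplied
// arguments, streaming its stderr output to the log. The program is
// expected to print the path of its result file on stdout; the
// contents of that file are read and returned.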
func callNix(program, image string, args []string) ([]byte, error) {
	cmd := exec.Command(program, args...)

	outpipe, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}

	errpipe, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	go logNix(image, program, errpipe)

	if err = cmd.Start(); err != nil {
		log.WithError(err).WithFields(log.Fields{
			"image": image,
			"cmd":   program,
		}).Error("error invoking Nix")

		return nil, err
	}

	log.WithFields(log.Fields{
		"cmd":   program,
		"image": image,
	}).Info("invoked Nix build")

	stdout, _ := ioutil.ReadAll(outpipe)

	if err = cmd.Wait(); err != nil {
		log.WithError(err).WithFields(log.Fields{
			"image":  image,
			"cmd":    program,
			"stdout": stdout,
		}).Info("failed to invoke Nix")

		return nil, err
	}

	resultFile := strings.TrimSpace(string(stdout))
	buildOutput, err := ioutil.ReadFile(resultFile)
	if err != nil {
		log.WithError(err).WithFields(log.Fields{
			"image": image,
			"file":  resultFile,
		}).Info("failed to read Nix result file")

		return nil, err
	}

	return buildOutput, nil
}

// Call out to Nix and request metadata for the image to be built. All
// required store paths for the image will be realised, but layers
// will not yet be created from them.
//
// This function is only invoked if the manifest is not found in any
// cache.
func prepareImage(s *State, image *Image) (*ImageResult, error) {
	packages, err := json.Marshal(image.Packages)
	if err != nil {
		return nil, err
	}

	srcType, srcArgs := s.Cfg.Pkgs.Render(image.Tag)

	args := []string{
		"--timeout", s.Cfg.Timeout,
		"--argstr", "packages", string(packages),
		"--argstr", "srcType", srcType,
		"--argstr", "srcArgs", srcArgs,
		"--argstr", "system", image.Arch.nixSystem,
	}

	output, err := callNix("nixery-prepare-image", image.Name, args)
	if err != nil {
		// granular error logging is performed in callNix already
		return nil, err
	}

	log.WithFields(log.Fields{
		"image": image.Name,
		"tag":   image.Tag,
	}).Info("finished image preparation via Nix")

	var result ImageResult
	err = json.Unmarshal(output, &result)
	if err != nil {
		return nil, err
	}

	return &result, nil
}

// Groups layers and checks whether they are present in the cache
// already, otherwise calls out to Nix to assemble layers.
//
// Newly built layers are uploaded to the bucket. Cache entries are
// added only after successful uploads, which guarantees that entries
// retrieved from the cache are present in the bucket.
func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageResult) ([]manifest.Entry, error) {
	grouped := layers.GroupLayers(&result.Graph, &s.Pop, LayerBudget)

	var entries []manifest.Entry

	// Splits the layers into those which are already present in
	// the cache, and those that are missing.
	//
	// Missing layers are built and uploaded to the storage
	// bucket.
	for _, l := range grouped {
		lh := l.Hash()

		// While packing store paths, the SHA sum of
		// the uncompressed layer is computed and
		// written to `tarhash`.
		//
		// TODO(tazjin): Refactor this to make the
		// flow of data cleaner.
		lw := func(w io.Writer) (string, error) {
			tarhash, err := packStorePaths(&l, w)
			if err != nil {
				return "", err
			}

			var pkgs []string
			for _, p := range l.Contents {
				pkgs = append(pkgs, layers.PackageFromPath(p))
			}

			log.WithFields(log.Fields{
				"layer":    lh,
				"packages": pkgs,
				"tarhash":  tarhash,
			}).Info("created image layer")

			return tarhash, err
		}

		entry, err := uploadHashLayer(ctx, s, lh, l.MergeRating, lw)
		if err != nil {
			return nil, err
		}

		entries = append(entries, *entry)
	}

	// Symlink layer (built in the first Nix build) needs to be
	// included here manually:
	slkey := result.SymlinkLayer.TarHash
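
	// The symlink layer tarball was produced by the earlier Nix build
	// and its uncompressed tar hash is already known; it only needs to
	// be gzipped on the fly while being uploaded below.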
	entry, err := uploadHashLayer(ctx, s, slkey, 0, func(w io.Writer) (string, error) {
		f, err := os.Open(result.SymlinkLayer.Path)
		if err != nil {
			log.WithError(err).WithFields(log.Fields{
				"image": image.Name,
				"tag":   image.Tag,
				"layer": slkey,
			}).Error("failed to open symlink layer")

			return "", err
		}
		defer f.Close()

		gz := gzip.NewWriter(w)
		_, err = io.Copy(gz, f)
		if err != nil {
			log.WithError(err).WithFields(log.Fields{
				"image": image.Name,
				"tag":   image.Tag,
				"layer": slkey,
			}).Error("failed to upload symlink layer")

			return "", err
		}

		return "sha256:" + slkey, gz.Close()
	})

	if err != nil {
		return nil, err
	}

	entries = append(entries, *entry)

	return entries, nil
}

// layerWriter is the type for functions that can write a layer to the
// multiwriter used for uploading & hashing.
//
// This type exists to avoid duplication between the handling of
// symlink layers and store path layers.
type layerWriter func(w io.Writer) (string, error)

// byteCounter is a special io.Writer that counts all bytes written to
// it and does nothing else.
//
// This is required because the ad-hoc writing of tarballs leaves no
// single place to count the final tarball size otherwise.
type byteCounter struct {
	count int64
}

func (b *byteCounter) Write(p []byte) (n int, err error) {
	b.count += int64(len(p))
	return len(p), nil
}

// Upload a layer tarball to the storage bucket, while hashing it at
// the same time. The supplied function is expected to provide the
// layer data to the writer.
//
// The initial upload is performed in a 'staging' folder, as the
// SHA256-hash is not yet available when the upload is initiated.
//
// After a successful upload, the file is moved to its final location
// in the bucket and the build cache is populated.
//
// The return value is the manifest entry for the layer, containing
// the SHA256 hash that is used in the image manifest.
func uploadHashLayer(ctx context.Context, s *State, key string, mrating uint64, lw layerWriter) (*manifest.Entry, error) {
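	// Uploads of a given layer key are serialised so that concurrent
	// requests for the same image cannot race each other while the
	// layer is being written to and moved out of the staging area.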
	s.UploadMutex.Lock(key)
	defer s.UploadMutex.Unlock(key)

	if entry, cached := layerFromCache(ctx, s, key); cached {
		return entry, nil
	}

	path := "staging/" + key
	var tarhash string
	sha256sum, size, err := s.Storage.Persist(ctx, path, manifest.LayerType, func(sw io.Writer) (string, int64, error) {
		// Sets up a "multiwriter" that simultaneously runs both hash
		// algorithms and uploads to the storage backend.
		shasum := sha256.New()
		counter := &byteCounter{}
		multi := io.MultiWriter(sw, shasum, counter)

		var err error
		tarhash, err = lw(multi)
		sha256sum := fmt.Sprintf("%x", shasum.Sum([]byte{}))

		return sha256sum, counter.count, err
	})

	if err != nil {
		log.WithError(err).WithFields(log.Fields{
			"layer":   key,
			"backend": s.Storage.Name(),
		}).Error("failed to create and store layer")

		return nil, err
	}

	// Hashes are now known and the object is in the bucket, what
	// remains is to move it to the correct location and cache it.
	err = s.Storage.Move(ctx, "staging/"+key, "layers/"+sha256sum)
	if err != nil {
		log.WithError(err).WithField("layer", key).
			Error("failed to move layer from staging")

		return nil, err
	}

	log.WithFields(log.Fields{
		"layer":  key,
		"sha256": sha256sum,
		"size":   size,
	}).Info("created and persisted layer")

	entry := manifest.Entry{
		Digest:      "sha256:" + sha256sum,
		Size:        size,
		TarHash:     tarhash,
		MergeRating: mrating,
	}

	cacheLayer(ctx, s, key, entry)

	return &entry, nil
}
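
// BuildImage builds the requested image: if a cached manifest exists
// it is returned directly, otherwise the image is prepared via Nix,
// its layers and configuration are uploaded, and the resulting
// manifest is cached and returned. Nix-level build errors are passed
// back to the caller inside the BuildResult rather than as an error.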
func BuildImage(ctx context.Context, s *State, image *Image) (*BuildResult, error) {
	key := s.Cfg.Pkgs.CacheKey(image.Packages, image.Tag)
	if key != "" {
		if m, c := manifestFromCache(ctx, s, key); c {
			return &BuildResult{
				Manifest: m,
			}, nil
		}
	}

	imageResult, err := prepareImage(s, image)
	if err != nil {
		return nil, err
	}

	if imageResult.Error != "" {
		return &BuildResult{
			Error: imageResult.Error,
			Pkgs:  imageResult.Pkgs,
		}, nil
	}

	layers, err := prepareLayers(ctx, s, image, imageResult)
	if err != nil {
		return nil, err
	}

	// If the requested packages include a shell, set cmd accordingly.
	cmd := ""
	for _, pkg := range image.Packages {
		if pkg == "bashInteractive" {
			cmd = "bash"
		}
	}
	m, c := manifest.Manifest(image.Arch.imageArch, layers, cmd)
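
	// The image configuration is uploaded in the same way as a layer;
	// it is not a tarball, so no tar hash is reported for it.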
	lw := func(w io.Writer) (string, error) {
		r := bytes.NewReader(c.Config)
		_, err := io.Copy(w, r)
		return "", err
	}

	if _, err = uploadHashLayer(ctx, s, c.SHA256, 0, lw); err != nil {
		log.WithError(err).WithFields(log.Fields{
			"image": image.Name,
			"tag":   image.Tag,
		}).Error("failed to upload config")

		return nil, err
	}

	if key != "" {
		go cacheManifest(ctx, s, key, m)
	}

	result := BuildResult{
		Manifest: m,
	}
	return &result, nil
}