feat(ci): add integration tests to GitHub Actions, remove .travis.yml
This copies the integration tests from `.travis.yml` into a script, documents the assumptions it makes, and wires it into GitHub Actions. Unlike the Travis version, we don't use Nixery's GCS backend, as handing out access to the bucket used, especially for PRs, needs to be done carefully. Adding back GCS to the integration test can be done at a later point, either by using a mock server, or by only exposing the credentials for master builds (and having the test script decide whether GOOGLE_APPLICATION_CREDENTIALS is set or not). The previous Travis version had some complicated post-mortem log gathering - instead of doing this, we can just `docker run` nixery, but fork it into the background with the shell - causing it to still be able to log its output as it's running. An additional `--rm` is appended, so the container gets cleaned up on termination - this allows subsequent runs on non-CI infrastructure (like developer laptops), without having to manually clean up containers. Fixes #119.
This commit is contained in:
parent
ee48bd891c
commit
970f492235
3 changed files with 53 additions and 78 deletions
|
@ -26,3 +26,5 @@ jobs:
|
||||||
run: "test -z $(gofmt -l .)"
|
run: "test -z $(gofmt -l .)"
|
||||||
- name: Build Nixery
|
- name: Build Nixery
|
||||||
run: "nix-build --arg maxLayers 2"
|
run: "nix-build --arg maxLayers 2"
|
||||||
|
- name: Run integration test
|
||||||
|
run: scripts/integration-test.sh
|
||||||
|
|
|
@ -1,78 +0,0 @@
|
||||||
language: nix
|
|
||||||
arch:
|
|
||||||
- amd64
|
|
||||||
- arm64
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
env:
|
|
||||||
- NIX_PATH=nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/0a40a3999eb4d577418515da842a2622a64880c5.tar.gz
|
|
||||||
before_script:
|
|
||||||
- echo "Running Nixery CI build on $(uname -m)"
|
|
||||||
- mkdir test-files
|
|
||||||
- echo ${GOOGLE_KEY} | base64 -d > test-files/key.json
|
|
||||||
- echo ${GCS_SIGNING_PEM} | base64 -d > test-files/gcs.pem
|
|
||||||
- nix-env -f '<nixpkgs>' -iA -A go
|
|
||||||
script:
|
|
||||||
- test -z $(gofmt -l server/ build-image/)
|
|
||||||
- nix-build --arg maxLayers 2
|
|
||||||
|
|
||||||
# This integration test makes sure that the container image built
|
|
||||||
# for Nixery itself runs fine in Docker, and that images pulled
|
|
||||||
# from it work in Docker.
|
|
||||||
#
|
|
||||||
# Output from the Nixery container is printed at the end of the
|
|
||||||
# test regardless of test status.
|
|
||||||
- IMG=$(docker load -q -i $(nix-build -A nixery-image) | awk '{ print $3 }')
|
|
||||||
- echo "Loaded Nixery image as ${IMG}"
|
|
||||||
|
|
||||||
- |
|
|
||||||
docker run -d -p 8080:8080 --name nixery \
|
|
||||||
-v ${PWD}/test-files:/var/nixery \
|
|
||||||
-e PORT=8080 \
|
|
||||||
-e GCS_BUCKET=nixery-ci-tests \
|
|
||||||
-e GOOGLE_CLOUD_PROJECT=nixery \
|
|
||||||
-e GOOGLE_APPLICATION_CREDENTIALS=/var/nixery/key.json \
|
|
||||||
-e NIXERY_CHANNEL=nixos-unstable \
|
|
||||||
-e NIXERY_STORAGE_BACKEND=gcs \
|
|
||||||
${IMG}
|
|
||||||
|
|
||||||
# print all of the container's logs regardless of success
|
|
||||||
- |
|
|
||||||
function print_logs {
|
|
||||||
echo "Nixery container logs:"
|
|
||||||
docker logs nixery
|
|
||||||
}
|
|
||||||
trap print_logs EXIT
|
|
||||||
|
|
||||||
# Give the container ~20 seconds to come up
|
|
||||||
- |
|
|
||||||
attempts=0
|
|
||||||
echo -n "Waiting for Nixery to start ..."
|
|
||||||
until $(curl --fail --silent "http://localhost:8080/v2/"); do
|
|
||||||
[[ attempts -eq 30 ]] && echo "Nixery container failed to start!" && exit 1
|
|
||||||
((attempts++))
|
|
||||||
echo -n "."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
# Pull and run an image of the current CPU architecture
|
|
||||||
- |
|
|
||||||
case $(uname -m) in
|
|
||||||
x86_64)
|
|
||||||
docker run --rm localhost:8080/hello hello
|
|
||||||
;;
|
|
||||||
aarch64)
|
|
||||||
docker run --rm localhost:8080/arm64/hello hello
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# Pull an image of the opposite CPU architecture (but without running it)
|
|
||||||
- |
|
|
||||||
case $(uname -m) in
|
|
||||||
x86_64)
|
|
||||||
docker pull localhost:8080/arm64/hello
|
|
||||||
;;
|
|
||||||
aarch64)
|
|
||||||
docker pull localhost:8080/hello
|
|
||||||
;;
|
|
||||||
esac
|
|
51
tools/nixery/scripts/integration-test.sh
Executable file
51
tools/nixery/scripts/integration-test.sh
Executable file
|
@ -0,0 +1,51 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -eou pipefail
|
||||||
|
|
||||||
|
# This integration test makes sure that the container image built
|
||||||
|
# for Nixery itself runs fine in Docker, and that images pulled
|
||||||
|
# from it work in Docker.
|
||||||
|
|
||||||
|
IMG=$(docker load -q -i "$(nix-build -A nixery-image)" | awk '{ print $3 }')
|
||||||
|
echo "Loaded Nixery image as ${IMG}"
|
||||||
|
|
||||||
|
# Run the built nixery docker image in the background, but keep printing its
|
||||||
|
# output as it occurs.
|
||||||
|
docker run --rm -p 8080:8080 --name nixery \
|
||||||
|
-e PORT=8080 \
|
||||||
|
--mount type=tmpfs,destination=/var/cache/nixery \
|
||||||
|
-e NIXERY_CHANNEL=nixos-unstable \
|
||||||
|
-e NIXERY_STORAGE_BACKEND=filesystem \
|
||||||
|
-e STORAGE_PATH=/var/cache/nixery \
|
||||||
|
"${IMG}" &
|
||||||
|
|
||||||
|
# Give the container ~20 seconds to come up
|
||||||
|
set +e
|
||||||
|
attempts=0
|
||||||
|
echo -n "Waiting for Nixery to start ..."
|
||||||
|
until curl --fail --silent "http://localhost:8080/v2/"; do
|
||||||
|
[[ attempts -eq 30 ]] && echo "Nixery container failed to start!" && exit 1
|
||||||
|
((attempts++))
|
||||||
|
echo -n "."
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Pull and run an image of the current CPU architecture
|
||||||
|
case $(uname -m) in
|
||||||
|
x86_64)
|
||||||
|
docker run --rm localhost:8080/hello hello
|
||||||
|
;;
|
||||||
|
aarch64)
|
||||||
|
docker run --rm localhost:8080/arm64/hello hello
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Pull an image of the opposite CPU architecture (but without running it)
|
||||||
|
case $(uname -m) in
|
||||||
|
x86_64)
|
||||||
|
docker pull localhost:8080/arm64/hello
|
||||||
|
;;
|
||||||
|
aarch64)
|
||||||
|
docker pull localhost:8080/hello
|
||||||
|
;;
|
||||||
|
esac
|
Loading…
Reference in a new issue